From 35fa91eed817d2c65c59ef5a9737011313be6ac0 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Tue, 16 Aug 2016 17:21:02 +0200
Subject: ARM: kernel: merge core and init PLTs

The PLT code uses a separate .init.plt section to allocate PLT entries
for jump and call instructions in __init code. However, even for fairly
sizable modules like mac80211.ko, we only end up with a couple of PLT
entries in the .init section, and so we can simplify the code
significantly by emitting all PLT entries into the same section.

Tested-by: Jongsung Kim
Signed-off-by: Ard Biesheuvel

diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index e358b79..464748b 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -23,10 +23,8 @@ struct mod_arch_specific {
 	struct unwind_table *unwind[ARM_SEC_MAX];
 #endif
 #ifdef CONFIG_ARM_MODULE_PLTS
-	struct elf32_shdr *core_plt;
-	struct elf32_shdr *init_plt;
-	int core_plt_count;
-	int init_plt_count;
+	struct elf32_shdr *plt;
+	int plt_count;
 #endif
 };
 
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 0c7efc3..6832d1d 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -30,28 +30,16 @@ struct plt_entries {
 	u32	lit[PLT_ENT_COUNT];
 };
 
-static bool in_init(const struct module *mod, u32 addr)
-{
-	return addr - (u32)mod->init_layout.base < mod->init_layout.size;
-}
-
 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 {
 	struct plt_entries *plt, *plt_end;
-	int c, *count;
-
-	if (in_init(mod, loc)) {
-		plt = (void *)mod->arch.init_plt->sh_addr;
-		plt_end = (void *)plt + mod->arch.init_plt->sh_size;
-		count = &mod->arch.init_plt_count;
-	} else {
-		plt = (void *)mod->arch.core_plt->sh_addr;
-		plt_end = (void *)plt + mod->arch.core_plt->sh_size;
-		count = &mod->arch.core_plt_count;
-	}
+	int c;
+
+	plt = (void *)mod->arch.plt->sh_addr;
+	plt_end = (void *)plt + mod->arch.plt->sh_size;
 
 	/* Look for an existing entry pointing to 'val' */
-	for (c = *count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
+	for (c = mod->arch.plt_count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
 		int i;
 
 		if (!c) {
@@ -60,13 +48,13 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 				{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
 				{ val, }
 			};
-			++*count;
+			mod->arch.plt_count++;
 			return (u32)plt->ldr;
 		}
 		for (i = 0; i < PLT_ENT_COUNT; i++) {
 			if (!plt->lit[i]) {
 				plt->lit[i] = val;
-				++*count;
+				mod->arch.plt_count++;
 			}
 			if (plt->lit[i] == val)
 				return (u32)&plt->ldr[i];
@@ -132,21 +120,19 @@ static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
 int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 			      char *secstrings, struct module *mod)
 {
-	unsigned long core_plts = 0, init_plts = 0;
+	unsigned long plts = 0;
 	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
 
 	/*
 	 * To store the PLTs, we expand the .text section for core module code
-	 * and the .init.text section for initialization code.
+	 * and for initialization code.
 	 */
 	for (s = sechdrs; s < sechdrs_end; ++s)
-		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
-			mod->arch.core_plt = s;
-		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
-			mod->arch.init_plt = s;
+		if (strcmp(".plt", secstrings + s->sh_name) == 0)
+			mod->arch.plt = s;
 
-	if (!mod->arch.core_plt || !mod->arch.init_plt) {
-		pr_err("%s: sections missing\n", mod->name);
+	if (!mod->arch.plt) {
+		pr_err("%s: module PLT section missing\n", mod->name);
 		return -ENOEXEC;
 	}
 
@@ -158,26 +144,16 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		if (s->sh_type != SHT_REL)
 			continue;
 
-		if (strstr(secstrings + s->sh_name, ".init"))
-			init_plts += count_plts(dstsec->sh_addr, rels, numrels);
-		else
-			core_plts += count_plts(dstsec->sh_addr, rels, numrels);
+		plts += count_plts(dstsec->sh_addr, rels, numrels);
 	}
 
-	mod->arch.core_plt->sh_type = SHT_NOBITS;
-	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
-	mod->arch.core_plt->sh_addralign = L1_CACHE_BYTES;
-	mod->arch.core_plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
-					       sizeof(struct plt_entries));
-	mod->arch.core_plt_count = 0;
-
-	mod->arch.init_plt->sh_type = SHT_NOBITS;
-	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
-	mod->arch.init_plt->sh_addralign = L1_CACHE_BYTES;
-	mod->arch.init_plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
-					       sizeof(struct plt_entries));
-	mod->arch.init_plt_count = 0;
-	pr_debug("%s: core.plt=%x, init.plt=%x\n", __func__,
-		 mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size);
+	mod->arch.plt->sh_type = SHT_NOBITS;
+	mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
+					  sizeof(struct plt_entries));
+	mod->arch.plt_count = 0;
+
+	pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
 
 	return 0;
 }
diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds
index 3682fa1..05881e2 100644
--- a/arch/arm/kernel/module.lds
+++ b/arch/arm/kernel/module.lds
@@ -1,4 +1,3 @@
 SECTIONS {
-	.core.plt : { BYTE(0) }
-	.init.plt : { BYTE(0) }
+	.plt : { BYTE(0) }
 }
--
cgit v0.10.2
From 05123fef098220323e60834d5520b15d277e0415 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Tue, 16 Aug 2016 16:49:56 +0200
Subject: ARM: kernel: allocate PLT entries only for external symbols

When CONFIG_ARM_MODULE_PLTS is enabled, jump and call instructions in
modules no longer need to be within 16 MB (8 MB for Thumb2) of their
targets. If they are further away, a PLT entry will be generated on the
fly for each of them, which extends the range to the entire 32-bit
address space.

However, since these PLT entries will become the branch targets of the
original jump and call instructions, the PLT itself needs to be in
range, or we end up in the same situation we started in. Since the PLT
is in a separate section, this essentially means that all jumps and
calls inside the same module must be resolvable without PLT entries.

The PLT allocation code executes before the module itself is loaded in
its final location, and so it has to use a worst-case estimate for
which jumps and calls will require an entry in the PLT at relocation
time. As an optimization, this code deduplicates entries pointing to
the same symbol, using an O(n^2) algorithm. However, it does not take
the above into account, i.e., that PLT entries will only be needed for
jump and call relocations against symbols that are not defined in the
module.

So disregard relocations against symbols that are defined in the module
itself.

As an additional minor optimization, ignore input sections that lack
the SHF_EXECINSTR flag. Since jump and call relocations operate on
executable instructions only, there is no need to look in sections that
do not contain executable code.

Tested-by: Jongsung Kim
Signed-off-by: Ard Biesheuvel

diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 6832d1d..6f93a90 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -88,32 +88,45 @@ static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
 }
 
 /* Count how many PLT entries we may need */
-static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
+static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
+			       const Elf32_Rel *rel, int num)
 {
 	unsigned int ret = 0;
+	const Elf32_Sym *s;
+	u32 mask;
 	int i;
 
+	if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
+		mask = __opcode_to_mem_thumb32(0x07ff2fff);
+	else
+		mask = __opcode_to_mem_arm(0x00ffffff);
+
 	/*
 	 * Sure, this is order(n^2), but it's usually short, and not
 	 * time critical
 	 */
-	for (i = 0; i < num; i++)
+	for (i = 0; i < num; i++) {
 		switch (ELF32_R_TYPE(rel[i].r_info)) {
 		case R_ARM_CALL:
 		case R_ARM_PC24:
 		case R_ARM_JUMP24:
-			if (!duplicate_rel(base, rel, i,
-					   __opcode_to_mem_arm(0x00ffffff)))
-				ret++;
-			break;
-#ifdef CONFIG_THUMB2_KERNEL
 		case R_ARM_THM_CALL:
 		case R_ARM_THM_JUMP24:
-			if (!duplicate_rel(base, rel, i,
-					   __opcode_to_mem_thumb32(0x07ff2fff)))
+			/*
+			 * We only have to consider branch targets that resolve
+			 * to undefined symbols. This is not simply a heuristic,
+			 * it is a fundamental limitation, since the PLT itself
+			 * is part of the module, and needs to be within range
+			 * as well, so modules can never grow beyond that limit.
+			 */
+			s = syms + ELF32_R_SYM(rel[i].r_info);
+			if (s->st_shndx != SHN_UNDEF)
+				break;
+
+			if (!duplicate_rel(base, rel, i, mask))
 				ret++;
-#endif
 		}
+	}
 	return ret;
 }
 
@@ -122,19 +135,27 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 {
 	unsigned long plts = 0;
 	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
+	Elf32_Sym *syms = NULL;
 
 	/*
 	 * To store the PLTs, we expand the .text section for core module code
 	 * and for initialization code.
 	 */
-	for (s = sechdrs; s < sechdrs_end; ++s)
+	for (s = sechdrs; s < sechdrs_end; ++s) {
 		if (strcmp(".plt", secstrings + s->sh_name) == 0)
 			mod->arch.plt = s;
+		else if (s->sh_type == SHT_SYMTAB)
+			syms = (Elf32_Sym *)s->sh_addr;
+	}
 
 	if (!mod->arch.plt) {
 		pr_err("%s: module PLT section missing\n", mod->name);
 		return -ENOEXEC;
 	}
+	if (!syms) {
+		pr_err("%s: module symtab section missing\n", mod->name);
+		return -ENOEXEC;
+	}
 
 	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
 		const Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
@@ -144,7 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		if (s->sh_type != SHT_REL)
 			continue;
 
-		plts += count_plts(dstsec->sh_addr, rels, numrels);
+		/* ignore relocations that operate on non-exec sections */
+		if (!(dstsec->sh_flags & SHF_EXECINSTR))
+			continue;
+
+		plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
 	}
 
 	mod->arch.plt->sh_type = SHT_NOBITS;
--
cgit v0.10.2
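[The heart of the patch above is the st_shndx test. As a rough stand-alone
illustration of the same check — user-space C against <elf.h>, with the
Thumb2 relocation types omitted, and may_need_plt() a made-up name for this
sketch, not anything in the kernel tree:]

#include <elf.h>
#include <stdbool.h>

/*
 * Illustrative stand-in: a jump/call relocation can only require a PLT
 * entry when its target symbol is undefined in the module itself
 * (st_shndx == SHN_UNDEF). Branches to locally defined symbols must be
 * in range by construction, since the PLT occupies part of the module's
 * own address range.
 */
bool may_need_plt(const Elf32_Sym *syms, const Elf32_Rel *rel)
{
	switch (ELF32_R_TYPE(rel->r_info)) {
	case R_ARM_CALL:
	case R_ARM_PC24:
	case R_ARM_JUMP24:	/* R_ARM_THM_* cases omitted for brevity */
		return syms[ELF32_R_SYM(rel->r_info)].st_shndx == SHN_UNDEF;
	default:
		return false;
	}
}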
From 1031a7e674d1de481d641c3723d5f53b776f621f Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 17 Aug 2016 13:45:21 +0200
Subject: ARM: kernel: sort relocation sections before allocating PLTs

The PLT allocation routines try to establish an upper bound on the
number of PLT entries that will be required at relocation time, and
optimize this by disregarding duplicates (i.e., PLT entries that will
end up pointing to the same function). This is currently an O(n^2)
algorithm, but we can greatly simplify this by

- sorting the relocation section so that relocations that can use the
  same PLT entry will be listed adjacently,
- disregarding jump/call relocations with addends; these are highly
  unusual for relocations against SHN_UNDEF symbols, and so we can
  simply allocate a PLT entry for each one we encounter, without trying
  to optimize away duplicates.

Tested-by: Jongsung Kim
Signed-off-by: Ard Biesheuvel

diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 6f93a90..ad1b98f 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -9,6 +9,7 @@
 #include <linux/elf.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/sort.h>
 
 #include <asm/cache.h>
 #include <asm/opcodes.h>
@@ -63,28 +64,63 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 	BUG();
 }
 
-static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
-			 u32 mask)
+#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rel(const void *a, const void *b)
 {
-	u32 *loc1, *loc2;
+	const Elf32_Rel *x = a, *y = b;
 	int i;
 
-	for (i = 0; i < num; i++) {
-		if (rel[i].r_info != rel[num].r_info)
-			continue;
+	/* sort by type and symbol index */
+	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
+	if (i == 0)
+		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
+	return i;
+}
 
-		/*
-		 * Identical relocation types against identical symbols can
-		 * still result in different PLT entries if the addend in the
-		 * place is different. So resolve the target of the relocation
-		 * to compare the values.
-		 */
-		loc1 = (u32 *)(base + rel[i].r_offset);
-		loc2 = (u32 *)(base + rel[num].r_offset);
-		if (((*loc1 ^ *loc2) & mask) == 0)
-			return 1;
+static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
+{
+	u32 *tval = (u32 *)(base + rel->r_offset);
+
+	/*
+	 * Do a bitwise compare on the raw addend rather than fully decoding
+	 * the offset and doing an arithmetic comparison.
+	 * Note that a zero-addend jump/call relocation is encoded taking the
+	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
+	 */
+	switch (ELF32_R_TYPE(rel->r_info)) {
+		u16 upper, lower;
+
+	case R_ARM_THM_CALL:
+	case R_ARM_THM_JUMP24:
+		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
+		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);
+
+		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;
+
+	case R_ARM_CALL:
+	case R_ARM_PC24:
+	case R_ARM_JUMP24:
+		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
 	}
-	return 0;
+	BUG();
+}
+
+static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
+{
+	const Elf32_Rel *prev;
+
+	/*
+	 * Entries are sorted by type and symbol index. That means that,
+	 * if a duplicate entry exists, it must be in the preceding
+	 * slot.
+	 */
+	if (!num)
+		return false;
+
+	prev = rel + num - 1;
+	return cmp_rel(rel + num, prev) == 0 &&
+	       is_zero_addend_relocation(base, prev);
 }
 
 /* Count how many PLT entries we may need */
@@ -93,18 +129,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
 {
 	unsigned int ret = 0;
 	const Elf32_Sym *s;
-	u32 mask;
 	int i;
 
-	if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
-		mask = __opcode_to_mem_thumb32(0x07ff2fff);
-	else
-		mask = __opcode_to_mem_arm(0x00ffffff);
-
-	/*
-	 * Sure, this is order(n^2), but it's usually short, and not
-	 * time critical
-	 */
 	for (i = 0; i < num; i++) {
 		switch (ELF32_R_TYPE(rel[i].r_info)) {
 		case R_ARM_CALL:
@@ -123,7 +149,18 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
 			if (s->st_shndx != SHN_UNDEF)
 				break;
 
-			if (!duplicate_rel(base, rel, i, mask))
+			/*
+			 * Jump relocations with non-zero addends against
+			 * undefined symbols are supported by the ELF spec, but
+			 * do not occur in practice (e.g., 'jump n bytes past
+			 * the entry point of undefined function symbol f').
+			 * So we need to support them, but there is no need to
+			 * take them into consideration when trying to optimize
+			 * this code. So let's only check for duplicates when
+			 * the addend is zero.
+			 */
+			if (!is_zero_addend_relocation(base, rel + i) ||
+			    !duplicate_rel(base, rel, i))
 				ret++;
 		}
 	}
@@ -158,7 +195,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 	}
 
 	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
-		const Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
+		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
 		int numrels = s->sh_size / sizeof(Elf32_Rel);
 		Elf32_Shdr *dstsec = sechdrs + s->sh_info;
 
@@ -169,6 +206,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		if (!(dstsec->sh_flags & SHF_EXECINSTR))
 			continue;
 
+		/* sort by type and symbol index */
+		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
+
 		plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
 	}
 
--
cgit v0.10.2
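[The effect of sorting on the duplicate check can be reproduced in
isolation. The sketch below is a user-space model of the scheme, with
qsort() standing in for the kernel's sort() and the zero-addend test left
out, since that needs the instruction stream; all names and values here are
illustrative, not the kernel's:]

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

#define cmp_3way(a, b)	((a) < (b) ? -1 : (a) > (b))

/* order relocations by type first, then by symbol index */
static int cmp_rel(const void *a, const void *b)
{
	const Elf32_Rel *x = a, *y = b;
	int i;

	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
	return i;
}

static unsigned int count_plts(Elf32_Rel *rel, int num)
{
	unsigned int ret = 0;

	qsort(rel, num, sizeof(*rel), cmp_rel);

	/* after sorting, a duplicate can only be the preceding entry */
	for (int i = 0; i < num; i++)
		if (i == 0 || cmp_rel(&rel[i], &rel[i - 1]) != 0)
			ret++;
	return ret;
}

int main(void)
{
	/* 14 call relocations against one symbol index, as in a module
	   that calls the same external function from 14 call sites */
	Elf32_Rel rels[14];

	for (int i = 0; i < 14; i++) {
		rels[i].r_offset = 0x4d8 + 0x12 * i;
		rels[i].r_info   = ELF32_R_INFO(0x58b, R_ARM_CALL);
	}
	printf("PLT slots needed: %u\n", count_plts(rels, 14)); /* prints 1 */
	return 0;
}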
From 66e94ba3c8ea5ff5f1443a50441f953ef44010b1 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 18 Aug 2016 10:58:49 +0200
Subject: ARM: kernel: avoid brute force search on PLT generation

Given that we now sort the relocation sections in a way that guarantees
that entries that can share a single PLT entry end up adjacently, there
is no longer a need to go over the entire list to look for an existing
entry that matches our jump target. If such a match exists, it was the
last one to be emitted, so we can simply check the preceding slot.

Note that this will still work correctly in the [theoretical] presence
of call/jump relocations against SHN_UNDEF symbols with non-zero
addends, although not optimally. Since the relocations are presented in
the same order that we checked them for duplicates, any duplicates that
we failed to spot the first time around will be accounted for in the
PLT allocation, so there is guaranteed to be sufficient space for them
when actually emitting the PLT.

For instance, the following sequence of relocations:

000004d8 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
000004fc 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
0000050e 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
00000520 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
00000532 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
00000544 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
00000556 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
00000568 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
0000057a 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
0000058c 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
0000059e 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
000005b0 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
000005c2 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null
000005d4 00058b0a R_ARM_THM_CALL 00000000 warn_slowpath_null

may result in several PLT entries being allocated, and also emitted, if
any of the entries in the middle refer to a Place that contains a
non-zero addend (i.e., one for all the preceding zero-addend
relocations, one for all the following zero-addend relocations, and one
for the non-zero addend relocation itself).

Tested-by: Jongsung Kim
Signed-off-by: Ard Biesheuvel

diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index ad1b98f..3a5cba9 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -33,35 +33,39 @@ struct plt_entries {
 
 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 {
-	struct plt_entries *plt, *plt_end;
-	int c;
-
-	plt = (void *)mod->arch.plt->sh_addr;
-	plt_end = (void *)plt + mod->arch.plt->sh_size;
-
-	/* Look for an existing entry pointing to 'val' */
-	for (c = mod->arch.plt_count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
-		int i;
-
-		if (!c) {
-			/* Populate a new set of entries */
-			*plt = (struct plt_entries){
-				{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
-				{ val, }
-			};
-			mod->arch.plt_count++;
-			return (u32)plt->ldr;
-		}
-		for (i = 0; i < PLT_ENT_COUNT; i++) {
-			if (!plt->lit[i]) {
-				plt->lit[i] = val;
-				mod->arch.plt_count++;
-			}
-			if (plt->lit[i] == val)
-				return (u32)&plt->ldr[i];
-		}
+	struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
+	int idx = 0;
+
+	/*
+	 * Look for an existing entry pointing to 'val'. Given that the
+	 * relocations are sorted, this will be the last entry we allocated.
+	 * (if one exists).
+	 */
+	if (mod->arch.plt_count > 0) {
+		plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
+		idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
+
+		if (plt->lit[idx] == val)
+			return (u32)&plt->ldr[idx];
+
+		idx = (idx + 1) % PLT_ENT_COUNT;
+		if (!idx)
+			plt++;
 	}
-	BUG();
+
+	mod->arch.plt_count++;
+	BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
+
+	if (!idx)
+		/* Populate a new set of entries */
+		*plt = (struct plt_entries){
+			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
+			{ val, }
+		};
+	else
+		plt->lit[idx] = val;
+
+	return (u32)&plt->ldr[idx];
 }
 
 #define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))
--
cgit v0.10.2
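[Taken together, the last two patches reduce both the counting pass and the
emission pass to a single comparison per relocation. Below is a user-space
model of the new get_module_plt() flow, under stated assumptions: the sizes
are made up (the kernel derives PLT_ENT_COUNT from L1_CACHE_BYTES), and the
veneer instructions in ldr[] are left unpopulated rather than filled with
PLT_ENT_LDR opcodes:]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PLT_ENT_COUNT	8	/* illustrative; the kernel groups per cache line */
#define MAX_GROUPS	4

struct plt_entries {
	uint32_t ldr[PLT_ENT_COUNT];	/* veneer instructions (not modelled) */
	uint32_t lit[PLT_ENT_COUNT];	/* literal pool holding branch targets */
};

static struct plt_entries plts[MAX_GROUPS];	/* zero-filled, like SHT_NOBITS */
static int plt_count;

static uint32_t *get_plt(uint32_t val)
{
	struct plt_entries *plt = plts;
	int idx = 0;

	if (plt_count > 0) {
		/* relocations arrive sorted, so only the most recently
		   allocated slot can be a duplicate of 'val' */
		plt += (plt_count - 1) / PLT_ENT_COUNT;
		idx = (plt_count - 1) % PLT_ENT_COUNT;

		if (plt->lit[idx] == val)
			return &plt->ldr[idx];

		idx = (idx + 1) % PLT_ENT_COUNT;
		if (!idx)
			plt++;	/* current group is full, start a new one */
	}

	plt_count++;
	assert(plt_count <= MAX_GROUPS * PLT_ENT_COUNT);

	plt->lit[idx] = val;
	return &plt->ldr[idx];
}

int main(void)
{
	uint32_t *a = get_plt(0x1000);
	uint32_t *b = get_plt(0x1000);	/* adjacent duplicate: slot reused */
	uint32_t *c = get_plt(0x2000);	/* new target: next slot */

	printf("%s\n", (a == b && b != c) ? "duplicate reused" : "bug");
	return 0;
}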