-rw-r--r--  Documentation/ABI/testing/sysfs-kernel-livepatch |   6
-rw-r--r--  arch/alpha/kernel/module.c                        |   2
-rw-r--r--  arch/arc/kernel/unwind.c                          |   4
-rw-r--r--  arch/arm/kernel/module-plts.c                     |   2
-rw-r--r--  arch/avr32/kernel/module.c                        |  12
-rw-r--r--  arch/ia64/kernel/module.c                         |  14
-rw-r--r--  arch/metag/kernel/module.c                        |   4
-rw-r--r--  arch/mips/kernel/vpe.c                            |   6
-rw-r--r--  arch/parisc/kernel/module.c                       |  32
-rw-r--r--  arch/powerpc/kernel/module_32.c                   |   6
-rw-r--r--  arch/s390/kernel/module.c                         |  22
-rw-r--r--  arch/x86/kernel/livepatch.c                       |  29
-rw-r--r--  include/linux/livepatch.h                         |  24
-rw-r--r--  include/linux/module.h                            |  68
-rw-r--r--  kernel/debug/kdb/kdb_main.c                       |   4
-rw-r--r--  kernel/gcov/base.c                                |   7
-rw-r--r--  kernel/livepatch/core.c                           | 176
-rw-r--r--  kernel/module.c                                   | 349
18 files changed, 340 insertions, 427 deletions
diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch
index 5bf42a840b22..da87f43aec58 100644
--- a/Documentation/ABI/testing/sysfs-kernel-livepatch
+++ b/Documentation/ABI/testing/sysfs-kernel-livepatch
| @@ -33,7 +33,7 @@ Description: | |||
| 33 | The object directory contains subdirectories for each function | 33 | The object directory contains subdirectories for each function |
| 34 | that is patched within the object. | 34 | that is patched within the object. |
| 35 | 35 | ||
| 36 | What: /sys/kernel/livepatch/<patch>/<object>/<function> | 36 | What: /sys/kernel/livepatch/<patch>/<object>/<function,sympos> |
| 37 | Date: Nov 2014 | 37 | Date: Nov 2014 |
| 38 | KernelVersion: 3.19.0 | 38 | KernelVersion: 3.19.0 |
| 39 | Contact: live-patching@vger.kernel.org | 39 | Contact: live-patching@vger.kernel.org |
| @@ -41,4 +41,8 @@ Description: | |||
| 41 | The function directory contains attributes regarding the | 41 | The function directory contains attributes regarding the |
| 42 | properties and state of the patched function. | 42 | properties and state of the patched function. |
| 43 | 43 | ||
| 44 | The directory name contains the patched function name and a | ||
| 45 | sympos number corresponding to the nth occurrence of the symbol | ||
| 46 | name in kallsyms for the patched object. | ||
| 47 | |||
| 44 | There are currently no such attributes. | 48 | There are currently no such attributes. |
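As a rough illustration of the naming rule documented above (a hypothetical sketch, not part of the patch): the directory name is the old function name plus the resolved position, and position 1 is shown when old_sympos was left at 0 because the symbol is unique in the object.

#include <stdio.h>

/* Hypothetical helper mirroring the <function,sympos> naming described
 * above; the patch name "sample" and the function names are made up. */
static void show_dir(const char *func, unsigned long old_sympos)
{
	printf("/sys/kernel/livepatch/sample/vmlinux/%s,%lu\n",
	       func, old_sympos ? old_sympos : 1);
}

int main(void)
{
	show_dir("cmdline_proc_show", 0);	/* unique symbol  -> ",1" */
	show_dir("do_cleanup", 2);		/* 2nd occurrence -> ",2" */
	return 0;
}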
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7077e4..936bc8f89a67 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
| @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, | |||
| 160 | 160 | ||
| 161 | /* The small sections were sorted to the end of the segment. | 161 | /* The small sections were sorted to the end of the segment. |
| 162 | The following should definitely cover them. */ | 162 | The following should definitely cover them. */ |
| 163 | gp = (u64)me->module_core + me->core_size - 0x8000; | 163 | gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000; |
| 164 | got = sechdrs[me->arch.gotsecindex].sh_addr; | 164 | got = sechdrs[me->arch.gotsecindex].sh_addr; |
| 165 | 165 | ||
| 166 | for (i = 0; i < n; i++) { | 166 | for (i = 0; i < n; i++) { |
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 5eb707640e9c..0587bf121d11 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
| @@ -385,8 +385,8 @@ void *unwind_add_table(struct module *module, const void *table_start, | |||
| 385 | return NULL; | 385 | return NULL; |
| 386 | 386 | ||
| 387 | init_unwind_table(table, module->name, | 387 | init_unwind_table(table, module->name, |
| 388 | module->module_core, module->core_size, | 388 | module->core_layout.base, module->core_layout.size, |
| 389 | module->module_init, module->init_size, | 389 | module->init_layout.base, module->init_layout.size, |
| 390 | table_start, table_size, | 390 | table_start, table_size, |
| 391 | NULL, 0); | 391 | NULL, 0); |
| 392 | 392 | ||
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 097e2e201b9f..0c7efc3446c0 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
| @@ -32,7 +32,7 @@ struct plt_entries { | |||
| 32 | 32 | ||
| 33 | static bool in_init(const struct module *mod, u32 addr) | 33 | static bool in_init(const struct module *mod, u32 addr) |
| 34 | { | 34 | { |
| 35 | return addr - (u32)mod->module_init < mod->init_size; | 35 | return addr - (u32)mod->init_layout.base < mod->init_layout.size; |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) | 38 | u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) |
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 164efa009e5b..2b4c54c04cb6 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
| @@ -118,9 +118,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, | |||
| 118 | * Increase core size to make room for GOT and set start | 118 | * Increase core size to make room for GOT and set start |
| 119 | * offset for GOT. | 119 | * offset for GOT. |
| 120 | */ | 120 | */ |
| 121 | module->core_size = ALIGN(module->core_size, 4); | 121 | module->core_layout.size = ALIGN(module->core_layout.size, 4); |
| 122 | module->arch.got_offset = module->core_size; | 122 | module->arch.got_offset = module->core_layout.size; |
| 123 | module->core_size += module->arch.got_size; | 123 | module->core_layout.size += module->arch.got_size; |
| 124 | 124 | ||
| 125 | return 0; | 125 | return 0; |
| 126 | 126 | ||
| @@ -177,7 +177,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, | |||
| 177 | if (!info->got_initialized) { | 177 | if (!info->got_initialized) { |
| 178 | Elf32_Addr *gotent; | 178 | Elf32_Addr *gotent; |
| 179 | 179 | ||
| 180 | gotent = (module->module_core | 180 | gotent = (module->core_layout.base |
| 181 | + module->arch.got_offset | 181 | + module->arch.got_offset |
| 182 | + info->got_offset); | 182 | + info->got_offset); |
| 183 | *gotent = relocation; | 183 | *gotent = relocation; |
| @@ -255,8 +255,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, | |||
| 255 | */ | 255 | */ |
| 256 | pr_debug("GOTPC: PC=0x%x, got_offset=0x%lx, core=0x%p\n", | 256 | pr_debug("GOTPC: PC=0x%x, got_offset=0x%lx, core=0x%p\n", |
| 257 | relocation, module->arch.got_offset, | 257 | relocation, module->arch.got_offset, |
| 258 | module->module_core); | 258 | module->core_layout.base); |
| 259 | relocation -= ((unsigned long)module->module_core | 259 | relocation -= ((unsigned long)module->core_layout.base |
| 260 | + module->arch.got_offset); | 260 | + module->arch.got_offset); |
| 261 | *location = relocation; | 261 | *location = relocation; |
| 262 | break; | 262 | break; |
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index b15933c31b2f..6ab0ae7d6535 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
| @@ -486,13 +486,13 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, | |||
| 486 | static inline int | 486 | static inline int |
| 487 | in_init (const struct module *mod, uint64_t addr) | 487 | in_init (const struct module *mod, uint64_t addr) |
| 488 | { | 488 | { |
| 489 | return addr - (uint64_t) mod->module_init < mod->init_size; | 489 | return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size; |
| 490 | } | 490 | } |
| 491 | 491 | ||
| 492 | static inline int | 492 | static inline int |
| 493 | in_core (const struct module *mod, uint64_t addr) | 493 | in_core (const struct module *mod, uint64_t addr) |
| 494 | { | 494 | { |
| 495 | return addr - (uint64_t) mod->module_core < mod->core_size; | 495 | return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size; |
| 496 | } | 496 | } |
| 497 | 497 | ||
| 498 | static inline int | 498 | static inline int |
| @@ -675,7 +675,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, | |||
| 675 | break; | 675 | break; |
| 676 | 676 | ||
| 677 | case RV_BDREL: | 677 | case RV_BDREL: |
| 678 | val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); | 678 | val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base); |
| 679 | break; | 679 | break; |
| 680 | 680 | ||
| 681 | case RV_LTV: | 681 | case RV_LTV: |
| @@ -810,15 +810,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind | |||
| 810 | * addresses have been selected... | 810 | * addresses have been selected... |
| 811 | */ | 811 | */ |
| 812 | uint64_t gp; | 812 | uint64_t gp; |
| 813 | if (mod->core_size > MAX_LTOFF) | 813 | if (mod->core_layout.size > MAX_LTOFF) |
| 814 | /* | 814 | /* |
| 815 | * This takes advantage of fact that SHF_ARCH_SMALL gets allocated | 815 | * This takes advantage of fact that SHF_ARCH_SMALL gets allocated |
| 816 | * at the end of the module. | 816 | * at the end of the module. |
| 817 | */ | 817 | */ |
| 818 | gp = mod->core_size - MAX_LTOFF / 2; | 818 | gp = mod->core_layout.size - MAX_LTOFF / 2; |
| 819 | else | 819 | else |
| 820 | gp = mod->core_size / 2; | 820 | gp = mod->core_layout.size / 2; |
| 821 | gp = (uint64_t) mod->module_core + ((gp + 7) & -8); | 821 | gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8); |
| 822 | mod->arch.gp = gp; | 822 | mod->arch.gp = gp; |
| 823 | DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); | 823 | DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); |
| 824 | } | 824 | } |
diff --git a/arch/metag/kernel/module.c b/arch/metag/kernel/module.c
index 986331cd0a52..bb8dfba9a763 100644
--- a/arch/metag/kernel/module.c
+++ b/arch/metag/kernel/module.c
| @@ -176,8 +176,8 @@ static uint32_t do_plt_call(void *location, Elf32_Addr val, | |||
| 176 | tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3); | 176 | tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3); |
| 177 | 177 | ||
| 178 | /* Init, or core PLT? */ | 178 | /* Init, or core PLT? */ |
| 179 | if (location >= mod->module_core | 179 | if (location >= mod->core_layout.base |
| 180 | && location < mod->module_core + mod->core_size) | 180 | && location < mod->core_layout.base + mod->core_layout.size) |
| 181 | entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; | 181 | entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; |
| 182 | else | 182 | else |
| 183 | entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; | 183 | entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; |
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 9067b651c7a2..544ea21bfef9 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
| @@ -205,11 +205,11 @@ static void layout_sections(struct module *mod, const Elf_Ehdr *hdr, | |||
| 205 | || s->sh_entsize != ~0UL) | 205 | || s->sh_entsize != ~0UL) |
| 206 | continue; | 206 | continue; |
| 207 | s->sh_entsize = | 207 | s->sh_entsize = |
| 208 | get_offset((unsigned long *)&mod->core_size, s); | 208 | get_offset((unsigned long *)&mod->core_layout.size, s); |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | if (m == 0) | 211 | if (m == 0) |
| 212 | mod->core_text_size = mod->core_size; | 212 | mod->core_layout.text_size = mod->core_layout.size; |
| 213 | 213 | ||
| 214 | } | 214 | } |
| 215 | } | 215 | } |
| @@ -641,7 +641,7 @@ static int vpe_elfload(struct vpe *v) | |||
| 641 | layout_sections(&mod, hdr, sechdrs, secstrings); | 641 | layout_sections(&mod, hdr, sechdrs, secstrings); |
| 642 | } | 642 | } |
| 643 | 643 | ||
| 644 | v->load_addr = alloc_progmem(mod.core_size); | 644 | v->load_addr = alloc_progmem(mod.core_layout.size); |
| 645 | if (!v->load_addr) | 645 | if (!v->load_addr) |
| 646 | return -ENOMEM; | 646 | return -ENOMEM; |
| 647 | 647 | ||
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 3c63a820fcda..b9d75d9fa9ac 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
| @@ -42,9 +42,9 @@ | |||
| 42 | * We are not doing SEGREL32 handling correctly. According to the ABI, we | 42 | * We are not doing SEGREL32 handling correctly. According to the ABI, we |
| 43 | * should do a value offset, like this: | 43 | * should do a value offset, like this: |
| 44 | * if (in_init(me, (void *)val)) | 44 | * if (in_init(me, (void *)val)) |
| 45 | * val -= (uint32_t)me->module_init; | 45 | * val -= (uint32_t)me->init_layout.base; |
| 46 | * else | 46 | * else |
| 47 | * val -= (uint32_t)me->module_core; | 47 | * val -= (uint32_t)me->core_layout.base; |
| 48 | * However, SEGREL32 is used only for PARISC unwind entries, and we want | 48 | * However, SEGREL32 is used only for PARISC unwind entries, and we want |
| 49 | * those entries to have an absolute address, and not just an offset. | 49 | * those entries to have an absolute address, and not just an offset. |
| 50 | * | 50 | * |
| @@ -100,14 +100,14 @@ | |||
| 100 | * or init pieces the location is */ | 100 | * or init pieces the location is */ |
| 101 | static inline int in_init(struct module *me, void *loc) | 101 | static inline int in_init(struct module *me, void *loc) |
| 102 | { | 102 | { |
| 103 | return (loc >= me->module_init && | 103 | return (loc >= me->init_layout.base && |
| 104 | loc <= (me->module_init + me->init_size)); | 104 | loc <= (me->init_layout.base + me->init_layout.size)); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static inline int in_core(struct module *me, void *loc) | 107 | static inline int in_core(struct module *me, void *loc) |
| 108 | { | 108 | { |
| 109 | return (loc >= me->module_core && | 109 | return (loc >= me->core_layout.base && |
| 110 | loc <= (me->module_core + me->core_size)); | 110 | loc <= (me->core_layout.base + me->core_layout.size)); |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | static inline int in_local(struct module *me, void *loc) | 113 | static inline int in_local(struct module *me, void *loc) |
| @@ -367,13 +367,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, | |||
| 367 | } | 367 | } |
| 368 | 368 | ||
| 369 | /* align things a bit */ | 369 | /* align things a bit */ |
| 370 | me->core_size = ALIGN(me->core_size, 16); | 370 | me->core_layout.size = ALIGN(me->core_layout.size, 16); |
| 371 | me->arch.got_offset = me->core_size; | 371 | me->arch.got_offset = me->core_layout.size; |
| 372 | me->core_size += gots * sizeof(struct got_entry); | 372 | me->core_layout.size += gots * sizeof(struct got_entry); |
| 373 | 373 | ||
| 374 | me->core_size = ALIGN(me->core_size, 16); | 374 | me->core_layout.size = ALIGN(me->core_layout.size, 16); |
| 375 | me->arch.fdesc_offset = me->core_size; | 375 | me->arch.fdesc_offset = me->core_layout.size; |
| 376 | me->core_size += fdescs * sizeof(Elf_Fdesc); | 376 | me->core_layout.size += fdescs * sizeof(Elf_Fdesc); |
| 377 | 377 | ||
| 378 | me->arch.got_max = gots; | 378 | me->arch.got_max = gots; |
| 379 | me->arch.fdesc_max = fdescs; | 379 | me->arch.fdesc_max = fdescs; |
| @@ -391,7 +391,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) | |||
| 391 | 391 | ||
| 392 | BUG_ON(value == 0); | 392 | BUG_ON(value == 0); |
| 393 | 393 | ||
| 394 | got = me->module_core + me->arch.got_offset; | 394 | got = me->core_layout.base + me->arch.got_offset; |
| 395 | for (i = 0; got[i].addr; i++) | 395 | for (i = 0; got[i].addr; i++) |
| 396 | if (got[i].addr == value) | 396 | if (got[i].addr == value) |
| 397 | goto out; | 397 | goto out; |
| @@ -409,7 +409,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) | |||
| 409 | #ifdef CONFIG_64BIT | 409 | #ifdef CONFIG_64BIT |
| 410 | static Elf_Addr get_fdesc(struct module *me, unsigned long value) | 410 | static Elf_Addr get_fdesc(struct module *me, unsigned long value) |
| 411 | { | 411 | { |
| 412 | Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; | 412 | Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset; |
| 413 | 413 | ||
| 414 | if (!value) { | 414 | if (!value) { |
| 415 | printk(KERN_ERR "%s: zero OPD requested!\n", me->name); | 415 | printk(KERN_ERR "%s: zero OPD requested!\n", me->name); |
| @@ -427,7 +427,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) | |||
| 427 | 427 | ||
| 428 | /* Create new one */ | 428 | /* Create new one */ |
| 429 | fdesc->addr = value; | 429 | fdesc->addr = value; |
| 430 | fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; | 430 | fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; |
| 431 | return (Elf_Addr)fdesc; | 431 | return (Elf_Addr)fdesc; |
| 432 | } | 432 | } |
| 433 | #endif /* CONFIG_64BIT */ | 433 | #endif /* CONFIG_64BIT */ |
| @@ -839,7 +839,7 @@ register_unwind_table(struct module *me, | |||
| 839 | 839 | ||
| 840 | table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; | 840 | table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; |
| 841 | end = table + sechdrs[me->arch.unwind_section].sh_size; | 841 | end = table + sechdrs[me->arch.unwind_section].sh_size; |
| 842 | gp = (Elf_Addr)me->module_core + me->arch.got_offset; | 842 | gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; |
| 843 | 843 | ||
| 844 | DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", | 844 | DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", |
| 845 | me->arch.unwind_section, table, end, gp); | 845 | me->arch.unwind_section, table, end, gp); |
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index c94d2e018d84..2c01665eb410 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
| @@ -188,8 +188,8 @@ static uint32_t do_plt_call(void *location, | |||
| 188 | 188 | ||
| 189 | pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); | 189 | pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); |
| 190 | /* Init, or core PLT? */ | 190 | /* Init, or core PLT? */ |
| 191 | if (location >= mod->module_core | 191 | if (location >= mod->core_layout.base |
| 192 | && location < mod->module_core + mod->core_size) | 192 | && location < mod->core_layout.base + mod->core_layout.size) |
| 193 | entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; | 193 | entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; |
| 194 | else | 194 | else |
| 195 | entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; | 195 | entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; |
| @@ -296,7 +296,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
| 296 | } | 296 | } |
| 297 | #ifdef CONFIG_DYNAMIC_FTRACE | 297 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 298 | module->arch.tramp = | 298 | module->arch.tramp = |
| 299 | do_plt_call(module->module_core, | 299 | do_plt_call(module->core_layout.base, |
| 300 | (unsigned long)ftrace_caller, | 300 | (unsigned long)ftrace_caller, |
| 301 | sechdrs, module); | 301 | sechdrs, module); |
| 302 | #endif | 302 | #endif |
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 0c1a679314dd..7873e171457c 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
| @@ -159,11 +159,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, | |||
| 159 | 159 | ||
| 160 | /* Increase core size by size of got & plt and set start | 160 | /* Increase core size by size of got & plt and set start |
| 161 | offsets for got and plt. */ | 161 | offsets for got and plt. */ |
| 162 | me->core_size = ALIGN(me->core_size, 4); | 162 | me->core_layout.size = ALIGN(me->core_layout.size, 4); |
| 163 | me->arch.got_offset = me->core_size; | 163 | me->arch.got_offset = me->core_layout.size; |
| 164 | me->core_size += me->arch.got_size; | 164 | me->core_layout.size += me->arch.got_size; |
| 165 | me->arch.plt_offset = me->core_size; | 165 | me->arch.plt_offset = me->core_layout.size; |
| 166 | me->core_size += me->arch.plt_size; | 166 | me->core_layout.size += me->arch.plt_size; |
| 167 | return 0; | 167 | return 0; |
| 168 | } | 168 | } |
| 169 | 169 | ||
| @@ -279,7 +279,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 279 | if (info->got_initialized == 0) { | 279 | if (info->got_initialized == 0) { |
| 280 | Elf_Addr *gotent; | 280 | Elf_Addr *gotent; |
| 281 | 281 | ||
| 282 | gotent = me->module_core + me->arch.got_offset + | 282 | gotent = me->core_layout.base + me->arch.got_offset + |
| 283 | info->got_offset; | 283 | info->got_offset; |
| 284 | *gotent = val; | 284 | *gotent = val; |
| 285 | info->got_initialized = 1; | 285 | info->got_initialized = 1; |
| @@ -302,7 +302,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 302 | rc = apply_rela_bits(loc, val, 0, 64, 0); | 302 | rc = apply_rela_bits(loc, val, 0, 64, 0); |
| 303 | else if (r_type == R_390_GOTENT || | 303 | else if (r_type == R_390_GOTENT || |
| 304 | r_type == R_390_GOTPLTENT) { | 304 | r_type == R_390_GOTPLTENT) { |
| 305 | val += (Elf_Addr) me->module_core - loc; | 305 | val += (Elf_Addr) me->core_layout.base - loc; |
| 306 | rc = apply_rela_bits(loc, val, 1, 32, 1); | 306 | rc = apply_rela_bits(loc, val, 1, 32, 1); |
| 307 | } | 307 | } |
| 308 | break; | 308 | break; |
| @@ -315,7 +315,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 315 | case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ | 315 | case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ |
| 316 | if (info->plt_initialized == 0) { | 316 | if (info->plt_initialized == 0) { |
| 317 | unsigned int *ip; | 317 | unsigned int *ip; |
| 318 | ip = me->module_core + me->arch.plt_offset + | 318 | ip = me->core_layout.base + me->arch.plt_offset + |
| 319 | info->plt_offset; | 319 | info->plt_offset; |
| 320 | ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ | 320 | ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ |
| 321 | ip[1] = 0x100a0004; | 321 | ip[1] = 0x100a0004; |
| @@ -334,7 +334,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 334 | val - loc + 0xffffUL < 0x1ffffeUL) || | 334 | val - loc + 0xffffUL < 0x1ffffeUL) || |
| 335 | (r_type == R_390_PLT32DBL && | 335 | (r_type == R_390_PLT32DBL && |
| 336 | val - loc + 0xffffffffULL < 0x1fffffffeULL))) | 336 | val - loc + 0xffffffffULL < 0x1fffffffeULL))) |
| 337 | val = (Elf_Addr) me->module_core + | 337 | val = (Elf_Addr) me->core_layout.base + |
| 338 | me->arch.plt_offset + | 338 | me->arch.plt_offset + |
| 339 | info->plt_offset; | 339 | info->plt_offset; |
| 340 | val += rela->r_addend - loc; | 340 | val += rela->r_addend - loc; |
| @@ -356,7 +356,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 356 | case R_390_GOTOFF32: /* 32 bit offset to GOT. */ | 356 | case R_390_GOTOFF32: /* 32 bit offset to GOT. */ |
| 357 | case R_390_GOTOFF64: /* 64 bit offset to GOT. */ | 357 | case R_390_GOTOFF64: /* 64 bit offset to GOT. */ |
| 358 | val = val + rela->r_addend - | 358 | val = val + rela->r_addend - |
| 359 | ((Elf_Addr) me->module_core + me->arch.got_offset); | 359 | ((Elf_Addr) me->core_layout.base + me->arch.got_offset); |
| 360 | if (r_type == R_390_GOTOFF16) | 360 | if (r_type == R_390_GOTOFF16) |
| 361 | rc = apply_rela_bits(loc, val, 0, 16, 0); | 361 | rc = apply_rela_bits(loc, val, 0, 16, 0); |
| 362 | else if (r_type == R_390_GOTOFF32) | 362 | else if (r_type == R_390_GOTOFF32) |
| @@ -366,7 +366,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 366 | break; | 366 | break; |
| 367 | case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ | 367 | case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ |
| 368 | case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ | 368 | case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ |
| 369 | val = (Elf_Addr) me->module_core + me->arch.got_offset + | 369 | val = (Elf_Addr) me->core_layout.base + me->arch.got_offset + |
| 370 | rela->r_addend - loc; | 370 | rela->r_addend - loc; |
| 371 | if (r_type == R_390_GOTPC) | 371 | if (r_type == R_390_GOTPC) |
| 372 | rc = apply_rela_bits(loc, val, 1, 32, 0); | 372 | rc = apply_rela_bits(loc, val, 1, 32, 0); |
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index d1d35ccffed3..92fc1a51f994 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
| @@ -20,8 +20,6 @@ | |||
| 20 | 20 | ||
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
| 23 | #include <asm/cacheflush.h> | ||
| 24 | #include <asm/page_types.h> | ||
| 25 | #include <asm/elf.h> | 23 | #include <asm/elf.h> |
| 26 | #include <asm/livepatch.h> | 24 | #include <asm/livepatch.h> |
| 27 | 25 | ||
| @@ -38,11 +36,10 @@ | |||
| 38 | int klp_write_module_reloc(struct module *mod, unsigned long type, | 36 | int klp_write_module_reloc(struct module *mod, unsigned long type, |
| 39 | unsigned long loc, unsigned long value) | 37 | unsigned long loc, unsigned long value) |
| 40 | { | 38 | { |
| 41 | int ret, numpages, size = 4; | 39 | size_t size = 4; |
| 42 | bool readonly; | ||
| 43 | unsigned long val; | 40 | unsigned long val; |
| 44 | unsigned long core = (unsigned long)mod->module_core; | 41 | unsigned long core = (unsigned long)mod->core_layout.base; |
| 45 | unsigned long core_size = mod->core_size; | 42 | unsigned long core_size = mod->core_layout.size; |
| 46 | 43 | ||
| 47 | switch (type) { | 44 | switch (type) { |
| 48 | case R_X86_64_NONE: | 45 | case R_X86_64_NONE: |
| @@ -69,23 +66,5 @@ int klp_write_module_reloc(struct module *mod, unsigned long type, | |||
| 69 | /* loc does not point to any symbol inside the module */ | 66 | /* loc does not point to any symbol inside the module */ |
| 70 | return -EINVAL; | 67 | return -EINVAL; |
| 71 | 68 | ||
| 72 | readonly = false; | 69 | return probe_kernel_write((void *)loc, &val, size); |
| 73 | |||
| 74 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX | ||
| 75 | if (loc < core + mod->core_ro_size) | ||
| 76 | readonly = true; | ||
| 77 | #endif | ||
| 78 | |||
| 79 | /* determine if the relocation spans a page boundary */ | ||
| 80 | numpages = ((loc & PAGE_MASK) == ((loc + size) & PAGE_MASK)) ? 1 : 2; | ||
| 81 | |||
| 82 | if (readonly) | ||
| 83 | set_memory_rw(loc & PAGE_MASK, numpages); | ||
| 84 | |||
| 85 | ret = probe_kernel_write((void *)loc, &val, size); | ||
| 86 | |||
| 87 | if (readonly) | ||
| 88 | set_memory_ro(loc & PAGE_MASK, numpages); | ||
| 89 | |||
| 90 | return ret; | ||
| 91 | } | 70 | } |
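With the read-only handling removed here, the arch hook is reduced to a single probe_kernel_write(); kernel/livepatch/core.c (later in this diff) instead brackets the whole relocation pass with module_disable_ro()/module_enable_ro(). For context, a condensed standalone sketch of the bookkeeping the deleted lines performed, since a 4-byte relocation may straddle a page boundary (PAGE_SIZE assumed to be 4 KiB for the example):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Same test the removed code used to size its set_memory_rw()/ro() calls:
 * one page if the write stays within a page, two if it crosses into the next. */
static int pages_touched(unsigned long loc, unsigned long size)
{
	return ((loc & PAGE_MASK) == ((loc + size) & PAGE_MASK)) ? 1 : 2;
}

int main(void)
{
	printf("%d\n", pages_touched(0x1000, 4));	/* 1: fits in one page   */
	printf("%d\n", pages_touched(0x1ffe, 4));	/* 2: crosses a boundary */
	return 0;
}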
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 31db7a05dd36..a8828652f794 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
| @@ -37,8 +37,9 @@ enum klp_state { | |||
| 37 | * struct klp_func - function structure for live patching | 37 | * struct klp_func - function structure for live patching |
| 38 | * @old_name: name of the function to be patched | 38 | * @old_name: name of the function to be patched |
| 39 | * @new_func: pointer to the patched function code | 39 | * @new_func: pointer to the patched function code |
| 40 | * @old_addr: a hint conveying at what address the old function | 40 | * @old_sympos: a hint indicating which symbol position the old function |
| 41 | * can be found (optional, vmlinux patches only) | 41 | * can be found (optional) |
| 42 | * @old_addr: the address of the function being patched | ||
| 42 | * @kobj: kobject for sysfs resources | 43 | * @kobj: kobject for sysfs resources |
| 43 | * @state: tracks function-level patch application state | 44 | * @state: tracks function-level patch application state |
| 44 | * @stack_node: list node for klp_ops func_stack list | 45 | * @stack_node: list node for klp_ops func_stack list |
| @@ -48,16 +49,16 @@ struct klp_func { | |||
| 48 | const char *old_name; | 49 | const char *old_name; |
| 49 | void *new_func; | 50 | void *new_func; |
| 50 | /* | 51 | /* |
| 51 | * The old_addr field is optional and can be used to resolve | 52 | * The old_sympos field is optional and can be used to resolve |
| 52 | * duplicate symbol names in the vmlinux object. If this | 53 | * duplicate symbol names in livepatch objects. If this field is zero, |
| 53 | * information is not present, the symbol is located by name | 54 | * it is expected the symbol is unique, otherwise patching fails. If |
| 54 | * with kallsyms. If the name is not unique and old_addr is | 55 | * this value is greater than zero then that occurrence of the symbol |
| 55 | * not provided, the patch application fails as there is no | 56 | * in kallsyms for the given object is used. |
| 56 | * way to resolve the ambiguity. | ||
| 57 | */ | 57 | */ |
| 58 | unsigned long old_addr; | 58 | unsigned long old_sympos; |
| 59 | 59 | ||
| 60 | /* internal */ | 60 | /* internal */ |
| 61 | unsigned long old_addr; | ||
| 61 | struct kobject kobj; | 62 | struct kobject kobj; |
| 62 | enum klp_state state; | 63 | enum klp_state state; |
| 63 | struct list_head stack_node; | 64 | struct list_head stack_node; |
| @@ -66,8 +67,7 @@ struct klp_func { | |||
| 66 | /** | 67 | /** |
| 67 | * struct klp_reloc - relocation structure for live patching | 68 | * struct klp_reloc - relocation structure for live patching |
| 68 | * @loc: address where the relocation will be written | 69 | * @loc: address where the relocation will be written |
| 69 | * @val: address of the referenced symbol (optional, | 70 | * @sympos: position in kallsyms to disambiguate symbols (optional) |
| 70 | * vmlinux patches only) | ||
| 71 | * @type: ELF relocation type | 71 | * @type: ELF relocation type |
| 72 | * @name: name of the referenced symbol (for lookup/verification) | 72 | * @name: name of the referenced symbol (for lookup/verification) |
| 73 | * @addend: offset from the referenced symbol | 73 | * @addend: offset from the referenced symbol |
| @@ -75,7 +75,7 @@ struct klp_func { | |||
| 75 | */ | 75 | */ |
| 76 | struct klp_reloc { | 76 | struct klp_reloc { |
| 77 | unsigned long loc; | 77 | unsigned long loc; |
| 78 | unsigned long val; | 78 | unsigned long sympos; |
| 79 | unsigned long type; | 79 | unsigned long type; |
| 80 | const char *name; | 80 | const char *name; |
| 81 | int addend; | 81 | int addend; |
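A minimal sketch of how a patch author uses the new field (hypothetical function names; the replacement functions are assumed to be defined elsewhere with matching signatures):

#include <linux/livepatch.h>

/* Hypothetical replacement functions. */
extern void livepatch_do_cleanup(void);
extern void livepatch_release_resources(void);

static struct klp_func funcs[] = {
	{
		.old_name   = "do_cleanup",
		.new_func   = livepatch_do_cleanup,
		.old_sympos = 2,	/* patch the 2nd "do_cleanup" in the object's kallsyms */
	},
	{
		.old_name   = "release_resources",
		.new_func   = livepatch_release_resources,
		/* old_sympos left at 0: the name must be unique, or patching fails */
	},
	{ }
};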
diff --git a/include/linux/module.h b/include/linux/module.h
index 3a19c79918e0..4560d8f1545d 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
| @@ -302,6 +302,28 @@ struct mod_tree_node { | |||
| 302 | struct latch_tree_node node; | 302 | struct latch_tree_node node; |
| 303 | }; | 303 | }; |
| 304 | 304 | ||
| 305 | struct module_layout { | ||
| 306 | /* The actual code + data. */ | ||
| 307 | void *base; | ||
| 308 | /* Total size. */ | ||
| 309 | unsigned int size; | ||
| 310 | /* The size of the executable code. */ | ||
| 311 | unsigned int text_size; | ||
| 312 | /* Size of RO section of the module (text+rodata) */ | ||
| 313 | unsigned int ro_size; | ||
| 314 | |||
| 315 | #ifdef CONFIG_MODULES_TREE_LOOKUP | ||
| 316 | struct mod_tree_node mtn; | ||
| 317 | #endif | ||
| 318 | }; | ||
| 319 | |||
| 320 | #ifdef CONFIG_MODULES_TREE_LOOKUP | ||
| 321 | /* Only touch one cacheline for common rbtree-for-core-layout case. */ | ||
| 322 | #define __module_layout_align ____cacheline_aligned | ||
| 323 | #else | ||
| 324 | #define __module_layout_align | ||
| 325 | #endif | ||
| 326 | |||
| 305 | struct module { | 327 | struct module { |
| 306 | enum module_state state; | 328 | enum module_state state; |
| 307 | 329 | ||
| @@ -366,37 +388,9 @@ struct module { | |||
| 366 | /* Startup function. */ | 388 | /* Startup function. */ |
| 367 | int (*init)(void); | 389 | int (*init)(void); |
| 368 | 390 | ||
| 369 | /* | 391 | /* Core layout: rbtree is accessed frequently, so keep together. */ |
| 370 | * If this is non-NULL, vfree() after init() returns. | 392 | struct module_layout core_layout __module_layout_align; |
| 371 | * | 393 | struct module_layout init_layout; |
| 372 | * Cacheline align here, such that: | ||
| 373 | * module_init, module_core, init_size, core_size, | ||
| 374 | * init_text_size, core_text_size and mtn_core::{mod,node[0]} | ||
| 375 | * are on the same cacheline. | ||
| 376 | */ | ||
| 377 | void *module_init ____cacheline_aligned; | ||
| 378 | |||
| 379 | /* Here is the actual code + data, vfree'd on unload. */ | ||
| 380 | void *module_core; | ||
| 381 | |||
| 382 | /* Here are the sizes of the init and core sections */ | ||
| 383 | unsigned int init_size, core_size; | ||
| 384 | |||
| 385 | /* The size of the executable code in each section. */ | ||
| 386 | unsigned int init_text_size, core_text_size; | ||
| 387 | |||
| 388 | #ifdef CONFIG_MODULES_TREE_LOOKUP | ||
| 389 | /* | ||
| 390 | * We want mtn_core::{mod,node[0]} to be in the same cacheline as the | ||
| 391 | * above entries such that a regular lookup will only touch one | ||
| 392 | * cacheline. | ||
| 393 | */ | ||
| 394 | struct mod_tree_node mtn_core; | ||
| 395 | struct mod_tree_node mtn_init; | ||
| 396 | #endif | ||
| 397 | |||
| 398 | /* Size of RO sections of the module (text+rodata) */ | ||
| 399 | unsigned int init_ro_size, core_ro_size; | ||
| 400 | 394 | ||
| 401 | /* Arch-specific module values */ | 395 | /* Arch-specific module values */ |
| 402 | struct mod_arch_specific arch; | 396 | struct mod_arch_specific arch; |
| @@ -505,15 +499,15 @@ bool is_module_text_address(unsigned long addr); | |||
| 505 | static inline bool within_module_core(unsigned long addr, | 499 | static inline bool within_module_core(unsigned long addr, |
| 506 | const struct module *mod) | 500 | const struct module *mod) |
| 507 | { | 501 | { |
| 508 | return (unsigned long)mod->module_core <= addr && | 502 | return (unsigned long)mod->core_layout.base <= addr && |
| 509 | addr < (unsigned long)mod->module_core + mod->core_size; | 503 | addr < (unsigned long)mod->core_layout.base + mod->core_layout.size; |
| 510 | } | 504 | } |
| 511 | 505 | ||
| 512 | static inline bool within_module_init(unsigned long addr, | 506 | static inline bool within_module_init(unsigned long addr, |
| 513 | const struct module *mod) | 507 | const struct module *mod) |
| 514 | { | 508 | { |
| 515 | return (unsigned long)mod->module_init <= addr && | 509 | return (unsigned long)mod->init_layout.base <= addr && |
| 516 | addr < (unsigned long)mod->module_init + mod->init_size; | 510 | addr < (unsigned long)mod->init_layout.base + mod->init_layout.size; |
| 517 | } | 511 | } |
| 518 | 512 | ||
| 519 | static inline bool within_module(unsigned long addr, const struct module *mod) | 513 | static inline bool within_module(unsigned long addr, const struct module *mod) |
| @@ -768,9 +762,13 @@ extern int module_sysfs_initialized; | |||
| 768 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX | 762 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX |
| 769 | extern void set_all_modules_text_rw(void); | 763 | extern void set_all_modules_text_rw(void); |
| 770 | extern void set_all_modules_text_ro(void); | 764 | extern void set_all_modules_text_ro(void); |
| 765 | extern void module_enable_ro(const struct module *mod); | ||
| 766 | extern void module_disable_ro(const struct module *mod); | ||
| 771 | #else | 767 | #else |
| 772 | static inline void set_all_modules_text_rw(void) { } | 768 | static inline void set_all_modules_text_rw(void) { } |
| 773 | static inline void set_all_modules_text_ro(void) { } | 769 | static inline void set_all_modules_text_ro(void) { } |
| 770 | static inline void module_enable_ro(const struct module *mod) { } | ||
| 771 | static inline void module_disable_ro(const struct module *mod) { } | ||
| 774 | #endif | 772 | #endif |
| 775 | 773 | ||
| 776 | #ifdef CONFIG_GENERIC_BUG | 774 | #ifdef CONFIG_GENERIC_BUG |
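The conversion throughout this series is mechanical: module_core, core_size, core_text_size, core_ro_size and mtn_core collapse into core_layout, and the init_* fields into init_layout. Since text_size and ro_size are offsets from base, address ranges fall out of a single layout; a hedged sketch of a derived check (not a helper that exists in the tree):

#include <linux/module.h>

/* Hypothetical helper: does addr fall in the core rodata of a module?
 * Layout per the comment added to kernel/module.c further down:
 * [text][rodata][writable data], with text_size <= ro_size <= size. */
static bool addr_in_core_rodata(const struct module *mod, unsigned long addr)
{
	unsigned long base = (unsigned long)mod->core_layout.base;

	return addr >= base + mod->core_layout.text_size &&
	       addr <  base + mod->core_layout.ro_size;
}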
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 4121345498e0..2a20c0dfdafc 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
| @@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv) | |||
| 2021 | continue; | 2021 | continue; |
| 2022 | 2022 | ||
| 2023 | kdb_printf("%-20s%8u 0x%p ", mod->name, | 2023 | kdb_printf("%-20s%8u 0x%p ", mod->name, |
| 2024 | mod->core_size, (void *)mod); | 2024 | mod->core_layout.size, (void *)mod); |
| 2025 | #ifdef CONFIG_MODULE_UNLOAD | 2025 | #ifdef CONFIG_MODULE_UNLOAD |
| 2026 | kdb_printf("%4d ", module_refcount(mod)); | 2026 | kdb_printf("%4d ", module_refcount(mod)); |
| 2027 | #endif | 2027 | #endif |
| @@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv) | |||
| 2031 | kdb_printf(" (Loading)"); | 2031 | kdb_printf(" (Loading)"); |
| 2032 | else | 2032 | else |
| 2033 | kdb_printf(" (Live)"); | 2033 | kdb_printf(" (Live)"); |
| 2034 | kdb_printf(" 0x%p", mod->module_core); | 2034 | kdb_printf(" 0x%p", mod->core_layout.base); |
| 2035 | 2035 | ||
| 2036 | #ifdef CONFIG_MODULE_UNLOAD | 2036 | #ifdef CONFIG_MODULE_UNLOAD |
| 2037 | { | 2037 | { |
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index 7080ae1eb6c1..2f9df37940a0 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
| @@ -123,11 +123,6 @@ void gcov_enable_events(void) | |||
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | #ifdef CONFIG_MODULES | 125 | #ifdef CONFIG_MODULES |
| 126 | static inline int within(void *addr, void *start, unsigned long size) | ||
| 127 | { | ||
| 128 | return ((addr >= start) && (addr < start + size)); | ||
| 129 | } | ||
| 130 | |||
| 131 | /* Update list and generate events when modules are unloaded. */ | 126 | /* Update list and generate events when modules are unloaded. */ |
| 132 | static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, | 127 | static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, |
| 133 | void *data) | 128 | void *data) |
| @@ -142,7 +137,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, | |||
| 142 | 137 | ||
| 143 | /* Remove entries located in module from linked list. */ | 138 | /* Remove entries located in module from linked list. */ |
| 144 | while ((info = gcov_info_next(info))) { | 139 | while ((info = gcov_info_next(info))) { |
| 145 | if (within(info, mod->module_core, mod->core_size)) { | 140 | if (within_module((unsigned long)info, mod)) { |
| 146 | gcov_info_unlink(prev, info); | 141 | gcov_info_unlink(prev, info); |
| 147 | if (gcov_events_enabled) | 142 | if (gcov_events_enabled) |
| 148 | gcov_event(GCOV_REMOVE, info); | 143 | gcov_event(GCOV_REMOVE, info); |
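The dropped within() helper is covered by within_module(), which, going by the inline helpers in the include/linux/module.h hunk above, amounts to roughly the following (an approximate expansion, for reference only):

#include <linux/module.h>

struct gcov_info;	/* opaque here; defined by the gcov format code */

static inline bool gcov_info_within_module(const struct gcov_info *info,
					   const struct module *mod)
{
	unsigned long addr = (unsigned long)info;

	return within_module_init(addr, mod) || within_module_core(addr, mod);
}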
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index db545cbcdb89..bc2c85c064c1 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
| 29 | #include <linux/kallsyms.h> | 29 | #include <linux/kallsyms.h> |
| 30 | #include <linux/livepatch.h> | 30 | #include <linux/livepatch.h> |
| 31 | #include <asm/cacheflush.h> | ||
| 31 | 32 | ||
| 32 | /** | 33 | /** |
| 33 | * struct klp_ops - structure for tracking registered ftrace ops structs | 34 | * struct klp_ops - structure for tracking registered ftrace ops structs |
| @@ -135,13 +136,8 @@ struct klp_find_arg { | |||
| 135 | const char *objname; | 136 | const char *objname; |
| 136 | const char *name; | 137 | const char *name; |
| 137 | unsigned long addr; | 138 | unsigned long addr; |
| 138 | /* | ||
| 139 | * If count == 0, the symbol was not found. If count == 1, a unique | ||
| 140 | * match was found and addr is set. If count > 1, there is | ||
| 141 | * unresolvable ambiguity among "count" number of symbols with the same | ||
| 142 | * name in the same object. | ||
| 143 | */ | ||
| 144 | unsigned long count; | 139 | unsigned long count; |
| 140 | unsigned long pos; | ||
| 145 | }; | 141 | }; |
| 146 | 142 | ||
| 147 | static int klp_find_callback(void *data, const char *name, | 143 | static int klp_find_callback(void *data, const char *name, |
| @@ -158,37 +154,48 @@ static int klp_find_callback(void *data, const char *name, | |||
| 158 | if (args->objname && strcmp(args->objname, mod->name)) | 154 | if (args->objname && strcmp(args->objname, mod->name)) |
| 159 | return 0; | 155 | return 0; |
| 160 | 156 | ||
| 161 | /* | ||
| 162 | * args->addr might be overwritten if another match is found | ||
| 163 | * but klp_find_object_symbol() handles this and only returns the | ||
| 164 | * addr if count == 1. | ||
| 165 | */ | ||
| 166 | args->addr = addr; | 157 | args->addr = addr; |
| 167 | args->count++; | 158 | args->count++; |
| 168 | 159 | ||
| 160 | /* | ||
| 161 | * Finish the search when the symbol is found for the desired position | ||
| 162 | * or the position is not defined for a non-unique symbol. | ||
| 163 | */ | ||
| 164 | if ((args->pos && (args->count == args->pos)) || | ||
| 165 | (!args->pos && (args->count > 1))) | ||
| 166 | return 1; | ||
| 167 | |||
| 169 | return 0; | 168 | return 0; |
| 170 | } | 169 | } |
| 171 | 170 | ||
| 172 | static int klp_find_object_symbol(const char *objname, const char *name, | 171 | static int klp_find_object_symbol(const char *objname, const char *name, |
| 173 | unsigned long *addr) | 172 | unsigned long sympos, unsigned long *addr) |
| 174 | { | 173 | { |
| 175 | struct klp_find_arg args = { | 174 | struct klp_find_arg args = { |
| 176 | .objname = objname, | 175 | .objname = objname, |
| 177 | .name = name, | 176 | .name = name, |
| 178 | .addr = 0, | 177 | .addr = 0, |
| 179 | .count = 0 | 178 | .count = 0, |
| 179 | .pos = sympos, | ||
| 180 | }; | 180 | }; |
| 181 | 181 | ||
| 182 | mutex_lock(&module_mutex); | 182 | mutex_lock(&module_mutex); |
| 183 | kallsyms_on_each_symbol(klp_find_callback, &args); | 183 | kallsyms_on_each_symbol(klp_find_callback, &args); |
| 184 | mutex_unlock(&module_mutex); | 184 | mutex_unlock(&module_mutex); |
| 185 | 185 | ||
| 186 | if (args.count == 0) | 186 | /* |
| 187 | * Ensure an address was found. If sympos is 0, ensure symbol is unique; | ||
| 188 | * otherwise ensure the symbol position count matches sympos. | ||
| 189 | */ | ||
| 190 | if (args.addr == 0) | ||
| 187 | pr_err("symbol '%s' not found in symbol table\n", name); | 191 | pr_err("symbol '%s' not found in symbol table\n", name); |
| 188 | else if (args.count > 1) | 192 | else if (args.count > 1 && sympos == 0) { |
| 189 | pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n", | 193 | pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n", |
| 190 | args.count, name, objname); | 194 | args.count, name, objname); |
| 191 | else { | 195 | } else if (sympos != args.count && sympos > 0) { |
| 196 | pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n", | ||
| 197 | sympos, name, objname ? objname : "vmlinux"); | ||
| 198 | } else { | ||
| 192 | *addr = args.addr; | 199 | *addr = args.addr; |
| 193 | return 0; | 200 | return 0; |
| 194 | } | 201 | } |
| @@ -197,66 +204,6 @@ static int klp_find_object_symbol(const char *objname, const char *name, | |||
| 197 | return -EINVAL; | 204 | return -EINVAL; |
| 198 | } | 205 | } |
| 199 | 206 | ||
| 200 | struct klp_verify_args { | ||
| 201 | const char *name; | ||
| 202 | const unsigned long addr; | ||
| 203 | }; | ||
| 204 | |||
| 205 | static int klp_verify_callback(void *data, const char *name, | ||
| 206 | struct module *mod, unsigned long addr) | ||
| 207 | { | ||
| 208 | struct klp_verify_args *args = data; | ||
| 209 | |||
| 210 | if (!mod && | ||
| 211 | !strcmp(args->name, name) && | ||
| 212 | args->addr == addr) | ||
| 213 | return 1; | ||
| 214 | |||
| 215 | return 0; | ||
| 216 | } | ||
| 217 | |||
| 218 | static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr) | ||
| 219 | { | ||
| 220 | struct klp_verify_args args = { | ||
| 221 | .name = name, | ||
| 222 | .addr = addr, | ||
| 223 | }; | ||
| 224 | int ret; | ||
| 225 | |||
| 226 | mutex_lock(&module_mutex); | ||
| 227 | ret = kallsyms_on_each_symbol(klp_verify_callback, &args); | ||
| 228 | mutex_unlock(&module_mutex); | ||
| 229 | |||
| 230 | if (!ret) { | ||
| 231 | pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n", | ||
| 232 | name, addr); | ||
| 233 | return -EINVAL; | ||
| 234 | } | ||
| 235 | |||
| 236 | return 0; | ||
| 237 | } | ||
| 238 | |||
| 239 | static int klp_find_verify_func_addr(struct klp_object *obj, | ||
| 240 | struct klp_func *func) | ||
| 241 | { | ||
| 242 | int ret; | ||
| 243 | |||
| 244 | #if defined(CONFIG_RANDOMIZE_BASE) | ||
| 245 | /* If KASLR has been enabled, adjust old_addr accordingly */ | ||
| 246 | if (kaslr_enabled() && func->old_addr) | ||
| 247 | func->old_addr += kaslr_offset(); | ||
| 248 | #endif | ||
| 249 | |||
| 250 | if (!func->old_addr || klp_is_module(obj)) | ||
| 251 | ret = klp_find_object_symbol(obj->name, func->old_name, | ||
| 252 | &func->old_addr); | ||
| 253 | else | ||
| 254 | ret = klp_verify_vmlinux_symbol(func->old_name, | ||
| 255 | func->old_addr); | ||
| 256 | |||
| 257 | return ret; | ||
| 258 | } | ||
| 259 | |||
| 260 | /* | 207 | /* |
| 261 | * external symbols are located outside the parent object (where the parent | 208 | * external symbols are located outside the parent object (where the parent |
| 262 | * object is either vmlinux or the kmod being patched). | 209 | * object is either vmlinux or the kmod being patched). |
| @@ -276,14 +223,18 @@ static int klp_find_external_symbol(struct module *pmod, const char *name, | |||
| 276 | } | 223 | } |
| 277 | preempt_enable(); | 224 | preempt_enable(); |
| 278 | 225 | ||
| 279 | /* otherwise check if it's in another .o within the patch module */ | 226 | /* |
| 280 | return klp_find_object_symbol(pmod->name, name, addr); | 227 | * Check if it's in another .o within the patch module. This also |
| 228 | * checks that the external symbol is unique. | ||
| 229 | */ | ||
| 230 | return klp_find_object_symbol(pmod->name, name, 0, addr); | ||
| 281 | } | 231 | } |
| 282 | 232 | ||
| 283 | static int klp_write_object_relocations(struct module *pmod, | 233 | static int klp_write_object_relocations(struct module *pmod, |
| 284 | struct klp_object *obj) | 234 | struct klp_object *obj) |
| 285 | { | 235 | { |
| 286 | int ret; | 236 | int ret = 0; |
| 237 | unsigned long val; | ||
| 287 | struct klp_reloc *reloc; | 238 | struct klp_reloc *reloc; |
| 288 | 239 | ||
| 289 | if (WARN_ON(!klp_is_object_loaded(obj))) | 240 | if (WARN_ON(!klp_is_object_loaded(obj))) |
| @@ -292,41 +243,38 @@ static int klp_write_object_relocations(struct module *pmod, | |||
| 292 | if (WARN_ON(!obj->relocs)) | 243 | if (WARN_ON(!obj->relocs)) |
| 293 | return -EINVAL; | 244 | return -EINVAL; |
| 294 | 245 | ||
| 246 | module_disable_ro(pmod); | ||
| 247 | |||
| 295 | for (reloc = obj->relocs; reloc->name; reloc++) { | 248 | for (reloc = obj->relocs; reloc->name; reloc++) { |
| 296 | if (!klp_is_module(obj)) { | 249 | /* discover the address of the referenced symbol */ |
| 297 | 250 | if (reloc->external) { | |
| 298 | #if defined(CONFIG_RANDOMIZE_BASE) | 251 | if (reloc->sympos > 0) { |
| 299 | /* If KASLR has been enabled, adjust old value accordingly */ | 252 | pr_err("non-zero sympos for external reloc symbol '%s' is not supported\n", |
| 300 | if (kaslr_enabled()) | 253 | reloc->name); |
| 301 | reloc->val += kaslr_offset(); | 254 | ret = -EINVAL; |
| 302 | #endif | 255 | goto out; |
| 303 | ret = klp_verify_vmlinux_symbol(reloc->name, | 256 | } |
| 304 | reloc->val); | 257 | ret = klp_find_external_symbol(pmod, reloc->name, &val); |
| 305 | if (ret) | 258 | } else |
| 306 | return ret; | 259 | ret = klp_find_object_symbol(obj->name, |
| 307 | } else { | 260 | reloc->name, |
| 308 | /* module, reloc->val needs to be discovered */ | 261 | reloc->sympos, |
| 309 | if (reloc->external) | 262 | &val); |
| 310 | ret = klp_find_external_symbol(pmod, | 263 | if (ret) |
| 311 | reloc->name, | 264 | goto out; |
| 312 | &reloc->val); | 265 | |
| 313 | else | ||
| 314 | ret = klp_find_object_symbol(obj->mod->name, | ||
| 315 | reloc->name, | ||
| 316 | &reloc->val); | ||
| 317 | if (ret) | ||
| 318 | return ret; | ||
| 319 | } | ||
| 320 | ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc, | 266 | ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc, |
| 321 | reloc->val + reloc->addend); | 267 | val + reloc->addend); |
| 322 | if (ret) { | 268 | if (ret) { |
| 323 | pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n", | 269 | pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n", |
| 324 | reloc->name, reloc->val, ret); | 270 | reloc->name, val, ret); |
| 325 | return ret; | 271 | goto out; |
| 326 | } | 272 | } |
| 327 | } | 273 | } |
| 328 | 274 | ||
| 329 | return 0; | 275 | out: |
| 276 | module_enable_ro(pmod); | ||
| 277 | return ret; | ||
| 330 | } | 278 | } |
| 331 | 279 | ||
| 332 | static void notrace klp_ftrace_handler(unsigned long ip, | 280 | static void notrace klp_ftrace_handler(unsigned long ip, |
| @@ -593,7 +541,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch); | |||
| 593 | * /sys/kernel/livepatch/<patch> | 541 | * /sys/kernel/livepatch/<patch> |
| 594 | * /sys/kernel/livepatch/<patch>/enabled | 542 | * /sys/kernel/livepatch/<patch>/enabled |
| 595 | * /sys/kernel/livepatch/<patch>/<object> | 543 | * /sys/kernel/livepatch/<patch>/<object> |
| 596 | * /sys/kernel/livepatch/<patch>/<object>/<func> | 544 | * /sys/kernel/livepatch/<patch>/<object>/<function,sympos> |
| 597 | */ | 545 | */ |
| 598 | 546 | ||
| 599 | static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, | 547 | static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, |
| @@ -738,8 +686,14 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) | |||
| 738 | INIT_LIST_HEAD(&func->stack_node); | 686 | INIT_LIST_HEAD(&func->stack_node); |
| 739 | func->state = KLP_DISABLED; | 687 | func->state = KLP_DISABLED; |
| 740 | 688 | ||
| 689 | /* The format for the sysfs directory is <function,sympos> where sympos | ||
| 690 | * is the nth occurrence of this symbol in kallsyms for the patched | ||
| 691 | * object. If the user selects 0 for old_sympos, then 1 will be used | ||
| 692 | * since a unique symbol will be the first occurrence. | ||
| 693 | */ | ||
| 741 | return kobject_init_and_add(&func->kobj, &klp_ktype_func, | 694 | return kobject_init_and_add(&func->kobj, &klp_ktype_func, |
| 742 | &obj->kobj, "%s", func->old_name); | 695 | &obj->kobj, "%s,%lu", func->old_name, |
| 696 | func->old_sympos ? func->old_sympos : 1); | ||
| 743 | } | 697 | } |
| 744 | 698 | ||
| 745 | /* parts of the initialization that is done only when the object is loaded */ | 699 | /* parts of the initialization that is done only when the object is loaded */ |
| @@ -756,7 +710,9 @@ static int klp_init_object_loaded(struct klp_patch *patch, | |||
| 756 | } | 710 | } |
| 757 | 711 | ||
| 758 | klp_for_each_func(obj, func) { | 712 | klp_for_each_func(obj, func) { |
| 759 | ret = klp_find_verify_func_addr(obj, func); | 713 | ret = klp_find_object_symbol(obj->name, func->old_name, |
| 714 | func->old_sympos, | ||
| 715 | &func->old_addr); | ||
| 760 | if (ret) | 716 | if (ret) |
| 761 | return ret; | 717 | return ret; |
| 762 | } | 718 | } |
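The sympos rules are easiest to see in isolation. A standalone sketch of the same counting logic over a flat symbol table (hedged: the real code walks kallsyms via kallsyms_on_each_symbol() under module_mutex and also filters on the object name):

#include <stdio.h>
#include <string.h>

/* Return the index of the sympos'th occurrence of "name", or -1.
 * sympos == 0 demands a unique match, mirroring klp_find_object_symbol(). */
static int find_sym(const char **syms, int nsyms,
		    const char *name, unsigned long sympos)
{
	unsigned long count = 0;
	int found = -1;

	for (int i = 0; i < nsyms; i++) {
		if (strcmp(syms[i], name))
			continue;
		count++;
		found = i;
		if (sympos && count == sympos)	/* requested occurrence reached */
			return i;
		if (!sympos && count > 1)	/* ambiguous without a position */
			return -1;
	}
	if (!count)
		return -1;			/* not found at all */
	if (sympos && count != sympos)
		return -1;			/* fewer occurrences than requested */
	return found;
}

int main(void)
{
	const char *syms[] = { "init", "do_cleanup", "probe", "do_cleanup" };

	printf("%d\n", find_sym(syms, 4, "do_cleanup", 2));	/*  3 */
	printf("%d\n", find_sym(syms, 4, "do_cleanup", 0));	/* -1: ambiguous */
	printf("%d\n", find_sym(syms, 4, "probe", 0));		/*  2: unique */
	return 0;
}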
diff --git a/kernel/module.c b/kernel/module.c
index 38c7bd5583ff..8358f4697c0c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
| @@ -80,15 +80,6 @@ | |||
| 80 | # define debug_align(X) (X) | 80 | # define debug_align(X) (X) |
| 81 | #endif | 81 | #endif |
| 82 | 82 | ||
| 83 | /* | ||
| 84 | * Given BASE and SIZE this macro calculates the number of pages the | ||
| 85 | * memory regions occupies | ||
| 86 | */ | ||
| 87 | #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \ | ||
| 88 | (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \ | ||
| 89 | PFN_DOWN((unsigned long)BASE) + 1) \ | ||
| 90 | : (0UL)) | ||
| 91 | |||
| 92 | /* If this is set, the section belongs in the init part of the module */ | 83 | /* If this is set, the section belongs in the init part of the module */ |
| 93 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) | 84 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) |
| 94 | 85 | ||
| @@ -108,13 +99,6 @@ static LIST_HEAD(modules); | |||
| 108 | * Use a latched RB-tree for __module_address(); this allows us to use | 99 | * Use a latched RB-tree for __module_address(); this allows us to use |
| 109 | * RCU-sched lookups of the address from any context. | 100 | * RCU-sched lookups of the address from any context. |
| 110 | * | 101 | * |
| 111 | * Because modules have two address ranges: init and core, we need two | ||
| 112 | * latch_tree_nodes entries. Therefore we need the back-pointer from | ||
| 113 | * mod_tree_node. | ||
| 114 | * | ||
| 115 | * Because init ranges are short lived we mark them unlikely and have placed | ||
| 116 | * them outside the critical cacheline in struct module. | ||
| 117 | * | ||
| 118 | * This is conditional on PERF_EVENTS || TRACING because those can really hit | 102 | * This is conditional on PERF_EVENTS || TRACING because those can really hit |
| 119 | * __module_address() hard by doing a lot of stack unwinding; potentially from | 103 | * __module_address() hard by doing a lot of stack unwinding; potentially from |
| 120 | * NMI context. | 104 | * NMI context. |
| @@ -122,24 +106,16 @@ static LIST_HEAD(modules); | |||
| 122 | 106 | ||
| 123 | static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) | 107 | static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) |
| 124 | { | 108 | { |
| 125 | struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node); | 109 | struct module_layout *layout = container_of(n, struct module_layout, mtn.node); |
| 126 | struct module *mod = mtn->mod; | ||
| 127 | 110 | ||
| 128 | if (unlikely(mtn == &mod->mtn_init)) | 111 | return (unsigned long)layout->base; |
| 129 | return (unsigned long)mod->module_init; | ||
| 130 | |||
| 131 | return (unsigned long)mod->module_core; | ||
| 132 | } | 112 | } |
| 133 | 113 | ||
| 134 | static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) | 114 | static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) |
| 135 | { | 115 | { |
| 136 | struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node); | 116 | struct module_layout *layout = container_of(n, struct module_layout, mtn.node); |
| 137 | struct module *mod = mtn->mod; | ||
| 138 | |||
| 139 | if (unlikely(mtn == &mod->mtn_init)) | ||
| 140 | return (unsigned long)mod->init_size; | ||
| 141 | 117 | ||
| 142 | return (unsigned long)mod->core_size; | 118 | return (unsigned long)layout->size; |
| 143 | } | 119 | } |
| 144 | 120 | ||
| 145 | static __always_inline bool | 121 | static __always_inline bool |
| @@ -197,23 +173,23 @@ static void __mod_tree_remove(struct mod_tree_node *node) | |||
| 197 | */ | 173 | */ |
| 198 | static void mod_tree_insert(struct module *mod) | 174 | static void mod_tree_insert(struct module *mod) |
| 199 | { | 175 | { |
| 200 | mod->mtn_core.mod = mod; | 176 | mod->core_layout.mtn.mod = mod; |
| 201 | mod->mtn_init.mod = mod; | 177 | mod->init_layout.mtn.mod = mod; |
| 202 | 178 | ||
| 203 | __mod_tree_insert(&mod->mtn_core); | 179 | __mod_tree_insert(&mod->core_layout.mtn); |
| 204 | if (mod->init_size) | 180 | if (mod->init_layout.size) |
| 205 | __mod_tree_insert(&mod->mtn_init); | 181 | __mod_tree_insert(&mod->init_layout.mtn); |
| 206 | } | 182 | } |
| 207 | 183 | ||
| 208 | static void mod_tree_remove_init(struct module *mod) | 184 | static void mod_tree_remove_init(struct module *mod) |
| 209 | { | 185 | { |
| 210 | if (mod->init_size) | 186 | if (mod->init_layout.size) |
| 211 | __mod_tree_remove(&mod->mtn_init); | 187 | __mod_tree_remove(&mod->init_layout.mtn); |
| 212 | } | 188 | } |
| 213 | 189 | ||
| 214 | static void mod_tree_remove(struct module *mod) | 190 | static void mod_tree_remove(struct module *mod) |
| 215 | { | 191 | { |
| 216 | __mod_tree_remove(&mod->mtn_core); | 192 | __mod_tree_remove(&mod->core_layout.mtn); |
| 217 | mod_tree_remove_init(mod); | 193 | mod_tree_remove_init(mod); |
| 218 | } | 194 | } |
| 219 | 195 | ||
| @@ -267,9 +243,9 @@ static void __mod_update_bounds(void *base, unsigned int size) | |||
| 267 | 243 | ||
| 268 | static void mod_update_bounds(struct module *mod) | 244 | static void mod_update_bounds(struct module *mod) |
| 269 | { | 245 | { |
| 270 | __mod_update_bounds(mod->module_core, mod->core_size); | 246 | __mod_update_bounds(mod->core_layout.base, mod->core_layout.size); |
| 271 | if (mod->init_size) | 247 | if (mod->init_layout.size) |
| 272 | __mod_update_bounds(mod->module_init, mod->init_size); | 248 | __mod_update_bounds(mod->init_layout.base, mod->init_layout.size); |
| 273 | } | 249 | } |
| 274 | 250 | ||
| 275 | #ifdef CONFIG_KGDB_KDB | 251 | #ifdef CONFIG_KGDB_KDB |
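__mod_update_bounds() itself is unchanged and not shown in this hunk; for context, a sketch of what that helper is generally expected to do (names follow kernel/module.c conventions, but treat this as illustrative only): it widens the global [module_addr_min, module_addr_max] window so is_module_address() can reject most kernel addresses cheaply, without walking any module list.

    static unsigned long module_addr_min = -1UL, module_addr_max;

    static void __mod_update_bounds(void *base, unsigned int size)
    {
            unsigned long min = (unsigned long)base;
            unsigned long max = min + size;

            if (min < module_addr_min)
                    module_addr_min = min;
            if (max > module_addr_max)
                    module_addr_max = max;
    }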
| @@ -1214,7 +1190,7 @@ struct module_attribute module_uevent = | |||
| 1214 | static ssize_t show_coresize(struct module_attribute *mattr, | 1190 | static ssize_t show_coresize(struct module_attribute *mattr, |
| 1215 | struct module_kobject *mk, char *buffer) | 1191 | struct module_kobject *mk, char *buffer) |
| 1216 | { | 1192 | { |
| 1217 | return sprintf(buffer, "%u\n", mk->mod->core_size); | 1193 | return sprintf(buffer, "%u\n", mk->mod->core_layout.size); |
| 1218 | } | 1194 | } |
| 1219 | 1195 | ||
| 1220 | static struct module_attribute modinfo_coresize = | 1196 | static struct module_attribute modinfo_coresize = |
| @@ -1223,7 +1199,7 @@ static struct module_attribute modinfo_coresize = | |||
| 1223 | static ssize_t show_initsize(struct module_attribute *mattr, | 1199 | static ssize_t show_initsize(struct module_attribute *mattr, |
| 1224 | struct module_kobject *mk, char *buffer) | 1200 | struct module_kobject *mk, char *buffer) |
| 1225 | { | 1201 | { |
| 1226 | return sprintf(buffer, "%u\n", mk->mod->init_size); | 1202 | return sprintf(buffer, "%u\n", mk->mod->init_layout.size); |
| 1227 | } | 1203 | } |
| 1228 | 1204 | ||
| 1229 | static struct module_attribute modinfo_initsize = | 1205 | static struct module_attribute modinfo_initsize = |
| @@ -1873,64 +1849,75 @@ static void mod_sysfs_teardown(struct module *mod) | |||
| 1873 | /* | 1849 | /* |
| 1874 | * LKM RO/NX protection: protect module's text/ro-data | 1850 | * LKM RO/NX protection: protect module's text/ro-data |
| 1875 | * from modification and any data from execution. | 1851 | * from modification and any data from execution. |
| 1852 | * | ||
| 1853 | * General layout of module is: | ||
| 1854 | * [text] [read-only-data] [writable data] | ||
| 1855 | * text_size -----^ ^ ^ | ||
| 1856 | * ro_size ------------------------| | | ||
| 1857 | * size -------------------------------------------| | ||
| 1858 | * | ||
| 1859 | * These values are always page-aligned (as is base) | ||
| 1876 | */ | 1860 | */ |
| 1877 | void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages)) | 1861 | static void frob_text(const struct module_layout *layout, |
| 1862 | int (*set_memory)(unsigned long start, int num_pages)) | ||
| 1878 | { | 1863 | { |
| 1879 | unsigned long begin_pfn = PFN_DOWN((unsigned long)start); | 1864 | BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); |
| 1880 | unsigned long end_pfn = PFN_DOWN((unsigned long)end); | 1865 | BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); |
| 1866 | set_memory((unsigned long)layout->base, | ||
| 1867 | layout->text_size >> PAGE_SHIFT); | ||
| 1868 | } | ||
| 1881 | 1869 | ||
| 1882 | if (end_pfn > begin_pfn) | 1870 | static void frob_rodata(const struct module_layout *layout, |
| 1883 | set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); | 1871 | int (*set_memory)(unsigned long start, int num_pages)) |
| 1872 | { | ||
| 1873 | BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); | ||
| 1874 | BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); | ||
| 1875 | BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); | ||
| 1876 | set_memory((unsigned long)layout->base + layout->text_size, | ||
| 1877 | (layout->ro_size - layout->text_size) >> PAGE_SHIFT); | ||
| 1884 | } | 1878 | } |
| 1885 | 1879 | ||
| 1886 | static void set_section_ro_nx(void *base, | 1880 | static void frob_writable_data(const struct module_layout *layout, |
| 1887 | unsigned long text_size, | 1881 | int (*set_memory)(unsigned long start, int num_pages)) |
| 1888 | unsigned long ro_size, | ||
| 1889 | unsigned long total_size) | ||
| 1890 | { | 1882 | { |
| 1891 | /* begin and end PFNs of the current subsection */ | 1883 | BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); |
| 1892 | unsigned long begin_pfn; | 1884 | BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); |
| 1893 | unsigned long end_pfn; | 1885 | BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1)); |
| 1886 | set_memory((unsigned long)layout->base + layout->ro_size, | ||
| 1887 | (layout->size - layout->ro_size) >> PAGE_SHIFT); | ||
| 1888 | } | ||
| 1894 | 1889 | ||
| 1895 | /* | 1890 | /* livepatching wants to disable read-only so it can frob module. */ |
| 1896 | * Set RO for module text and RO-data: | 1891 | void module_disable_ro(const struct module *mod) |
| 1897 | * - Always protect first page. | 1892 | { |
| 1898 | * - Do not protect last partial page. | 1893 | frob_text(&mod->core_layout, set_memory_rw); |
| 1899 | */ | 1894 | frob_rodata(&mod->core_layout, set_memory_rw); |
| 1900 | if (ro_size > 0) | 1895 | frob_text(&mod->init_layout, set_memory_rw); |
| 1901 | set_page_attributes(base, base + ro_size, set_memory_ro); | 1896 | frob_rodata(&mod->init_layout, set_memory_rw); |
| 1897 | } | ||
| 1902 | 1898 | ||
| 1903 | /* | 1899 | void module_enable_ro(const struct module *mod) |
| 1904 | * Set NX permissions for module data: | 1900 | { |
| 1905 | * - Do not protect first partial page. | 1901 | frob_text(&mod->core_layout, set_memory_ro); |
| 1906 | * - Always protect last page. | 1902 | frob_rodata(&mod->core_layout, set_memory_ro); |
| 1907 | */ | 1903 | frob_text(&mod->init_layout, set_memory_ro); |
| 1908 | if (total_size > text_size) { | 1904 | frob_rodata(&mod->init_layout, set_memory_ro); |
| 1909 | begin_pfn = PFN_UP((unsigned long)base + text_size); | ||
| 1910 | end_pfn = PFN_UP((unsigned long)base + total_size); | ||
| 1911 | if (end_pfn > begin_pfn) | ||
| 1912 | set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); | ||
| 1913 | } | ||
| 1914 | } | 1905 | } |
| 1915 | 1906 | ||
| 1916 | static void unset_module_core_ro_nx(struct module *mod) | 1907 | static void module_enable_nx(const struct module *mod) |
| 1917 | { | 1908 | { |
| 1918 | set_page_attributes(mod->module_core + mod->core_text_size, | 1909 | frob_rodata(&mod->core_layout, set_memory_nx); |
| 1919 | mod->module_core + mod->core_size, | 1910 | frob_writable_data(&mod->core_layout, set_memory_nx); |
| 1920 | set_memory_x); | 1911 | frob_rodata(&mod->init_layout, set_memory_nx); |
| 1921 | set_page_attributes(mod->module_core, | 1912 | frob_writable_data(&mod->init_layout, set_memory_nx); |
| 1922 | mod->module_core + mod->core_ro_size, | ||
| 1923 | set_memory_rw); | ||
| 1924 | } | 1913 | } |
| 1925 | 1914 | ||
| 1926 | static void unset_module_init_ro_nx(struct module *mod) | 1915 | static void module_disable_nx(const struct module *mod) |
| 1927 | { | 1916 | { |
| 1928 | set_page_attributes(mod->module_init + mod->init_text_size, | 1917 | frob_rodata(&mod->core_layout, set_memory_x); |
| 1929 | mod->module_init + mod->init_size, | 1918 | frob_writable_data(&mod->core_layout, set_memory_x); |
| 1930 | set_memory_x); | 1919 | frob_rodata(&mod->init_layout, set_memory_x); |
| 1931 | set_page_attributes(mod->module_init, | 1920 | frob_writable_data(&mod->init_layout, set_memory_x); |
| 1932 | mod->module_init + mod->init_ro_size, | ||
| 1933 | set_memory_rw); | ||
| 1934 | } | 1921 | } |
| 1935 | 1922 | ||
| 1936 | /* Iterate through all modules and set each module's text as RW */ | 1923 | /* Iterate through all modules and set each module's text as RW */ |
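The frob_*() helpers above encode the layout diagram directly: frob_text() covers [base, base+text_size), frob_rodata() covers [base+text_size, base+ro_size), and frob_writable_data() covers [base+ro_size, base+size), each taking a set_memory_*() callback. Protection changes then become combinations of (region, callback) pairs. A minimal sketch of a caller, equivalent to what module_enable_ro() plus module_enable_nx() do for the core image (example_protect_core() is hypothetical, and return values of set_memory_*() are ignored, as in the helpers themselves):

    static void example_protect_core(const struct module *mod)
    {
            /* text: executable, so read-only but not NX */
            frob_text(&mod->core_layout, set_memory_ro);
            /* rodata: read-only and never executed */
            frob_rodata(&mod->core_layout, set_memory_ro);
            frob_rodata(&mod->core_layout, set_memory_nx);
            /* writable data: stays RW, but must not be executable */
            frob_writable_data(&mod->core_layout, set_memory_nx);
    }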
| @@ -1942,16 +1929,9 @@ void set_all_modules_text_rw(void) | |||
| 1942 | list_for_each_entry_rcu(mod, &modules, list) { | 1929 | list_for_each_entry_rcu(mod, &modules, list) { |
| 1943 | if (mod->state == MODULE_STATE_UNFORMED) | 1930 | if (mod->state == MODULE_STATE_UNFORMED) |
| 1944 | continue; | 1931 | continue; |
| 1945 | if ((mod->module_core) && (mod->core_text_size)) { | 1932 | |
| 1946 | set_page_attributes(mod->module_core, | 1933 | frob_text(&mod->core_layout, set_memory_rw); |
| 1947 | mod->module_core + mod->core_text_size, | 1934 | frob_text(&mod->init_layout, set_memory_rw); |
| 1948 | set_memory_rw); | ||
| 1949 | } | ||
| 1950 | if ((mod->module_init) && (mod->init_text_size)) { | ||
| 1951 | set_page_attributes(mod->module_init, | ||
| 1952 | mod->module_init + mod->init_text_size, | ||
| 1953 | set_memory_rw); | ||
| 1954 | } | ||
| 1955 | } | 1935 | } |
| 1956 | mutex_unlock(&module_mutex); | 1936 | mutex_unlock(&module_mutex); |
| 1957 | } | 1937 | } |
| @@ -1965,23 +1945,25 @@ void set_all_modules_text_ro(void) | |||
| 1965 | list_for_each_entry_rcu(mod, &modules, list) { | 1945 | list_for_each_entry_rcu(mod, &modules, list) { |
| 1966 | if (mod->state == MODULE_STATE_UNFORMED) | 1946 | if (mod->state == MODULE_STATE_UNFORMED) |
| 1967 | continue; | 1947 | continue; |
| 1968 | if ((mod->module_core) && (mod->core_text_size)) { | 1948 | |
| 1969 | set_page_attributes(mod->module_core, | 1949 | frob_text(&mod->core_layout, set_memory_ro); |
| 1970 | mod->module_core + mod->core_text_size, | 1950 | frob_text(&mod->init_layout, set_memory_ro); |
| 1971 | set_memory_ro); | ||
| 1972 | } | ||
| 1973 | if ((mod->module_init) && (mod->init_text_size)) { | ||
| 1974 | set_page_attributes(mod->module_init, | ||
| 1975 | mod->module_init + mod->init_text_size, | ||
| 1976 | set_memory_ro); | ||
| 1977 | } | ||
| 1978 | } | 1951 | } |
| 1979 | mutex_unlock(&module_mutex); | 1952 | mutex_unlock(&module_mutex); |
| 1980 | } | 1953 | } |
| 1954 | |||
| 1955 | static void disable_ro_nx(const struct module_layout *layout) | ||
| 1956 | { | ||
| 1957 | frob_text(layout, set_memory_rw); | ||
| 1958 | frob_rodata(layout, set_memory_rw); | ||
| 1959 | frob_rodata(layout, set_memory_x); | ||
| 1960 | frob_writable_data(layout, set_memory_x); | ||
| 1961 | } | ||
| 1962 | |||
| 1981 | #else | 1963 | #else |
| 1982 | static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { } | 1964 | static void disable_ro_nx(const struct module_layout *layout) { } |
| 1983 | static void unset_module_core_ro_nx(struct module *mod) { } | 1965 | static void module_enable_nx(const struct module *mod) { } |
| 1984 | static void unset_module_init_ro_nx(struct module *mod) { } | 1966 | static void module_disable_nx(const struct module *mod) { } |
| 1985 | #endif | 1967 | #endif |
| 1986 | 1968 | ||
| 1987 | void __weak module_memfree(void *module_region) | 1969 | void __weak module_memfree(void *module_region) |
| @@ -2033,19 +2015,19 @@ static void free_module(struct module *mod) | |||
| 2033 | synchronize_sched(); | 2015 | synchronize_sched(); |
| 2034 | mutex_unlock(&module_mutex); | 2016 | mutex_unlock(&module_mutex); |
| 2035 | 2017 | ||
| 2036 | /* This may be NULL, but that's OK */ | 2018 | /* This may be empty, but that's OK */ |
| 2037 | unset_module_init_ro_nx(mod); | 2019 | disable_ro_nx(&mod->init_layout); |
| 2038 | module_arch_freeing_init(mod); | 2020 | module_arch_freeing_init(mod); |
| 2039 | module_memfree(mod->module_init); | 2021 | module_memfree(mod->init_layout.base); |
| 2040 | kfree(mod->args); | 2022 | kfree(mod->args); |
| 2041 | percpu_modfree(mod); | 2023 | percpu_modfree(mod); |
| 2042 | 2024 | ||
| 2043 | /* Free lock-classes; relies on the preceding sync_rcu(). */ | 2025 | /* Free lock-classes; relies on the preceding sync_rcu(). */ |
| 2044 | lockdep_free_key_range(mod->module_core, mod->core_size); | 2026 | lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); |
| 2045 | 2027 | ||
| 2046 | /* Finally, free the core (containing the module structure) */ | 2028 | /* Finally, free the core (containing the module structure) */ |
| 2047 | unset_module_core_ro_nx(mod); | 2029 | disable_ro_nx(&mod->core_layout); |
| 2048 | module_memfree(mod->module_core); | 2030 | module_memfree(mod->core_layout.base); |
| 2049 | 2031 | ||
| 2050 | #ifdef CONFIG_MPU | 2032 | #ifdef CONFIG_MPU |
| 2051 | update_protections(current->mm); | 2033 | update_protections(current->mm); |
| @@ -2248,20 +2230,20 @@ static void layout_sections(struct module *mod, struct load_info *info) | |||
| 2248 | || s->sh_entsize != ~0UL | 2230 | || s->sh_entsize != ~0UL |
| 2249 | || strstarts(sname, ".init")) | 2231 | || strstarts(sname, ".init")) |
| 2250 | continue; | 2232 | continue; |
| 2251 | s->sh_entsize = get_offset(mod, &mod->core_size, s, i); | 2233 | s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i); |
| 2252 | pr_debug("\t%s\n", sname); | 2234 | pr_debug("\t%s\n", sname); |
| 2253 | } | 2235 | } |
| 2254 | switch (m) { | 2236 | switch (m) { |
| 2255 | case 0: /* executable */ | 2237 | case 0: /* executable */ |
| 2256 | mod->core_size = debug_align(mod->core_size); | 2238 | mod->core_layout.size = debug_align(mod->core_layout.size); |
| 2257 | mod->core_text_size = mod->core_size; | 2239 | mod->core_layout.text_size = mod->core_layout.size; |
| 2258 | break; | 2240 | break; |
| 2259 | case 1: /* RO: text and ro-data */ | 2241 | case 1: /* RO: text and ro-data */ |
| 2260 | mod->core_size = debug_align(mod->core_size); | 2242 | mod->core_layout.size = debug_align(mod->core_layout.size); |
| 2261 | mod->core_ro_size = mod->core_size; | 2243 | mod->core_layout.ro_size = mod->core_layout.size; |
| 2262 | break; | 2244 | break; |
| 2263 | case 3: /* whole core */ | 2245 | case 3: /* whole core */ |
| 2264 | mod->core_size = debug_align(mod->core_size); | 2246 | mod->core_layout.size = debug_align(mod->core_layout.size); |
| 2265 | break; | 2247 | break; |
| 2266 | } | 2248 | } |
| 2267 | } | 2249 | } |
| @@ -2277,21 +2259,21 @@ static void layout_sections(struct module *mod, struct load_info *info) | |||
| 2277 | || s->sh_entsize != ~0UL | 2259 | || s->sh_entsize != ~0UL |
| 2278 | || !strstarts(sname, ".init")) | 2260 | || !strstarts(sname, ".init")) |
| 2279 | continue; | 2261 | continue; |
| 2280 | s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) | 2262 | s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i) |
| 2281 | | INIT_OFFSET_MASK); | 2263 | | INIT_OFFSET_MASK); |
| 2282 | pr_debug("\t%s\n", sname); | 2264 | pr_debug("\t%s\n", sname); |
| 2283 | } | 2265 | } |
| 2284 | switch (m) { | 2266 | switch (m) { |
| 2285 | case 0: /* executable */ | 2267 | case 0: /* executable */ |
| 2286 | mod->init_size = debug_align(mod->init_size); | 2268 | mod->init_layout.size = debug_align(mod->init_layout.size); |
| 2287 | mod->init_text_size = mod->init_size; | 2269 | mod->init_layout.text_size = mod->init_layout.size; |
| 2288 | break; | 2270 | break; |
| 2289 | case 1: /* RO: text and ro-data */ | 2271 | case 1: /* RO: text and ro-data */ |
| 2290 | mod->init_size = debug_align(mod->init_size); | 2272 | mod->init_layout.size = debug_align(mod->init_layout.size); |
| 2291 | mod->init_ro_size = mod->init_size; | 2273 | mod->init_layout.ro_size = mod->init_layout.size; |
| 2292 | break; | 2274 | break; |
| 2293 | case 3: /* whole init */ | 2275 | case 3: /* whole init */ |
| 2294 | mod->init_size = debug_align(mod->init_size); | 2276 | mod->init_layout.size = debug_align(mod->init_layout.size); |
| 2295 | break; | 2277 | break; |
| 2296 | } | 2278 | } |
| 2297 | } | 2279 | } |
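Both layout_sections() passes above place sections in a fixed order (executable, then read-only, then writable), snapshotting the running size after each class; those snapshots become text_size and ro_size, the boundaries the frob_*() helpers rely on. A stand-alone arithmetic sketch of the watermark idea (section sizes are made up, and debug_align() only actually page-aligns when strict module RO/NX is configured):

    #include <stdio.h>

    #define PAGE_ALIGN_UP(x) (((x) + 4095UL) & ~4095UL)   /* assumes 4 KiB pages */

    int main(void)
    {
            unsigned long size = 0, text_size, ro_size;

            size += 6100;                   /* .text and friends         */
            size = PAGE_ALIGN_UP(size);
            text_size = size;               /* end of executable region  */

            size += 3200;                   /* .rodata                   */
            size = PAGE_ALIGN_UP(size);
            ro_size = size;                 /* end of read-only region   */

            size += 1800;                   /* .data, .bss               */
            size = PAGE_ALIGN_UP(size);     /* total, page aligned       */

            printf("text_size=%lu ro_size=%lu size=%lu\n",
                   text_size, ro_size, size);
            return 0;
    }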
| @@ -2401,7 +2383,7 @@ static char elf_type(const Elf_Sym *sym, const struct load_info *info) | |||
| 2401 | } | 2383 | } |
| 2402 | if (sym->st_shndx == SHN_UNDEF) | 2384 | if (sym->st_shndx == SHN_UNDEF) |
| 2403 | return 'U'; | 2385 | return 'U'; |
| 2404 | if (sym->st_shndx == SHN_ABS) | 2386 | if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) |
| 2405 | return 'a'; | 2387 | return 'a'; |
| 2406 | if (sym->st_shndx >= SHN_LORESERVE) | 2388 | if (sym->st_shndx >= SHN_LORESERVE) |
| 2407 | return '?'; | 2389 | return '?'; |
| @@ -2430,7 +2412,7 @@ static char elf_type(const Elf_Sym *sym, const struct load_info *info) | |||
| 2430 | } | 2412 | } |
| 2431 | 2413 | ||
| 2432 | static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, | 2414 | static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, |
| 2433 | unsigned int shnum) | 2415 | unsigned int shnum, unsigned int pcpundx) |
| 2434 | { | 2416 | { |
| 2435 | const Elf_Shdr *sec; | 2417 | const Elf_Shdr *sec; |
| 2436 | 2418 | ||
| @@ -2439,6 +2421,11 @@ static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, | |||
| 2439 | || !src->st_name) | 2421 | || !src->st_name) |
| 2440 | return false; | 2422 | return false; |
| 2441 | 2423 | ||
| 2424 | #ifdef CONFIG_KALLSYMS_ALL | ||
| 2425 | if (src->st_shndx == pcpundx) | ||
| 2426 | return true; | ||
| 2427 | #endif | ||
| 2428 | |||
| 2442 | sec = sechdrs + src->st_shndx; | 2429 | sec = sechdrs + src->st_shndx; |
| 2443 | if (!(sec->sh_flags & SHF_ALLOC) | 2430 | if (!(sec->sh_flags & SHF_ALLOC) |
| 2444 | #ifndef CONFIG_KALLSYMS_ALL | 2431 | #ifndef CONFIG_KALLSYMS_ALL |
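The two kallsyms hunks above change how per-CPU symbols are handled: elf_type() now reports them like SHN_ABS symbols ('a', since their final address comes from the per-CPU allocator rather than the module image), and is_core_symbol() keeps them in the permanent core symbol table when CONFIG_KALLSYMS_ALL is set, with the per-CPU section index passed in as pcpundx. A sketch of the assumed surrounding context (not part of this hunk):

    /*
     * Assumed setup from the ELF parsing phase: the ".data..percpu" section
     * index is located once and remembered for the kallsyms passes above.
     */
    info->index.pcpu = find_pcpusec(info);          /* 0 if no per-CPU section */

    /* Paraphrase of the elf_type() change: per-CPU symbols classify as 'a'. */
    if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
            type = 'a';     /* value is not an address inside the module image */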
| @@ -2466,7 +2453,7 @@ static void layout_symtab(struct module *mod, struct load_info *info) | |||
| 2466 | 2453 | ||
| 2467 | /* Put symbol section at end of init part of module. */ | 2454 | /* Put symbol section at end of init part of module. */ |
| 2468 | symsect->sh_flags |= SHF_ALLOC; | 2455 | symsect->sh_flags |= SHF_ALLOC; |
| 2469 | symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, | 2456 | symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect, |
| 2470 | info->index.sym) | INIT_OFFSET_MASK; | 2457 | info->index.sym) | INIT_OFFSET_MASK; |
| 2471 | pr_debug("\t%s\n", info->secstrings + symsect->sh_name); | 2458 | pr_debug("\t%s\n", info->secstrings + symsect->sh_name); |
| 2472 | 2459 | ||
| @@ -2476,23 +2463,24 @@ static void layout_symtab(struct module *mod, struct load_info *info) | |||
| 2476 | /* Compute total space required for the core symbols' strtab. */ | 2463 | /* Compute total space required for the core symbols' strtab. */ |
| 2477 | for (ndst = i = 0; i < nsrc; i++) { | 2464 | for (ndst = i = 0; i < nsrc; i++) { |
| 2478 | if (i == 0 || | 2465 | if (i == 0 || |
| 2479 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { | 2466 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, |
| 2467 | info->index.pcpu)) { | ||
| 2480 | strtab_size += strlen(&info->strtab[src[i].st_name])+1; | 2468 | strtab_size += strlen(&info->strtab[src[i].st_name])+1; |
| 2481 | ndst++; | 2469 | ndst++; |
| 2482 | } | 2470 | } |
| 2483 | } | 2471 | } |
| 2484 | 2472 | ||
| 2485 | /* Append room for core symbols at end of core part. */ | 2473 | /* Append room for core symbols at end of core part. */ |
| 2486 | info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); | 2474 | info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); |
| 2487 | info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); | 2475 | info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); |
| 2488 | mod->core_size += strtab_size; | 2476 | mod->core_layout.size += strtab_size; |
| 2489 | mod->core_size = debug_align(mod->core_size); | 2477 | mod->core_layout.size = debug_align(mod->core_layout.size); |
| 2490 | 2478 | ||
| 2491 | /* Put string table section at end of init part of module. */ | 2479 | /* Put string table section at end of init part of module. */ |
| 2492 | strsect->sh_flags |= SHF_ALLOC; | 2480 | strsect->sh_flags |= SHF_ALLOC; |
| 2493 | strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, | 2481 | strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect, |
| 2494 | info->index.str) | INIT_OFFSET_MASK; | 2482 | info->index.str) | INIT_OFFSET_MASK; |
| 2495 | mod->init_size = debug_align(mod->init_size); | 2483 | mod->init_layout.size = debug_align(mod->init_layout.size); |
| 2496 | pr_debug("\t%s\n", info->secstrings + strsect->sh_name); | 2484 | pr_debug("\t%s\n", info->secstrings + strsect->sh_name); |
| 2497 | } | 2485 | } |
| 2498 | 2486 | ||
| @@ -2513,12 +2501,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) | |||
| 2513 | for (i = 0; i < mod->num_symtab; i++) | 2501 | for (i = 0; i < mod->num_symtab; i++) |
| 2514 | mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); | 2502 | mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); |
| 2515 | 2503 | ||
| 2516 | mod->core_symtab = dst = mod->module_core + info->symoffs; | 2504 | mod->core_symtab = dst = mod->core_layout.base + info->symoffs; |
| 2517 | mod->core_strtab = s = mod->module_core + info->stroffs; | 2505 | mod->core_strtab = s = mod->core_layout.base + info->stroffs; |
| 2518 | src = mod->symtab; | 2506 | src = mod->symtab; |
| 2519 | for (ndst = i = 0; i < mod->num_symtab; i++) { | 2507 | for (ndst = i = 0; i < mod->num_symtab; i++) { |
| 2520 | if (i == 0 || | 2508 | if (i == 0 || |
| 2521 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { | 2509 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, |
| 2510 | info->index.pcpu)) { | ||
| 2522 | dst[ndst] = src[i]; | 2511 | dst[ndst] = src[i]; |
| 2523 | dst[ndst++].st_name = s - mod->core_strtab; | 2512 | dst[ndst++].st_name = s - mod->core_strtab; |
| 2524 | s += strlcpy(s, &mod->strtab[src[i].st_name], | 2513 | s += strlcpy(s, &mod->strtab[src[i].st_name], |
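Taken together, the layout_symtab() and add_kallsyms() hunks reserve room for the trimmed-down "core" symbol and string tables at the end of core_layout and then fill them in once the core region exists. A compressed sketch of how the recorded offsets become the final pointers (paraphrased from the hunks; debug_align() and the per-symbol copy loop are omitted):

    /* layout time: grow the core image and remember where things will land */
    info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
    info->stroffs = info->symoffs + ndst * sizeof(Elf_Sym);
    mod->core_layout.size = info->stroffs + strtab_size;

    /* after move_module() has allocated core_layout.base: offsets -> pointers */
    mod->core_symtab = mod->core_layout.base + info->symoffs;
    mod->core_strtab = mod->core_layout.base + info->stroffs;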
| @@ -2964,7 +2953,7 @@ static int move_module(struct module *mod, struct load_info *info) | |||
| 2964 | void *ptr; | 2953 | void *ptr; |
| 2965 | 2954 | ||
| 2966 | /* Do the allocs. */ | 2955 | /* Do the allocs. */ |
| 2967 | ptr = module_alloc(mod->core_size); | 2956 | ptr = module_alloc(mod->core_layout.size); |
| 2968 | /* | 2957 | /* |
| 2969 | * The pointer to this block is stored in the module structure | 2958 | * The pointer to this block is stored in the module structure |
| 2970 | * which is inside the block. Just mark it as not being a | 2959 | * which is inside the block. Just mark it as not being a |
| @@ -2974,11 +2963,11 @@ static int move_module(struct module *mod, struct load_info *info) | |||
| 2974 | if (!ptr) | 2963 | if (!ptr) |
| 2975 | return -ENOMEM; | 2964 | return -ENOMEM; |
| 2976 | 2965 | ||
| 2977 | memset(ptr, 0, mod->core_size); | 2966 | memset(ptr, 0, mod->core_layout.size); |
| 2978 | mod->module_core = ptr; | 2967 | mod->core_layout.base = ptr; |
| 2979 | 2968 | ||
| 2980 | if (mod->init_size) { | 2969 | if (mod->init_layout.size) { |
| 2981 | ptr = module_alloc(mod->init_size); | 2970 | ptr = module_alloc(mod->init_layout.size); |
| 2982 | /* | 2971 | /* |
| 2983 | * The pointer to this block is stored in the module structure | 2972 | * The pointer to this block is stored in the module structure |
| 2984 | * which is inside the block. This block doesn't need to be | 2973 | * which is inside the block. This block doesn't need to be |
| @@ -2987,13 +2976,13 @@ static int move_module(struct module *mod, struct load_info *info) | |||
| 2987 | */ | 2976 | */ |
| 2988 | kmemleak_ignore(ptr); | 2977 | kmemleak_ignore(ptr); |
| 2989 | if (!ptr) { | 2978 | if (!ptr) { |
| 2990 | module_memfree(mod->module_core); | 2979 | module_memfree(mod->core_layout.base); |
| 2991 | return -ENOMEM; | 2980 | return -ENOMEM; |
| 2992 | } | 2981 | } |
| 2993 | memset(ptr, 0, mod->init_size); | 2982 | memset(ptr, 0, mod->init_layout.size); |
| 2994 | mod->module_init = ptr; | 2983 | mod->init_layout.base = ptr; |
| 2995 | } else | 2984 | } else |
| 2996 | mod->module_init = NULL; | 2985 | mod->init_layout.base = NULL; |
| 2997 | 2986 | ||
| 2998 | /* Transfer each section which specifies SHF_ALLOC */ | 2987 | /* Transfer each section which specifies SHF_ALLOC */ |
| 2999 | pr_debug("final section addresses:\n"); | 2988 | pr_debug("final section addresses:\n"); |
| @@ -3005,10 +2994,10 @@ static int move_module(struct module *mod, struct load_info *info) | |||
| 3005 | continue; | 2994 | continue; |
| 3006 | 2995 | ||
| 3007 | if (shdr->sh_entsize & INIT_OFFSET_MASK) | 2996 | if (shdr->sh_entsize & INIT_OFFSET_MASK) |
| 3008 | dest = mod->module_init | 2997 | dest = mod->init_layout.base |
| 3009 | + (shdr->sh_entsize & ~INIT_OFFSET_MASK); | 2998 | + (shdr->sh_entsize & ~INIT_OFFSET_MASK); |
| 3010 | else | 2999 | else |
| 3011 | dest = mod->module_core + shdr->sh_entsize; | 3000 | dest = mod->core_layout.base + shdr->sh_entsize; |
| 3012 | 3001 | ||
| 3013 | if (shdr->sh_type != SHT_NOBITS) | 3002 | if (shdr->sh_type != SHT_NOBITS) |
| 3014 | memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); | 3003 | memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); |
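The destination decode above relies on a convention established earlier by get_offset(): each SHF_ALLOC section's final offset is stashed in its sh_entsize, with the top bit (INIT_OFFSET_MASK) marking sections destined for the init image. A small sketch of that decode as a stand-alone helper (section_dest() is hypothetical, written only to make the bit trick explicit):

    #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG - 1))

    static void *section_dest(struct module *mod, const Elf_Shdr *shdr)
    {
            unsigned long off = shdr->sh_entsize;   /* offset chosen by get_offset() */

            if (off & INIT_OFFSET_MASK)             /* section lives in the init image */
                    return mod->init_layout.base + (off & ~INIT_OFFSET_MASK);
            return mod->core_layout.base + off;     /* otherwise it is part of the core */
    }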
| @@ -3070,12 +3059,12 @@ static void flush_module_icache(const struct module *mod) | |||
| 3070 | * Do it before processing of module parameters, so the module | 3059 | * Do it before processing of module parameters, so the module |
| 3071 | * can provide parameter accessor functions of its own. | 3060 | * can provide parameter accessor functions of its own. |
| 3072 | */ | 3061 | */ |
| 3073 | if (mod->module_init) | 3062 | if (mod->init_layout.base) |
| 3074 | flush_icache_range((unsigned long)mod->module_init, | 3063 | flush_icache_range((unsigned long)mod->init_layout.base, |
| 3075 | (unsigned long)mod->module_init | 3064 | (unsigned long)mod->init_layout.base |
| 3076 | + mod->init_size); | 3065 | + mod->init_layout.size); |
| 3077 | flush_icache_range((unsigned long)mod->module_core, | 3066 | flush_icache_range((unsigned long)mod->core_layout.base, |
| 3078 | (unsigned long)mod->module_core + mod->core_size); | 3067 | (unsigned long)mod->core_layout.base + mod->core_layout.size); |
| 3079 | 3068 | ||
| 3080 | set_fs(old_fs); | 3069 | set_fs(old_fs); |
| 3081 | } | 3070 | } |
| @@ -3133,8 +3122,8 @@ static void module_deallocate(struct module *mod, struct load_info *info) | |||
| 3133 | { | 3122 | { |
| 3134 | percpu_modfree(mod); | 3123 | percpu_modfree(mod); |
| 3135 | module_arch_freeing_init(mod); | 3124 | module_arch_freeing_init(mod); |
| 3136 | module_memfree(mod->module_init); | 3125 | module_memfree(mod->init_layout.base); |
| 3137 | module_memfree(mod->module_core); | 3126 | module_memfree(mod->core_layout.base); |
| 3138 | } | 3127 | } |
| 3139 | 3128 | ||
| 3140 | int __weak module_finalize(const Elf_Ehdr *hdr, | 3129 | int __weak module_finalize(const Elf_Ehdr *hdr, |
| @@ -3221,7 +3210,7 @@ static noinline int do_init_module(struct module *mod) | |||
| 3221 | ret = -ENOMEM; | 3210 | ret = -ENOMEM; |
| 3222 | goto fail; | 3211 | goto fail; |
| 3223 | } | 3212 | } |
| 3224 | freeinit->module_init = mod->module_init; | 3213 | freeinit->module_init = mod->init_layout.base; |
| 3225 | 3214 | ||
| 3226 | /* | 3215 | /* |
| 3227 | * We want to find out whether @mod uses async during init. Clear | 3216 | * We want to find out whether @mod uses async during init. Clear |
| @@ -3279,12 +3268,12 @@ static noinline int do_init_module(struct module *mod) | |||
| 3279 | mod->strtab = mod->core_strtab; | 3268 | mod->strtab = mod->core_strtab; |
| 3280 | #endif | 3269 | #endif |
| 3281 | mod_tree_remove_init(mod); | 3270 | mod_tree_remove_init(mod); |
| 3282 | unset_module_init_ro_nx(mod); | 3271 | disable_ro_nx(&mod->init_layout); |
| 3283 | module_arch_freeing_init(mod); | 3272 | module_arch_freeing_init(mod); |
| 3284 | mod->module_init = NULL; | 3273 | mod->init_layout.base = NULL; |
| 3285 | mod->init_size = 0; | 3274 | mod->init_layout.size = 0; |
| 3286 | mod->init_ro_size = 0; | 3275 | mod->init_layout.ro_size = 0; |
| 3287 | mod->init_text_size = 0; | 3276 | mod->init_layout.text_size = 0; |
| 3288 | /* | 3277 | /* |
| 3289 | * We want to free module_init, but be aware that kallsyms may be | 3278 | * We want to free module_init, but be aware that kallsyms may be |
| 3290 | * walking this with preempt disabled. In all the failure paths, we | 3279 | * walking this with preempt disabled. In all the failure paths, we |
| @@ -3373,17 +3362,9 @@ static int complete_formation(struct module *mod, struct load_info *info) | |||
| 3373 | /* This relies on module_mutex for list integrity. */ | 3362 | /* This relies on module_mutex for list integrity. */ |
| 3374 | module_bug_finalize(info->hdr, info->sechdrs, mod); | 3363 | module_bug_finalize(info->hdr, info->sechdrs, mod); |
| 3375 | 3364 | ||
| 3376 | /* Set RO and NX regions for core */ | 3365 | /* Set RO and NX regions */ |
| 3377 | set_section_ro_nx(mod->module_core, | 3366 | module_enable_ro(mod); |
| 3378 | mod->core_text_size, | 3367 | module_enable_nx(mod); |
| 3379 | mod->core_ro_size, | ||
| 3380 | mod->core_size); | ||
| 3381 | |||
| 3382 | /* Set RO and NX regions for init */ | ||
| 3383 | set_section_ro_nx(mod->module_init, | ||
| 3384 | mod->init_text_size, | ||
| 3385 | mod->init_ro_size, | ||
| 3386 | mod->init_size); | ||
| 3387 | 3368 | ||
| 3388 | /* Mark state as coming so strong_try_module_get() ignores us, | 3369 | /* Mark state as coming so strong_try_module_get() ignores us, |
| 3389 | * but kallsyms etc. can see us. */ | 3370 | * but kallsyms etc. can see us. */ |
| @@ -3548,8 +3529,8 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
| 3548 | MODULE_STATE_GOING, mod); | 3529 | MODULE_STATE_GOING, mod); |
| 3549 | 3530 | ||
| 3550 | /* we can't deallocate the module until we clear memory protection */ | 3531 | /* we can't deallocate the module until we clear memory protection */ |
| 3551 | unset_module_init_ro_nx(mod); | 3532 | module_disable_ro(mod); |
| 3552 | unset_module_core_ro_nx(mod); | 3533 | module_disable_nx(mod); |
| 3553 | 3534 | ||
| 3554 | ddebug_cleanup: | 3535 | ddebug_cleanup: |
| 3555 | dynamic_debug_remove(info->debug); | 3536 | dynamic_debug_remove(info->debug); |
| @@ -3578,7 +3559,7 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
| 3578 | */ | 3559 | */ |
| 3579 | ftrace_release_mod(mod); | 3560 | ftrace_release_mod(mod); |
| 3580 | /* Free lock-classes; relies on the preceding sync_rcu() */ | 3561 | /* Free lock-classes; relies on the preceding sync_rcu() */ |
| 3581 | lockdep_free_key_range(mod->module_core, mod->core_size); | 3562 | lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); |
| 3582 | 3563 | ||
| 3583 | module_deallocate(mod, info); | 3564 | module_deallocate(mod, info); |
| 3584 | free_copy: | 3565 | free_copy: |
| @@ -3656,9 +3637,9 @@ static const char *get_ksymbol(struct module *mod, | |||
| 3656 | 3637 | ||
| 3657 | /* At worse, next value is at end of module */ | 3638 | /* At worse, next value is at end of module */ |
| 3658 | if (within_module_init(addr, mod)) | 3639 | if (within_module_init(addr, mod)) |
| 3659 | nextval = (unsigned long)mod->module_init+mod->init_text_size; | 3640 | nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size; |
| 3660 | else | 3641 | else |
| 3661 | nextval = (unsigned long)mod->module_core+mod->core_text_size; | 3642 | nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size; |
| 3662 | 3643 | ||
| 3663 | /* Scan for closest preceding symbol, and next symbol. (ELF | 3644 | /* Scan for closest preceding symbol, and next symbol. (ELF |
| 3664 | starts real symbols at 1). */ | 3645 | starts real symbols at 1). */ |
| @@ -3905,7 +3886,7 @@ static int m_show(struct seq_file *m, void *p) | |||
| 3905 | return 0; | 3886 | return 0; |
| 3906 | 3887 | ||
| 3907 | seq_printf(m, "%s %u", | 3888 | seq_printf(m, "%s %u", |
| 3908 | mod->name, mod->init_size + mod->core_size); | 3889 | mod->name, mod->init_layout.size + mod->core_layout.size); |
| 3909 | print_unload_info(m, mod); | 3890 | print_unload_info(m, mod); |
| 3910 | 3891 | ||
| 3911 | /* Informative for users. */ | 3892 | /* Informative for users. */ |
| @@ -3914,7 +3895,7 @@ static int m_show(struct seq_file *m, void *p) | |||
| 3914 | mod->state == MODULE_STATE_COMING ? "Loading" : | 3895 | mod->state == MODULE_STATE_COMING ? "Loading" : |
| 3915 | "Live"); | 3896 | "Live"); |
| 3916 | /* Used by oprofile and other similar tools. */ | 3897 | /* Used by oprofile and other similar tools. */ |
| 3917 | seq_printf(m, " 0x%pK", mod->module_core); | 3898 | seq_printf(m, " 0x%pK", mod->core_layout.base); |
| 3918 | 3899 | ||
| 3919 | /* Taints info */ | 3900 | /* Taints info */ |
| 3920 | if (mod->taints) | 3901 | if (mod->taints) |
| @@ -4057,8 +4038,8 @@ struct module *__module_text_address(unsigned long addr) | |||
| 4057 | struct module *mod = __module_address(addr); | 4038 | struct module *mod = __module_address(addr); |
| 4058 | if (mod) { | 4039 | if (mod) { |
| 4059 | /* Make sure it's within the text section. */ | 4040 | /* Make sure it's within the text section. */ |
| 4060 | if (!within(addr, mod->module_init, mod->init_text_size) | 4041 | if (!within(addr, mod->init_layout.base, mod->init_layout.text_size) |
| 4061 | && !within(addr, mod->module_core, mod->core_text_size)) | 4042 | && !within(addr, mod->core_layout.base, mod->core_layout.text_size)) |
| 4062 | mod = NULL; | 4043 | mod = NULL; |
| 4063 | } | 4044 | } |
| 4064 | return mod; | 4045 | return mod; |
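__module_text_address() narrows a module hit down to the executable part of either layout using the within() helper, which is not shown in this diff. A sketch of its usual shape, assumed from kernel/module.c conventions:

    static inline int within(unsigned long addr, void *start, unsigned long size)
    {
            return (void *)addr >= start && (void *)addr < start + size;
    }

    /* e.g. "is addr inside the core text?"
     *   within(addr, mod->core_layout.base, mod->core_layout.text_size)
     */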
