summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2015-11-25 18:14:08 -0500
committerJiri Kosina <jkosina@suse.cz>2015-12-04 16:46:25 -0500
commit7523e4dc5057e157212b4741abd6256e03404cf1 (patch)
tree034014d98dea3f675e8e138bc34bd4e0a860b12b
parentc65abf358f211c3f88c8ed714dff25775ab49fc1 (diff)
module: use a structure to encapsulate layout.
Makes it easier to handle init vs core cleanly, though the change is fairly invasive across random architectures. It simplifies the rbtree code immediately, however, while keeping the core data together in the same cacheline (now iff the rbtree code is enabled). Acked-by: Peter Zijlstra <peterz@infradead.org> Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com> Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Jiri Kosina <jkosina@suse.cz>
-rw-r--r--arch/alpha/kernel/module.c2
-rw-r--r--arch/arc/kernel/unwind.c4
-rw-r--r--arch/arm/kernel/module-plts.c2
-rw-r--r--arch/avr32/kernel/module.c12
-rw-r--r--arch/ia64/kernel/module.c14
-rw-r--r--arch/metag/kernel/module.c4
-rw-r--r--arch/mips/kernel/vpe.c6
-rw-r--r--arch/parisc/kernel/module.c32
-rw-r--r--arch/powerpc/kernel/module_32.c6
-rw-r--r--arch/s390/kernel/module.c22
-rw-r--r--arch/x86/kernel/livepatch.c6
-rw-r--r--include/linux/module.h64
-rw-r--r--kernel/debug/kdb/kdb_main.c4
-rw-r--r--kernel/module.c199
14 files changed, 178 insertions, 199 deletions
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7077e4..936bc8f89a67 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
160 160
161 /* The small sections were sorted to the end of the segment. 161 /* The small sections were sorted to the end of the segment.
162 The following should definitely cover them. */ 162 The following should definitely cover them. */
163 gp = (u64)me->module_core + me->core_size - 0x8000; 163 gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
164 got = sechdrs[me->arch.gotsecindex].sh_addr; 164 got = sechdrs[me->arch.gotsecindex].sh_addr;
165 165
166 for (i = 0; i < n; i++) { 166 for (i = 0; i < n; i++) {
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 93c6ea52b671..e0034a6656ef 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -372,8 +372,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
372 return NULL; 372 return NULL;
373 373
374 init_unwind_table(table, module->name, 374 init_unwind_table(table, module->name,
375 module->module_core, module->core_size, 375 module->core_layout.base, module->core_layout.size,
376 module->module_init, module->init_size, 376 module->init_layout.base, module->init_layout.size,
377 table_start, table_size, 377 table_start, table_size,
378 NULL, 0); 378 NULL, 0);
379 379
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 097e2e201b9f..0c7efc3446c0 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -32,7 +32,7 @@ struct plt_entries {
32 32
33static bool in_init(const struct module *mod, u32 addr) 33static bool in_init(const struct module *mod, u32 addr)
34{ 34{
35 return addr - (u32)mod->module_init < mod->init_size; 35 return addr - (u32)mod->init_layout.base < mod->init_layout.size;
36} 36}
37 37
38u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) 38u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 164efa009e5b..2b4c54c04cb6 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -118,9 +118,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
118 * Increase core size to make room for GOT and set start 118 * Increase core size to make room for GOT and set start
119 * offset for GOT. 119 * offset for GOT.
120 */ 120 */
121 module->core_size = ALIGN(module->core_size, 4); 121 module->core_layout.size = ALIGN(module->core_layout.size, 4);
122 module->arch.got_offset = module->core_size; 122 module->arch.got_offset = module->core_layout.size;
123 module->core_size += module->arch.got_size; 123 module->core_layout.size += module->arch.got_size;
124 124
125 return 0; 125 return 0;
126 126
@@ -177,7 +177,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
177 if (!info->got_initialized) { 177 if (!info->got_initialized) {
178 Elf32_Addr *gotent; 178 Elf32_Addr *gotent;
179 179
180 gotent = (module->module_core 180 gotent = (module->core_layout.base
181 + module->arch.got_offset 181 + module->arch.got_offset
182 + info->got_offset); 182 + info->got_offset);
183 *gotent = relocation; 183 *gotent = relocation;
@@ -255,8 +255,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
255 */ 255 */
256 pr_debug("GOTPC: PC=0x%x, got_offset=0x%lx, core=0x%p\n", 256 pr_debug("GOTPC: PC=0x%x, got_offset=0x%lx, core=0x%p\n",
257 relocation, module->arch.got_offset, 257 relocation, module->arch.got_offset,
258 module->module_core); 258 module->core_layout.base);
259 relocation -= ((unsigned long)module->module_core 259 relocation -= ((unsigned long)module->core_layout.base
260 + module->arch.got_offset); 260 + module->arch.got_offset);
261 *location = relocation; 261 *location = relocation;
262 break; 262 break;
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index b15933c31b2f..6ab0ae7d6535 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -486,13 +486,13 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
486static inline int 486static inline int
487in_init (const struct module *mod, uint64_t addr) 487in_init (const struct module *mod, uint64_t addr)
488{ 488{
489 return addr - (uint64_t) mod->module_init < mod->init_size; 489 return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
490} 490}
491 491
492static inline int 492static inline int
493in_core (const struct module *mod, uint64_t addr) 493in_core (const struct module *mod, uint64_t addr)
494{ 494{
495 return addr - (uint64_t) mod->module_core < mod->core_size; 495 return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
496} 496}
497 497
498static inline int 498static inline int
@@ -675,7 +675,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
675 break; 675 break;
676 676
677 case RV_BDREL: 677 case RV_BDREL:
678 val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); 678 val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
679 break; 679 break;
680 680
681 case RV_LTV: 681 case RV_LTV:
@@ -810,15 +810,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
810 * addresses have been selected... 810 * addresses have been selected...
811 */ 811 */
812 uint64_t gp; 812 uint64_t gp;
813 if (mod->core_size > MAX_LTOFF) 813 if (mod->core_layout.size > MAX_LTOFF)
814 /* 814 /*
815 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated 815 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
816 * at the end of the module. 816 * at the end of the module.
817 */ 817 */
818 gp = mod->core_size - MAX_LTOFF / 2; 818 gp = mod->core_layout.size - MAX_LTOFF / 2;
819 else 819 else
820 gp = mod->core_size / 2; 820 gp = mod->core_layout.size / 2;
821 gp = (uint64_t) mod->module_core + ((gp + 7) & -8); 821 gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
822 mod->arch.gp = gp; 822 mod->arch.gp = gp;
823 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); 823 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
824 } 824 }
diff --git a/arch/metag/kernel/module.c b/arch/metag/kernel/module.c
index 986331cd0a52..bb8dfba9a763 100644
--- a/arch/metag/kernel/module.c
+++ b/arch/metag/kernel/module.c
@@ -176,8 +176,8 @@ static uint32_t do_plt_call(void *location, Elf32_Addr val,
176 tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3); 176 tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3);
177 177
178 /* Init, or core PLT? */ 178 /* Init, or core PLT? */
179 if (location >= mod->module_core 179 if (location >= mod->core_layout.base
180 && location < mod->module_core + mod->core_size) 180 && location < mod->core_layout.base + mod->core_layout.size)
181 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; 181 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
182 else 182 else
183 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; 183 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 9067b651c7a2..544ea21bfef9 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -205,11 +205,11 @@ static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
205 || s->sh_entsize != ~0UL) 205 || s->sh_entsize != ~0UL)
206 continue; 206 continue;
207 s->sh_entsize = 207 s->sh_entsize =
208 get_offset((unsigned long *)&mod->core_size, s); 208 get_offset((unsigned long *)&mod->core_layout.size, s);
209 } 209 }
210 210
211 if (m == 0) 211 if (m == 0)
212 mod->core_text_size = mod->core_size; 212 mod->core_layout.text_size = mod->core_layout.size;
213 213
214 } 214 }
215} 215}
@@ -641,7 +641,7 @@ static int vpe_elfload(struct vpe *v)
641 layout_sections(&mod, hdr, sechdrs, secstrings); 641 layout_sections(&mod, hdr, sechdrs, secstrings);
642 } 642 }
643 643
644 v->load_addr = alloc_progmem(mod.core_size); 644 v->load_addr = alloc_progmem(mod.core_layout.size);
645 if (!v->load_addr) 645 if (!v->load_addr)
646 return -ENOMEM; 646 return -ENOMEM;
647 647
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 3c63a820fcda..b9d75d9fa9ac 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -42,9 +42,9 @@
42 * We are not doing SEGREL32 handling correctly. According to the ABI, we 42 * We are not doing SEGREL32 handling correctly. According to the ABI, we
43 * should do a value offset, like this: 43 * should do a value offset, like this:
44 * if (in_init(me, (void *)val)) 44 * if (in_init(me, (void *)val))
45 * val -= (uint32_t)me->module_init; 45 * val -= (uint32_t)me->init_layout.base;
46 * else 46 * else
47 * val -= (uint32_t)me->module_core; 47 * val -= (uint32_t)me->core_layout.base;
48 * However, SEGREL32 is used only for PARISC unwind entries, and we want 48 * However, SEGREL32 is used only for PARISC unwind entries, and we want
49 * those entries to have an absolute address, and not just an offset. 49 * those entries to have an absolute address, and not just an offset.
50 * 50 *
@@ -100,14 +100,14 @@
100 * or init pieces the location is */ 100 * or init pieces the location is */
101static inline int in_init(struct module *me, void *loc) 101static inline int in_init(struct module *me, void *loc)
102{ 102{
103 return (loc >= me->module_init && 103 return (loc >= me->init_layout.base &&
104 loc <= (me->module_init + me->init_size)); 104 loc <= (me->init_layout.base + me->init_layout.size));
105} 105}
106 106
107static inline int in_core(struct module *me, void *loc) 107static inline int in_core(struct module *me, void *loc)
108{ 108{
109 return (loc >= me->module_core && 109 return (loc >= me->core_layout.base &&
110 loc <= (me->module_core + me->core_size)); 110 loc <= (me->core_layout.base + me->core_layout.size));
111} 111}
112 112
113static inline int in_local(struct module *me, void *loc) 113static inline int in_local(struct module *me, void *loc)
@@ -367,13 +367,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
367 } 367 }
368 368
369 /* align things a bit */ 369 /* align things a bit */
370 me->core_size = ALIGN(me->core_size, 16); 370 me->core_layout.size = ALIGN(me->core_layout.size, 16);
371 me->arch.got_offset = me->core_size; 371 me->arch.got_offset = me->core_layout.size;
372 me->core_size += gots * sizeof(struct got_entry); 372 me->core_layout.size += gots * sizeof(struct got_entry);
373 373
374 me->core_size = ALIGN(me->core_size, 16); 374 me->core_layout.size = ALIGN(me->core_layout.size, 16);
375 me->arch.fdesc_offset = me->core_size; 375 me->arch.fdesc_offset = me->core_layout.size;
376 me->core_size += fdescs * sizeof(Elf_Fdesc); 376 me->core_layout.size += fdescs * sizeof(Elf_Fdesc);
377 377
378 me->arch.got_max = gots; 378 me->arch.got_max = gots;
379 me->arch.fdesc_max = fdescs; 379 me->arch.fdesc_max = fdescs;
@@ -391,7 +391,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
391 391
392 BUG_ON(value == 0); 392 BUG_ON(value == 0);
393 393
394 got = me->module_core + me->arch.got_offset; 394 got = me->core_layout.base + me->arch.got_offset;
395 for (i = 0; got[i].addr; i++) 395 for (i = 0; got[i].addr; i++)
396 if (got[i].addr == value) 396 if (got[i].addr == value)
397 goto out; 397 goto out;
@@ -409,7 +409,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
409#ifdef CONFIG_64BIT 409#ifdef CONFIG_64BIT
410static Elf_Addr get_fdesc(struct module *me, unsigned long value) 410static Elf_Addr get_fdesc(struct module *me, unsigned long value)
411{ 411{
412 Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; 412 Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;
413 413
414 if (!value) { 414 if (!value) {
415 printk(KERN_ERR "%s: zero OPD requested!\n", me->name); 415 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
@@ -427,7 +427,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
427 427
428 /* Create new one */ 428 /* Create new one */
429 fdesc->addr = value; 429 fdesc->addr = value;
430 fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; 430 fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
431 return (Elf_Addr)fdesc; 431 return (Elf_Addr)fdesc;
432} 432}
433#endif /* CONFIG_64BIT */ 433#endif /* CONFIG_64BIT */
@@ -839,7 +839,7 @@ register_unwind_table(struct module *me,
839 839
840 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; 840 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
841 end = table + sechdrs[me->arch.unwind_section].sh_size; 841 end = table + sechdrs[me->arch.unwind_section].sh_size;
842 gp = (Elf_Addr)me->module_core + me->arch.got_offset; 842 gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
843 843
844 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", 844 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
845 me->arch.unwind_section, table, end, gp); 845 me->arch.unwind_section, table, end, gp);
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index c94d2e018d84..2c01665eb410 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -188,8 +188,8 @@ static uint32_t do_plt_call(void *location,
188 188
189 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); 189 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
190 /* Init, or core PLT? */ 190 /* Init, or core PLT? */
191 if (location >= mod->module_core 191 if (location >= mod->core_layout.base
192 && location < mod->module_core + mod->core_size) 192 && location < mod->core_layout.base + mod->core_layout.size)
193 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; 193 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
194 else 194 else
195 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; 195 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
@@ -296,7 +296,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
296 } 296 }
297#ifdef CONFIG_DYNAMIC_FTRACE 297#ifdef CONFIG_DYNAMIC_FTRACE
298 module->arch.tramp = 298 module->arch.tramp =
299 do_plt_call(module->module_core, 299 do_plt_call(module->core_layout.base,
300 (unsigned long)ftrace_caller, 300 (unsigned long)ftrace_caller,
301 sechdrs, module); 301 sechdrs, module);
302#endif 302#endif
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 0c1a679314dd..7873e171457c 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -159,11 +159,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
159 159
160 /* Increase core size by size of got & plt and set start 160 /* Increase core size by size of got & plt and set start
161 offsets for got and plt. */ 161 offsets for got and plt. */
162 me->core_size = ALIGN(me->core_size, 4); 162 me->core_layout.size = ALIGN(me->core_layout.size, 4);
163 me->arch.got_offset = me->core_size; 163 me->arch.got_offset = me->core_layout.size;
164 me->core_size += me->arch.got_size; 164 me->core_layout.size += me->arch.got_size;
165 me->arch.plt_offset = me->core_size; 165 me->arch.plt_offset = me->core_layout.size;
166 me->core_size += me->arch.plt_size; 166 me->core_layout.size += me->arch.plt_size;
167 return 0; 167 return 0;
168} 168}
169 169
@@ -279,7 +279,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
279 if (info->got_initialized == 0) { 279 if (info->got_initialized == 0) {
280 Elf_Addr *gotent; 280 Elf_Addr *gotent;
281 281
282 gotent = me->module_core + me->arch.got_offset + 282 gotent = me->core_layout.base + me->arch.got_offset +
283 info->got_offset; 283 info->got_offset;
284 *gotent = val; 284 *gotent = val;
285 info->got_initialized = 1; 285 info->got_initialized = 1;
@@ -302,7 +302,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
302 rc = apply_rela_bits(loc, val, 0, 64, 0); 302 rc = apply_rela_bits(loc, val, 0, 64, 0);
303 else if (r_type == R_390_GOTENT || 303 else if (r_type == R_390_GOTENT ||
304 r_type == R_390_GOTPLTENT) { 304 r_type == R_390_GOTPLTENT) {
305 val += (Elf_Addr) me->module_core - loc; 305 val += (Elf_Addr) me->core_layout.base - loc;
306 rc = apply_rela_bits(loc, val, 1, 32, 1); 306 rc = apply_rela_bits(loc, val, 1, 32, 1);
307 } 307 }
308 break; 308 break;
@@ -315,7 +315,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
315 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ 315 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
316 if (info->plt_initialized == 0) { 316 if (info->plt_initialized == 0) {
317 unsigned int *ip; 317 unsigned int *ip;
318 ip = me->module_core + me->arch.plt_offset + 318 ip = me->core_layout.base + me->arch.plt_offset +
319 info->plt_offset; 319 info->plt_offset;
320 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ 320 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
321 ip[1] = 0x100a0004; 321 ip[1] = 0x100a0004;
@@ -334,7 +334,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
334 val - loc + 0xffffUL < 0x1ffffeUL) || 334 val - loc + 0xffffUL < 0x1ffffeUL) ||
335 (r_type == R_390_PLT32DBL && 335 (r_type == R_390_PLT32DBL &&
336 val - loc + 0xffffffffULL < 0x1fffffffeULL))) 336 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
337 val = (Elf_Addr) me->module_core + 337 val = (Elf_Addr) me->core_layout.base +
338 me->arch.plt_offset + 338 me->arch.plt_offset +
339 info->plt_offset; 339 info->plt_offset;
340 val += rela->r_addend - loc; 340 val += rela->r_addend - loc;
@@ -356,7 +356,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
356 case R_390_GOTOFF32: /* 32 bit offset to GOT. */ 356 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
357 case R_390_GOTOFF64: /* 64 bit offset to GOT. */ 357 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
358 val = val + rela->r_addend - 358 val = val + rela->r_addend -
359 ((Elf_Addr) me->module_core + me->arch.got_offset); 359 ((Elf_Addr) me->core_layout.base + me->arch.got_offset);
360 if (r_type == R_390_GOTOFF16) 360 if (r_type == R_390_GOTOFF16)
361 rc = apply_rela_bits(loc, val, 0, 16, 0); 361 rc = apply_rela_bits(loc, val, 0, 16, 0);
362 else if (r_type == R_390_GOTOFF32) 362 else if (r_type == R_390_GOTOFF32)
@@ -366,7 +366,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
366 break; 366 break;
367 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ 367 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
368 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ 368 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
369 val = (Elf_Addr) me->module_core + me->arch.got_offset + 369 val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
370 rela->r_addend - loc; 370 rela->r_addend - loc;
371 if (r_type == R_390_GOTPC) 371 if (r_type == R_390_GOTPC)
372 rc = apply_rela_bits(loc, val, 1, 32, 0); 372 rc = apply_rela_bits(loc, val, 1, 32, 0);
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index d1d35ccffed3..bcc06e82a593 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
@@ -41,8 +41,8 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
41 int ret, numpages, size = 4; 41 int ret, numpages, size = 4;
42 bool readonly; 42 bool readonly;
43 unsigned long val; 43 unsigned long val;
44 unsigned long core = (unsigned long)mod->module_core; 44 unsigned long core = (unsigned long)mod->core_layout.base;
45 unsigned long core_size = mod->core_size; 45 unsigned long core_size = mod->core_layout.size;
46 46
47 switch (type) { 47 switch (type) {
48 case R_X86_64_NONE: 48 case R_X86_64_NONE:
@@ -72,7 +72,7 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
72 readonly = false; 72 readonly = false;
73 73
74#ifdef CONFIG_DEBUG_SET_MODULE_RONX 74#ifdef CONFIG_DEBUG_SET_MODULE_RONX
75 if (loc < core + mod->core_ro_size) 75 if (loc < core + mod->core_layout.ro_size)
76 readonly = true; 76 readonly = true;
77#endif 77#endif
78 78
diff --git a/include/linux/module.h b/include/linux/module.h
index 3a19c79918e0..6e68e8cf4d0d 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -302,6 +302,28 @@ struct mod_tree_node {
302 struct latch_tree_node node; 302 struct latch_tree_node node;
303}; 303};
304 304
305struct module_layout {
306 /* The actual code + data. */
307 void *base;
308 /* Total size. */
309 unsigned int size;
310 /* The size of the executable code. */
311 unsigned int text_size;
312 /* Size of RO section of the module (text+rodata) */
313 unsigned int ro_size;
314
315#ifdef CONFIG_MODULES_TREE_LOOKUP
316 struct mod_tree_node mtn;
317#endif
318};
319
320#ifdef CONFIG_MODULES_TREE_LOOKUP
321/* Only touch one cacheline for common rbtree-for-core-layout case. */
322#define __module_layout_align ____cacheline_aligned
323#else
324#define __module_layout_align
325#endif
326
305struct module { 327struct module {
306 enum module_state state; 328 enum module_state state;
307 329
@@ -366,37 +388,9 @@ struct module {
366 /* Startup function. */ 388 /* Startup function. */
367 int (*init)(void); 389 int (*init)(void);
368 390
369 /* 391 /* Core layout: rbtree is accessed frequently, so keep together. */
370 * If this is non-NULL, vfree() after init() returns. 392 struct module_layout core_layout __module_layout_align;
371 * 393 struct module_layout init_layout;
372 * Cacheline align here, such that:
373 * module_init, module_core, init_size, core_size,
374 * init_text_size, core_text_size and mtn_core::{mod,node[0]}
375 * are on the same cacheline.
376 */
377 void *module_init ____cacheline_aligned;
378
379 /* Here is the actual code + data, vfree'd on unload. */
380 void *module_core;
381
382 /* Here are the sizes of the init and core sections */
383 unsigned int init_size, core_size;
384
385 /* The size of the executable code in each section. */
386 unsigned int init_text_size, core_text_size;
387
388#ifdef CONFIG_MODULES_TREE_LOOKUP
389 /*
390 * We want mtn_core::{mod,node[0]} to be in the same cacheline as the
391 * above entries such that a regular lookup will only touch one
392 * cacheline.
393 */
394 struct mod_tree_node mtn_core;
395 struct mod_tree_node mtn_init;
396#endif
397
398 /* Size of RO sections of the module (text+rodata) */
399 unsigned int init_ro_size, core_ro_size;
400 394
401 /* Arch-specific module values */ 395 /* Arch-specific module values */
402 struct mod_arch_specific arch; 396 struct mod_arch_specific arch;
@@ -505,15 +499,15 @@ bool is_module_text_address(unsigned long addr);
505static inline bool within_module_core(unsigned long addr, 499static inline bool within_module_core(unsigned long addr,
506 const struct module *mod) 500 const struct module *mod)
507{ 501{
508 return (unsigned long)mod->module_core <= addr && 502 return (unsigned long)mod->core_layout.base <= addr &&
509 addr < (unsigned long)mod->module_core + mod->core_size; 503 addr < (unsigned long)mod->core_layout.base + mod->core_layout.size;
510} 504}
511 505
512static inline bool within_module_init(unsigned long addr, 506static inline bool within_module_init(unsigned long addr,
513 const struct module *mod) 507 const struct module *mod)
514{ 508{
515 return (unsigned long)mod->module_init <= addr && 509 return (unsigned long)mod->init_layout.base <= addr &&
516 addr < (unsigned long)mod->module_init + mod->init_size; 510 addr < (unsigned long)mod->init_layout.base + mod->init_layout.size;
517} 511}
518 512
519static inline bool within_module(unsigned long addr, const struct module *mod) 513static inline bool within_module(unsigned long addr, const struct module *mod)
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 4121345498e0..2a20c0dfdafc 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
2021 continue; 2021 continue;
2022 2022
2023 kdb_printf("%-20s%8u 0x%p ", mod->name, 2023 kdb_printf("%-20s%8u 0x%p ", mod->name,
2024 mod->core_size, (void *)mod); 2024 mod->core_layout.size, (void *)mod);
2025#ifdef CONFIG_MODULE_UNLOAD 2025#ifdef CONFIG_MODULE_UNLOAD
2026 kdb_printf("%4d ", module_refcount(mod)); 2026 kdb_printf("%4d ", module_refcount(mod));
2027#endif 2027#endif
@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
2031 kdb_printf(" (Loading)"); 2031 kdb_printf(" (Loading)");
2032 else 2032 else
2033 kdb_printf(" (Live)"); 2033 kdb_printf(" (Live)");
2034 kdb_printf(" 0x%p", mod->module_core); 2034 kdb_printf(" 0x%p", mod->core_layout.base);
2035 2035
2036#ifdef CONFIG_MODULE_UNLOAD 2036#ifdef CONFIG_MODULE_UNLOAD
2037 { 2037 {
diff --git a/kernel/module.c b/kernel/module.c
index 14b224967e7b..a0a3d6d9d5e8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -108,13 +108,6 @@ static LIST_HEAD(modules);
108 * Use a latched RB-tree for __module_address(); this allows us to use 108 * Use a latched RB-tree for __module_address(); this allows us to use
109 * RCU-sched lookups of the address from any context. 109 * RCU-sched lookups of the address from any context.
110 * 110 *
111 * Because modules have two address ranges: init and core, we need two
112 * latch_tree_nodes entries. Therefore we need the back-pointer from
113 * mod_tree_node.
114 *
115 * Because init ranges are short lived we mark them unlikely and have placed
116 * them outside the critical cacheline in struct module.
117 *
118 * This is conditional on PERF_EVENTS || TRACING because those can really hit 111 * This is conditional on PERF_EVENTS || TRACING because those can really hit
119 * __module_address() hard by doing a lot of stack unwinding; potentially from 112 * __module_address() hard by doing a lot of stack unwinding; potentially from
120 * NMI context. 113 * NMI context.
@@ -122,24 +115,16 @@ static LIST_HEAD(modules);
122 115
123static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) 116static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
124{ 117{
125 struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node); 118 struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
126 struct module *mod = mtn->mod;
127 119
128 if (unlikely(mtn == &mod->mtn_init)) 120 return (unsigned long)layout->base;
129 return (unsigned long)mod->module_init;
130
131 return (unsigned long)mod->module_core;
132} 121}
133 122
134static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) 123static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
135{ 124{
136 struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node); 125 struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
137 struct module *mod = mtn->mod;
138
139 if (unlikely(mtn == &mod->mtn_init))
140 return (unsigned long)mod->init_size;
141 126
142 return (unsigned long)mod->core_size; 127 return (unsigned long)layout->size;
143} 128}
144 129
145static __always_inline bool 130static __always_inline bool
@@ -197,23 +182,23 @@ static void __mod_tree_remove(struct mod_tree_node *node)
197 */ 182 */
198static void mod_tree_insert(struct module *mod) 183static void mod_tree_insert(struct module *mod)
199{ 184{
200 mod->mtn_core.mod = mod; 185 mod->core_layout.mtn.mod = mod;
201 mod->mtn_init.mod = mod; 186 mod->init_layout.mtn.mod = mod;
202 187
203 __mod_tree_insert(&mod->mtn_core); 188 __mod_tree_insert(&mod->core_layout.mtn);
204 if (mod->init_size) 189 if (mod->init_layout.size)
205 __mod_tree_insert(&mod->mtn_init); 190 __mod_tree_insert(&mod->init_layout.mtn);
206} 191}
207 192
208static void mod_tree_remove_init(struct module *mod) 193static void mod_tree_remove_init(struct module *mod)
209{ 194{
210 if (mod->init_size) 195 if (mod->init_layout.size)
211 __mod_tree_remove(&mod->mtn_init); 196 __mod_tree_remove(&mod->init_layout.mtn);
212} 197}
213 198
214static void mod_tree_remove(struct module *mod) 199static void mod_tree_remove(struct module *mod)
215{ 200{
216 __mod_tree_remove(&mod->mtn_core); 201 __mod_tree_remove(&mod->core_layout.mtn);
217 mod_tree_remove_init(mod); 202 mod_tree_remove_init(mod);
218} 203}
219 204
@@ -267,9 +252,9 @@ static void __mod_update_bounds(void *base, unsigned int size)
267 252
268static void mod_update_bounds(struct module *mod) 253static void mod_update_bounds(struct module *mod)
269{ 254{
270 __mod_update_bounds(mod->module_core, mod->core_size); 255 __mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
271 if (mod->init_size) 256 if (mod->init_layout.size)
272 __mod_update_bounds(mod->module_init, mod->init_size); 257 __mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
273} 258}
274 259
275#ifdef CONFIG_KGDB_KDB 260#ifdef CONFIG_KGDB_KDB
@@ -1214,7 +1199,7 @@ struct module_attribute module_uevent =
1214static ssize_t show_coresize(struct module_attribute *mattr, 1199static ssize_t show_coresize(struct module_attribute *mattr,
1215 struct module_kobject *mk, char *buffer) 1200 struct module_kobject *mk, char *buffer)
1216{ 1201{
1217 return sprintf(buffer, "%u\n", mk->mod->core_size); 1202 return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
1218} 1203}
1219 1204
1220static struct module_attribute modinfo_coresize = 1205static struct module_attribute modinfo_coresize =
@@ -1223,7 +1208,7 @@ static struct module_attribute modinfo_coresize =
1223static ssize_t show_initsize(struct module_attribute *mattr, 1208static ssize_t show_initsize(struct module_attribute *mattr,
1224 struct module_kobject *mk, char *buffer) 1209 struct module_kobject *mk, char *buffer)
1225{ 1210{
1226 return sprintf(buffer, "%u\n", mk->mod->init_size); 1211 return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
1227} 1212}
1228 1213
1229static struct module_attribute modinfo_initsize = 1214static struct module_attribute modinfo_initsize =
@@ -1917,29 +1902,29 @@ static void set_section_ro_nx(void *base,
1917 1902
1918static void set_module_core_ro_nx(struct module *mod) 1903static void set_module_core_ro_nx(struct module *mod)
1919{ 1904{
1920 set_section_ro_nx(mod->module_core, mod->core_text_size, 1905 set_section_ro_nx(mod->core_layout.base, mod->core_layout.text_size,
1921 mod->core_ro_size, mod->core_size, 1906 mod->core_layout.ro_size, mod->core_layout.size,
1922 set_memory_ro, set_memory_nx); 1907 set_memory_ro, set_memory_nx);
1923} 1908}
1924 1909
1925static void unset_module_core_ro_nx(struct module *mod) 1910static void unset_module_core_ro_nx(struct module *mod)
1926{ 1911{
1927 set_section_ro_nx(mod->module_core, mod->core_text_size, 1912 set_section_ro_nx(mod->core_layout.base, mod->core_layout.text_size,
1928 mod->core_ro_size, mod->core_size, 1913 mod->core_layout.ro_size, mod->core_layout.size,
1929 set_memory_rw, set_memory_x); 1914 set_memory_rw, set_memory_x);
1930} 1915}
1931 1916
1932static void set_module_init_ro_nx(struct module *mod) 1917static void set_module_init_ro_nx(struct module *mod)
1933{ 1918{
1934 set_section_ro_nx(mod->module_init, mod->init_text_size, 1919 set_section_ro_nx(mod->init_layout.base, mod->init_layout.text_size,
1935 mod->init_ro_size, mod->init_size, 1920 mod->init_layout.ro_size, mod->init_layout.size,
1936 set_memory_ro, set_memory_nx); 1921 set_memory_ro, set_memory_nx);
1937} 1922}
1938 1923
1939static void unset_module_init_ro_nx(struct module *mod) 1924static void unset_module_init_ro_nx(struct module *mod)
1940{ 1925{
1941 set_section_ro_nx(mod->module_init, mod->init_text_size, 1926 set_section_ro_nx(mod->init_layout.base, mod->init_layout.text_size,
1942 mod->init_ro_size, mod->init_size, 1927 mod->init_layout.ro_size, mod->init_layout.size,
1943 set_memory_rw, set_memory_x); 1928 set_memory_rw, set_memory_x);
1944} 1929}
1945 1930
@@ -1952,14 +1937,14 @@ void set_all_modules_text_rw(void)
1952 list_for_each_entry_rcu(mod, &modules, list) { 1937 list_for_each_entry_rcu(mod, &modules, list) {
1953 if (mod->state == MODULE_STATE_UNFORMED) 1938 if (mod->state == MODULE_STATE_UNFORMED)
1954 continue; 1939 continue;
1955 if ((mod->module_core) && (mod->core_text_size)) { 1940 if ((mod->core_layout.base) && (mod->core_layout.text_size)) {
1956 set_page_attributes(mod->module_core, 1941 set_page_attributes(mod->core_layout.base,
1957 mod->module_core + mod->core_text_size, 1942 mod->core_layout.base + mod->core_layout.text_size,
1958 set_memory_rw); 1943 set_memory_rw);
1959 } 1944 }
1960 if ((mod->module_init) && (mod->init_text_size)) { 1945 if ((mod->init_layout.base) && (mod->init_layout.text_size)) {
1961 set_page_attributes(mod->module_init, 1946 set_page_attributes(mod->init_layout.base,
1962 mod->module_init + mod->init_text_size, 1947 mod->init_layout.base + mod->init_layout.text_size,
1963 set_memory_rw); 1948 set_memory_rw);
1964 } 1949 }
1965 } 1950 }
@@ -1975,14 +1960,14 @@ void set_all_modules_text_ro(void)
1975 list_for_each_entry_rcu(mod, &modules, list) { 1960 list_for_each_entry_rcu(mod, &modules, list) {
1976 if (mod->state == MODULE_STATE_UNFORMED) 1961 if (mod->state == MODULE_STATE_UNFORMED)
1977 continue; 1962 continue;
1978 if ((mod->module_core) && (mod->core_text_size)) { 1963 if ((mod->core_layout.base) && (mod->core_layout.text_size)) {
1979 set_page_attributes(mod->module_core, 1964 set_page_attributes(mod->core_layout.base,
1980 mod->module_core + mod->core_text_size, 1965 mod->core_layout.base + mod->core_layout.text_size,
1981 set_memory_ro); 1966 set_memory_ro);
1982 } 1967 }
1983 if ((mod->module_init) && (mod->init_text_size)) { 1968 if ((mod->init_layout.base) && (mod->init_layout.text_size)) {
1984 set_page_attributes(mod->module_init, 1969 set_page_attributes(mod->init_layout.base,
1985 mod->module_init + mod->init_text_size, 1970 mod->init_layout.base + mod->init_layout.text_size,
1986 set_memory_ro); 1971 set_memory_ro);
1987 } 1972 }
1988 } 1973 }
@@ -2047,16 +2032,16 @@ static void free_module(struct module *mod)
2047 /* This may be NULL, but that's OK */ 2032 /* This may be NULL, but that's OK */
2048 unset_module_init_ro_nx(mod); 2033 unset_module_init_ro_nx(mod);
2049 module_arch_freeing_init(mod); 2034 module_arch_freeing_init(mod);
2050 module_memfree(mod->module_init); 2035 module_memfree(mod->init_layout.base);
2051 kfree(mod->args); 2036 kfree(mod->args);
2052 percpu_modfree(mod); 2037 percpu_modfree(mod);
2053 2038
2054 /* Free lock-classes; relies on the preceding sync_rcu(). */ 2039 /* Free lock-classes; relies on the preceding sync_rcu(). */
2055 lockdep_free_key_range(mod->module_core, mod->core_size); 2040 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
2056 2041
2057 /* Finally, free the core (containing the module structure) */ 2042 /* Finally, free the core (containing the module structure) */
2058 unset_module_core_ro_nx(mod); 2043 unset_module_core_ro_nx(mod);
2059 module_memfree(mod->module_core); 2044 module_memfree(mod->core_layout.base);
2060 2045
2061#ifdef CONFIG_MPU 2046#ifdef CONFIG_MPU
2062 update_protections(current->mm); 2047 update_protections(current->mm);
@@ -2259,20 +2244,20 @@ static void layout_sections(struct module *mod, struct load_info *info)
2259 || s->sh_entsize != ~0UL 2244 || s->sh_entsize != ~0UL
2260 || strstarts(sname, ".init")) 2245 || strstarts(sname, ".init"))
2261 continue; 2246 continue;
2262 s->sh_entsize = get_offset(mod, &mod->core_size, s, i); 2247 s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
2263 pr_debug("\t%s\n", sname); 2248 pr_debug("\t%s\n", sname);
2264 } 2249 }
2265 switch (m) { 2250 switch (m) {
2266 case 0: /* executable */ 2251 case 0: /* executable */
2267 mod->core_size = debug_align(mod->core_size); 2252 mod->core_layout.size = debug_align(mod->core_layout.size);
2268 mod->core_text_size = mod->core_size; 2253 mod->core_layout.text_size = mod->core_layout.size;
2269 break; 2254 break;
2270 case 1: /* RO: text and ro-data */ 2255 case 1: /* RO: text and ro-data */
2271 mod->core_size = debug_align(mod->core_size); 2256 mod->core_layout.size = debug_align(mod->core_layout.size);
2272 mod->core_ro_size = mod->core_size; 2257 mod->core_layout.ro_size = mod->core_layout.size;
2273 break; 2258 break;
2274 case 3: /* whole core */ 2259 case 3: /* whole core */
2275 mod->core_size = debug_align(mod->core_size); 2260 mod->core_layout.size = debug_align(mod->core_layout.size);
2276 break; 2261 break;
2277 } 2262 }
2278 } 2263 }
@@ -2288,21 +2273,21 @@ static void layout_sections(struct module *mod, struct load_info *info)
2288 || s->sh_entsize != ~0UL 2273 || s->sh_entsize != ~0UL
2289 || !strstarts(sname, ".init")) 2274 || !strstarts(sname, ".init"))
2290 continue; 2275 continue;
2291 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) 2276 s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
2292 | INIT_OFFSET_MASK); 2277 | INIT_OFFSET_MASK);
2293 pr_debug("\t%s\n", sname); 2278 pr_debug("\t%s\n", sname);
2294 } 2279 }
2295 switch (m) { 2280 switch (m) {
2296 case 0: /* executable */ 2281 case 0: /* executable */
2297 mod->init_size = debug_align(mod->init_size); 2282 mod->init_layout.size = debug_align(mod->init_layout.size);
2298 mod->init_text_size = mod->init_size; 2283 mod->init_layout.text_size = mod->init_layout.size;
2299 break; 2284 break;
2300 case 1: /* RO: text and ro-data */ 2285 case 1: /* RO: text and ro-data */
2301 mod->init_size = debug_align(mod->init_size); 2286 mod->init_layout.size = debug_align(mod->init_layout.size);
2302 mod->init_ro_size = mod->init_size; 2287 mod->init_layout.ro_size = mod->init_layout.size;
2303 break; 2288 break;
2304 case 3: /* whole init */ 2289 case 3: /* whole init */
2305 mod->init_size = debug_align(mod->init_size); 2290 mod->init_layout.size = debug_align(mod->init_layout.size);
2306 break; 2291 break;
2307 } 2292 }
2308 } 2293 }
@@ -2477,7 +2462,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
2477 2462
2478 /* Put symbol section at end of init part of module. */ 2463 /* Put symbol section at end of init part of module. */
2479 symsect->sh_flags |= SHF_ALLOC; 2464 symsect->sh_flags |= SHF_ALLOC;
2480 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, 2465 symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect,
2481 info->index.sym) | INIT_OFFSET_MASK; 2466 info->index.sym) | INIT_OFFSET_MASK;
2482 pr_debug("\t%s\n", info->secstrings + symsect->sh_name); 2467 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2483 2468
@@ -2494,16 +2479,16 @@ static void layout_symtab(struct module *mod, struct load_info *info)
2494 } 2479 }
2495 2480
2496 /* Append room for core symbols at end of core part. */ 2481 /* Append room for core symbols at end of core part. */
2497 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); 2482 info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
2498 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); 2483 info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
2499 mod->core_size += strtab_size; 2484 mod->core_layout.size += strtab_size;
2500 mod->core_size = debug_align(mod->core_size); 2485 mod->core_layout.size = debug_align(mod->core_layout.size);
2501 2486
2502 /* Put string table section at end of init part of module. */ 2487 /* Put string table section at end of init part of module. */
2503 strsect->sh_flags |= SHF_ALLOC; 2488 strsect->sh_flags |= SHF_ALLOC;
2504 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, 2489 strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
2505 info->index.str) | INIT_OFFSET_MASK; 2490 info->index.str) | INIT_OFFSET_MASK;
2506 mod->init_size = debug_align(mod->init_size); 2491 mod->init_layout.size = debug_align(mod->init_layout.size);
2507 pr_debug("\t%s\n", info->secstrings + strsect->sh_name); 2492 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2508} 2493}
2509 2494
@@ -2524,8 +2509,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
2524 for (i = 0; i < mod->num_symtab; i++) 2509 for (i = 0; i < mod->num_symtab; i++)
2525 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); 2510 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2526 2511
2527 mod->core_symtab = dst = mod->module_core + info->symoffs; 2512 mod->core_symtab = dst = mod->core_layout.base + info->symoffs;
2528 mod->core_strtab = s = mod->module_core + info->stroffs; 2513 mod->core_strtab = s = mod->core_layout.base + info->stroffs;
2529 src = mod->symtab; 2514 src = mod->symtab;
2530 for (ndst = i = 0; i < mod->num_symtab; i++) { 2515 for (ndst = i = 0; i < mod->num_symtab; i++) {
2531 if (i == 0 || 2516 if (i == 0 ||
@@ -2975,7 +2960,7 @@ static int move_module(struct module *mod, struct load_info *info)
2975 void *ptr; 2960 void *ptr;
2976 2961
2977 /* Do the allocs. */ 2962 /* Do the allocs. */
2978 ptr = module_alloc(mod->core_size); 2963 ptr = module_alloc(mod->core_layout.size);
2979 /* 2964 /*
2980 * The pointer to this block is stored in the module structure 2965 * The pointer to this block is stored in the module structure
2981 * which is inside the block. Just mark it as not being a 2966 * which is inside the block. Just mark it as not being a
@@ -2985,11 +2970,11 @@ static int move_module(struct module *mod, struct load_info *info)
2985 if (!ptr) 2970 if (!ptr)
2986 return -ENOMEM; 2971 return -ENOMEM;
2987 2972
2988 memset(ptr, 0, mod->core_size); 2973 memset(ptr, 0, mod->core_layout.size);
2989 mod->module_core = ptr; 2974 mod->core_layout.base = ptr;
2990 2975
2991 if (mod->init_size) { 2976 if (mod->init_layout.size) {
2992 ptr = module_alloc(mod->init_size); 2977 ptr = module_alloc(mod->init_layout.size);
2993 /* 2978 /*
2994 * The pointer to this block is stored in the module structure 2979 * The pointer to this block is stored in the module structure
2995 * which is inside the block. This block doesn't need to be 2980 * which is inside the block. This block doesn't need to be
@@ -2998,13 +2983,13 @@ static int move_module(struct module *mod, struct load_info *info)
2998 */ 2983 */
2999 kmemleak_ignore(ptr); 2984 kmemleak_ignore(ptr);
3000 if (!ptr) { 2985 if (!ptr) {
3001 module_memfree(mod->module_core); 2986 module_memfree(mod->core_layout.base);
3002 return -ENOMEM; 2987 return -ENOMEM;
3003 } 2988 }
3004 memset(ptr, 0, mod->init_size); 2989 memset(ptr, 0, mod->init_layout.size);
3005 mod->module_init = ptr; 2990 mod->init_layout.base = ptr;
3006 } else 2991 } else
3007 mod->module_init = NULL; 2992 mod->init_layout.base = NULL;
3008 2993
3009 /* Transfer each section which specifies SHF_ALLOC */ 2994 /* Transfer each section which specifies SHF_ALLOC */
3010 pr_debug("final section addresses:\n"); 2995 pr_debug("final section addresses:\n");
@@ -3016,10 +3001,10 @@ static int move_module(struct module *mod, struct load_info *info)
3016 continue; 3001 continue;
3017 3002
3018 if (shdr->sh_entsize & INIT_OFFSET_MASK) 3003 if (shdr->sh_entsize & INIT_OFFSET_MASK)
3019 dest = mod->module_init 3004 dest = mod->init_layout.base
3020 + (shdr->sh_entsize & ~INIT_OFFSET_MASK); 3005 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
3021 else 3006 else
3022 dest = mod->module_core + shdr->sh_entsize; 3007 dest = mod->core_layout.base + shdr->sh_entsize;
3023 3008
3024 if (shdr->sh_type != SHT_NOBITS) 3009 if (shdr->sh_type != SHT_NOBITS)
3025 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); 3010 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
@@ -3081,12 +3066,12 @@ static void flush_module_icache(const struct module *mod)
3081 * Do it before processing of module parameters, so the module 3066 * Do it before processing of module parameters, so the module
3082 * can provide parameter accessor functions of its own. 3067 * can provide parameter accessor functions of its own.
3083 */ 3068 */
3084 if (mod->module_init) 3069 if (mod->init_layout.base)
3085 flush_icache_range((unsigned long)mod->module_init, 3070 flush_icache_range((unsigned long)mod->init_layout.base,
3086 (unsigned long)mod->module_init 3071 (unsigned long)mod->init_layout.base
3087 + mod->init_size); 3072 + mod->init_layout.size);
3088 flush_icache_range((unsigned long)mod->module_core, 3073 flush_icache_range((unsigned long)mod->core_layout.base,
3089 (unsigned long)mod->module_core + mod->core_size); 3074 (unsigned long)mod->core_layout.base + mod->core_layout.size);
3090 3075
3091 set_fs(old_fs); 3076 set_fs(old_fs);
3092} 3077}
@@ -3144,8 +3129,8 @@ static void module_deallocate(struct module *mod, struct load_info *info)
3144{ 3129{
3145 percpu_modfree(mod); 3130 percpu_modfree(mod);
3146 module_arch_freeing_init(mod); 3131 module_arch_freeing_init(mod);
3147 module_memfree(mod->module_init); 3132 module_memfree(mod->init_layout.base);
3148 module_memfree(mod->module_core); 3133 module_memfree(mod->core_layout.base);
3149} 3134}
3150 3135
3151int __weak module_finalize(const Elf_Ehdr *hdr, 3136int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -3232,7 +3217,7 @@ static noinline int do_init_module(struct module *mod)
3232 ret = -ENOMEM; 3217 ret = -ENOMEM;
3233 goto fail; 3218 goto fail;
3234 } 3219 }
3235 freeinit->module_init = mod->module_init; 3220 freeinit->module_init = mod->init_layout.base;
3236 3221
3237 /* 3222 /*
3238 * We want to find out whether @mod uses async during init. Clear 3223 * We want to find out whether @mod uses async during init. Clear
@@ -3292,10 +3277,10 @@ static noinline int do_init_module(struct module *mod)
3292 mod_tree_remove_init(mod); 3277 mod_tree_remove_init(mod);
3293 unset_module_init_ro_nx(mod); 3278 unset_module_init_ro_nx(mod);
3294 module_arch_freeing_init(mod); 3279 module_arch_freeing_init(mod);
3295 mod->module_init = NULL; 3280 mod->init_layout.base = NULL;
3296 mod->init_size = 0; 3281 mod->init_layout.size = 0;
3297 mod->init_ro_size = 0; 3282 mod->init_layout.ro_size = 0;
3298 mod->init_text_size = 0; 3283 mod->init_layout.text_size = 0;
3299 /* 3284 /*
3300 * We want to free module_init, but be aware that kallsyms may be 3285 * We want to free module_init, but be aware that kallsyms may be
3301 * walking this with preempt disabled. In all the failure paths, we 3286 * walking this with preempt disabled. In all the failure paths, we
@@ -3575,7 +3560,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
3575 mutex_unlock(&module_mutex); 3560 mutex_unlock(&module_mutex);
3576 free_module: 3561 free_module:
3577 /* Free lock-classes; relies on the preceding sync_rcu() */ 3562 /* Free lock-classes; relies on the preceding sync_rcu() */
3578 lockdep_free_key_range(mod->module_core, mod->core_size); 3563 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
3579 3564
3580 module_deallocate(mod, info); 3565 module_deallocate(mod, info);
3581 free_copy: 3566 free_copy:
@@ -3653,9 +3638,9 @@ static const char *get_ksymbol(struct module *mod,
3653 3638
3654 /* At worse, next value is at end of module */ 3639 /* At worse, next value is at end of module */
3655 if (within_module_init(addr, mod)) 3640 if (within_module_init(addr, mod))
3656 nextval = (unsigned long)mod->module_init+mod->init_text_size; 3641 nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
3657 else 3642 else
3658 nextval = (unsigned long)mod->module_core+mod->core_text_size; 3643 nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
3659 3644
3660 /* Scan for closest preceding symbol, and next symbol. (ELF 3645 /* Scan for closest preceding symbol, and next symbol. (ELF
3661 starts real symbols at 1). */ 3646 starts real symbols at 1). */
@@ -3902,7 +3887,7 @@ static int m_show(struct seq_file *m, void *p)
3902 return 0; 3887 return 0;
3903 3888
3904 seq_printf(m, "%s %u", 3889 seq_printf(m, "%s %u",
3905 mod->name, mod->init_size + mod->core_size); 3890 mod->name, mod->init_layout.size + mod->core_layout.size);
3906 print_unload_info(m, mod); 3891 print_unload_info(m, mod);
3907 3892
3908 /* Informative for users. */ 3893 /* Informative for users. */
@@ -3911,7 +3896,7 @@ static int m_show(struct seq_file *m, void *p)
3911 mod->state == MODULE_STATE_COMING ? "Loading" : 3896 mod->state == MODULE_STATE_COMING ? "Loading" :
3912 "Live"); 3897 "Live");
3913 /* Used by oprofile and other similar tools. */ 3898 /* Used by oprofile and other similar tools. */
3914 seq_printf(m, " 0x%pK", mod->module_core); 3899 seq_printf(m, " 0x%pK", mod->core_layout.base);
3915 3900
3916 /* Taints info */ 3901 /* Taints info */
3917 if (mod->taints) 3902 if (mod->taints)
@@ -4054,8 +4039,8 @@ struct module *__module_text_address(unsigned long addr)
4054 struct module *mod = __module_address(addr); 4039 struct module *mod = __module_address(addr);
4055 if (mod) { 4040 if (mod) {
4056 /* Make sure it's within the text section. */ 4041 /* Make sure it's within the text section. */
4057 if (!within(addr, mod->module_init, mod->init_text_size) 4042 if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
4058 && !within(addr, mod->module_core, mod->core_text_size)) 4043 && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
4059 mod = NULL; 4044 mod = NULL;
4060 } 4045 }
4061 return mod; 4046 return mod;