Diffstat (limited to 'kernel/module.c')
-rw-r--r--	kernel/module.c	59
1 file changed, 33 insertions(+), 26 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index a65dc787a27b..e5538d5f00ad 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -474,9 +474,10 @@ static void module_unload_init(struct module *mod)
 
 	INIT_LIST_HEAD(&mod->modules_which_use_me);
 	for_each_possible_cpu(cpu)
-		local_set(__module_ref_addr(mod, cpu), 0);
+		per_cpu_ptr(mod->refptr, cpu)->count = 0;
+
 	/* Hold reference count during initialization. */
-	local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
+	__this_cpu_write(mod->refptr->count, 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
@@ -619,7 +620,7 @@ unsigned int module_refcount(struct module *mod)
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		total += local_read(__module_ref_addr(mod, cpu));
+		total += per_cpu_ptr(mod->refptr, cpu)->count;
 	return total;
 }
 EXPORT_SYMBOL(module_refcount);
@@ -796,14 +797,15 @@ static struct module_attribute refcnt = {
 void module_put(struct module *module)
 {
 	if (module) {
-		unsigned int cpu = get_cpu();
-		local_dec(__module_ref_addr(module, cpu));
+		preempt_disable();
+		__this_cpu_dec(module->refptr->count);
+
 		trace_module_put(module, _RET_IP_,
-				 local_read(__module_ref_addr(module, cpu)));
+				 __this_cpu_read(module->refptr->count));
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);
-		put_cpu();
+		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(module_put);
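
The three hunks above move the module reference counters from the old local_t area reached through __module_ref_addr() to an ordinary per-cpu structure hanging off mod->refptr. A minimal standalone sketch of that access pattern, assuming the single-field struct module_ref used by this patch (ref_put() and ref_total() are illustrative helpers, not kernel functions):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>

struct module_ref {
	int count;
};

/* Hot path: adjust only this CPU's counter; preemption is disabled so
 * the task cannot migrate between the decrement and the read. */
static void ref_put(struct module_ref __percpu *refptr)
{
	preempt_disable();
	__this_cpu_dec(refptr->count);
	preempt_enable();
}

/* Slow path: the (approximate) total is the sum over all possible CPUs. */
static int ref_total(struct module_ref __percpu *refptr)
{
	int cpu, total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(refptr, cpu)->count;
	return total;
}
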
@@ -1010,6 +1012,12 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
  * J. Corbet <corbet@lwn.net>
  */
 #if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
+
+static inline bool sect_empty(const Elf_Shdr *sect)
+{
+	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+}
+
 struct module_sect_attr
 {
 	struct module_attribute mattr;
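
The new helper folds the two tests the loops below previously open-coded, "the section is not SHF_ALLOC" and "the section has zero size", into one predicate. A hedged illustration of the call pattern it enables, mirroring the counting loop in add_sect_attrs() (count_loaded_sections() is a made-up wrapper):

#include <linux/elf.h>
#include <linux/types.h>

/* Same predicate as the patch adds: a section is "empty" for sysfs
 * purposes if it is not loaded into memory or occupies no bytes. */
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

/* Made-up wrapper mirroring the counting loops rewritten below. */
static unsigned int count_loaded_sections(const Elf_Shdr *sechdrs,
					  unsigned int nsect)
{
	unsigned int i, nloaded = 0;

	for (i = 0; i < nsect; i++)
		if (!sect_empty(&sechdrs[i]))
			nloaded++;
	return nloaded;
}
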
@@ -1051,8 +1059,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 
 	/* Count loaded sections and allocate structures */
 	for (i = 0; i < nsect; i++)
-		if (sechdrs[i].sh_flags & SHF_ALLOC
-		    && sechdrs[i].sh_size)
+		if (!sect_empty(&sechdrs[i]))
 			nloaded++;
 	size[0] = ALIGN(sizeof(*sect_attrs)
 			+ nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1070,9 +1077,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 	sattr = &sect_attrs->attrs[0];
 	gattr = &sect_attrs->grp.attrs[0];
 	for (i = 0; i < nsect; i++) {
-		if (! (sechdrs[i].sh_flags & SHF_ALLOC))
-			continue;
-		if (!sechdrs[i].sh_size)
+		if (sect_empty(&sechdrs[i]))
 			continue;
 		sattr->address = sechdrs[i].sh_addr;
 		sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
@@ -1156,7 +1161,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
 	/* Count notes sections and allocate structures. */
 	notes = 0;
 	for (i = 0; i < nsect; i++)
-		if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
+		if (!sect_empty(&sechdrs[i]) &&
 		    (sechdrs[i].sh_type == SHT_NOTE))
 			++notes;
 
@@ -1172,7 +1177,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
 	notes_attrs->notes = notes;
 	nattr = &notes_attrs->attrs[0];
 	for (loaded = i = 0; i < nsect; ++i) {
-		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+		if (sect_empty(&sechdrs[i]))
 			continue;
 		if (sechdrs[i].sh_type == SHT_NOTE) {
 			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
@@ -1394,9 +1399,9 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	if (mod->percpu)
 		percpu_modfree(mod->percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+#if defined(CONFIG_MODULE_UNLOAD)
 	if (mod->refptr)
-		percpu_modfree(mod->refptr);
+		free_percpu(mod->refptr);
 #endif
 	/* Free lock-classes: */
 	lockdep_free_key_range(mod->module_core, mod->core_size);
@@ -1910,9 +1915,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 	unsigned int i;
 
 	/* only scan the sections containing data */
-	kmemleak_scan_area(mod->module_core, (unsigned long)mod -
-			   (unsigned long)mod->module_core,
-			   sizeof(struct module), GFP_KERNEL);
+	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 
 	for (i = 1; i < hdr->e_shnum; i++) {
 		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -1921,8 +1924,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
 			continue;
 
-		kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
-				   (unsigned long)mod->module_core,
+		kmemleak_scan_area((void *)sechdrs[i].sh_addr,
 				   sechdrs[i].sh_size, GFP_KERNEL);
 	}
 }
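
Both kmemleak hunks track a change in the kmemleak_scan_area() calling convention in this series: the area is now described by its own pointer and size rather than by a containing block plus an offset into it. A hedged sketch of the difference (the wrapper function is hypothetical; the four-argument form is quoted from the removed lines above):

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/gfp.h>

/* Hypothetical wrapper showing the two conventions side by side. */
static void mark_module_struct(struct module *mod)
{
	/*
	 * Old form (removed above): base of the tracked block, offset of
	 * the sub-area inside it, then its length:
	 *
	 *   kmemleak_scan_area(mod->module_core,
	 *                      (unsigned long)mod - (unsigned long)mod->module_core,
	 *                      sizeof(struct module), GFP_KERNEL);
	 *
	 * New form: point kmemleak straight at the sub-area.
	 */
	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
}
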
@@ -2162,9 +2164,8 @@ static noinline struct module *load_module(void __user *umod,
 	mod = (void *)sechdrs[modindex].sh_addr;
 	kmemleak_load_module(mod, hdr, sechdrs, secstrings);
 
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
-				      mod->name);
+#if defined(CONFIG_MODULE_UNLOAD)
+	mod->refptr = alloc_percpu(struct module_ref);
 	if (!mod->refptr) {
 		err = -ENOMEM;
 		goto free_init;
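
Dropping the && defined(CONFIG_SMP) half of the guard works because the generic per-cpu allocator (alloc_percpu()/free_percpu()) is available on uniprocessor builds as well, so mod->refptr is now allocated whenever CONFIG_MODULE_UNLOAD is set. A minimal sketch of the allocate/initialize/free lifecycle that this hunk, module_unload_init() and free_module() implement between them (ref_setup() and ref_teardown() are illustrative names, and struct module_ref is the simplified single-field form from this patch):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct module_ref {
	int count;
};

static int ref_setup(struct module_ref __percpu **refptr)
{
	int cpu;

	*refptr = alloc_percpu(struct module_ref);	/* one slot per CPU */
	if (!*refptr)
		return -ENOMEM;

	/* Mirror module_unload_init(): clear every CPU's slot, then take
	 * the initial reference on the local CPU. */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(*refptr, cpu)->count = 0;
	__this_cpu_write((*refptr)->count, 1);
	return 0;
}

static void ref_teardown(struct module_ref __percpu *refptr)
{
	free_percpu(refptr);	/* counterpart of alloc_percpu() */
}
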
@@ -2250,6 +2251,12 @@ static noinline struct module *load_module(void __user *umod,
 					 "_ftrace_events",
 					 sizeof(*mod->trace_events),
 					 &mod->num_trace_events);
+	/*
+	 * This section contains pointers to allocated objects in the trace
+	 * code and not scanning it leads to false positives.
+	 */
+	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
+			   mod->num_trace_events, GFP_KERNEL);
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 	/* sechdrs[0].sh_size is always zero */
@@ -2390,8 +2397,8 @@ static noinline struct module *load_module(void __user *umod,
 	kobject_put(&mod->mkobj.kobj);
 free_unload:
 	module_unload_free(mod);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	percpu_modfree(mod->refptr);
+#if defined(CONFIG_MODULE_UNLOAD)
+	free_percpu(mod->refptr);
 free_init:
 #endif
 	module_free(mod, mod->module_init);