author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /kernel/module.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master  (archived-private-master)
Diffstat (limited to 'kernel/module.c')
-rw-r--r--	kernel/module.c | 361
1 file changed, 148 insertions(+), 213 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 5842a71cf052..1016b75b026a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -370,204 +370,98 @@ EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
 
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
-static void *percpu_modalloc(unsigned long size, unsigned long align,
-			     const char *name)
+static inline void __percpu *mod_percpu(struct module *mod)
 {
-	void *ptr;
+	return mod->percpu;
+}
 
+static int percpu_modalloc(struct module *mod,
+			   unsigned long size, unsigned long align)
+{
 	if (align > PAGE_SIZE) {
 		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
-		       name, align, PAGE_SIZE);
+		       mod->name, align, PAGE_SIZE);
 		align = PAGE_SIZE;
 	}
 
-	ptr = __alloc_reserved_percpu(size, align);
-	if (!ptr)
+	mod->percpu = __alloc_reserved_percpu(size, align);
+	if (!mod->percpu) {
 		printk(KERN_WARNING
 		       "Could not allocate %lu bytes percpu data\n", size);
-	return ptr;
-}
-
-static void percpu_modfree(void *freeme)
-{
-	free_percpu(freeme);
-}
-
-#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-/* Number of blocks used and allocated. */
-static unsigned int pcpu_num_used, pcpu_num_allocated;
-/* Size of each block. -ve means used. */
-static int *pcpu_size;
-
-static int split_block(unsigned int i, unsigned short size)
-{
-	/* Reallocation required? */
-	if (pcpu_num_used + 1 > pcpu_num_allocated) {
-		int *new;
-
-		new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2,
-			       GFP_KERNEL);
-		if (!new)
-			return 0;
-
-		pcpu_num_allocated *= 2;
-		pcpu_size = new;
+		return -ENOMEM;
 	}
-
-	/* Insert a new subblock */
-	memmove(&pcpu_size[i+1], &pcpu_size[i],
-		sizeof(pcpu_size[0]) * (pcpu_num_used - i));
-	pcpu_num_used++;
-
-	pcpu_size[i+1] -= size;
-	pcpu_size[i] = size;
-	return 1;
+	mod->percpu_size = size;
+	return 0;
 }
 
-static inline unsigned int block_size(int val)
+static void percpu_modfree(struct module *mod)
 {
-	if (val < 0)
-		return -val;
-	return val;
+	free_percpu(mod->percpu);
 }
 
-static void *percpu_modalloc(unsigned long size, unsigned long align,
-			     const char *name)
+static unsigned int find_pcpusec(Elf_Ehdr *hdr,
+				 Elf_Shdr *sechdrs,
+				 const char *secstrings)
 {
-	unsigned long extra;
-	unsigned int i;
-	void *ptr;
-	int cpu;
-
-	if (align > PAGE_SIZE) {
-		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
-		       name, align, PAGE_SIZE);
-		align = PAGE_SIZE;
-	}
-
-	ptr = __per_cpu_start;
-	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-		/* Extra for alignment requirement. */
-		extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
-		BUG_ON(i == 0 && extra != 0);
-
-		if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
-			continue;
-
-		/* Transfer extra to previous block. */
-		if (pcpu_size[i-1] < 0)
-			pcpu_size[i-1] -= extra;
-		else
-			pcpu_size[i-1] += extra;
-		pcpu_size[i] -= extra;
-		ptr += extra;
-
-		/* Split block if warranted */
-		if (pcpu_size[i] - size > sizeof(unsigned long))
-			if (!split_block(i, size))
-				return NULL;
-
-		/* add the per-cpu scanning areas */
-		for_each_possible_cpu(cpu)
-			kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
-				       GFP_KERNEL);
-
-		/* Mark allocated */
-		pcpu_size[i] = -pcpu_size[i];
-		return ptr;
-	}
-
-	printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
-	       size);
-	return NULL;
+	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
 }
 
-static void percpu_modfree(void *freeme)
+static void percpu_modcopy(struct module *mod,
+			   const void *from, unsigned long size)
 {
-	unsigned int i;
-	void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
 	int cpu;
 
-	/* First entry is core kernel percpu data. */
-	for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-		if (ptr == freeme) {
-			pcpu_size[i] = -pcpu_size[i];
-			goto free;
-		}
-	}
-	BUG();
-
- free:
-	/* remove the per-cpu scanning areas */
 	for_each_possible_cpu(cpu)
-		kmemleak_free(freeme + per_cpu_offset(cpu));
-
-	/* Merge with previous? */
-	if (pcpu_size[i-1] >= 0) {
-		pcpu_size[i-1] += pcpu_size[i];
-		pcpu_num_used--;
-		memmove(&pcpu_size[i], &pcpu_size[i+1],
-			(pcpu_num_used - i) * sizeof(pcpu_size[0]));
-		i--;
-	}
-	/* Merge with next? */
-	if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
-		pcpu_size[i] += pcpu_size[i+1];
-		pcpu_num_used--;
-		memmove(&pcpu_size[i+1], &pcpu_size[i+2],
-			(pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
-	}
+		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
 }
 
-static int percpu_modinit(void)
+/**
+ * is_module_percpu_address - test whether address is from module static percpu
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to module static percpu area.
+ *
+ * RETURNS:
+ * %true if @addr is from module static percpu area
+ */
+bool is_module_percpu_address(unsigned long addr)
 {
-	pcpu_num_used = 2;
-	pcpu_num_allocated = 2;
-	pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
-			    GFP_KERNEL);
-	/* Static in-kernel percpu data (used). */
-	pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
-	/* Free room. */
-	pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
-	if (pcpu_size[1] < 0) {
-		printk(KERN_ERR "No per-cpu room for modules.\n");
-		pcpu_num_used = 1;
-	}
-
-	return 0;
-}
-__initcall(percpu_modinit);
+	struct module *mod;
+	unsigned int cpu;
 
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
+	preempt_disable();
 
-static unsigned int find_pcpusec(Elf_Ehdr *hdr,
-				 Elf_Shdr *sechdrs,
-				 const char *secstrings)
-{
-	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
-}
+	list_for_each_entry_rcu(mod, &modules, list) {
+		if (!mod->percpu_size)
+			continue;
+		for_each_possible_cpu(cpu) {
+			void *start = per_cpu_ptr(mod->percpu, cpu);
 
-static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
-{
-	int cpu;
+			if ((void *)addr >= start &&
+			    (void *)addr < start + mod->percpu_size) {
+				preempt_enable();
+				return true;
+			}
+		}
+	}
 
-	for_each_possible_cpu(cpu)
-		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
-}
+	preempt_enable();
+	return false;
+}
 
 #else /* ... !CONFIG_SMP */
 
-static inline void *percpu_modalloc(unsigned long size, unsigned long align,
-				    const char *name)
+static inline void __percpu *mod_percpu(struct module *mod)
 {
 	return NULL;
 }
-static inline void percpu_modfree(void *pcpuptr)
+static inline int percpu_modalloc(struct module *mod,
+				  unsigned long size, unsigned long align)
+{
+	return -ENOMEM;
+}
+static inline void percpu_modfree(struct module *mod)
 {
-	BUG();
 }
 static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
 					Elf_Shdr *sechdrs,
@@ -575,12 +469,16 @@ static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
 {
 	return 0;
 }
-static inline void percpu_modcopy(void *pcpudst, const void *src,
-				  unsigned long size)
+static inline void percpu_modcopy(struct module *mod,
+				  const void *from, unsigned long size)
 {
 	/* pcpusec should be 0, and size of that section should be 0. */
 	BUG_ON(size != 0);
 }
+bool is_module_percpu_address(unsigned long addr)
+{
+	return false;
+}
 
 #endif /* CONFIG_SMP */
 
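The shape of the new interface above is easy to exercise outside the kernel. Below is a minimal userspace sketch of the same bookkeeping: once a module records its percpu base pointer and the size of one per-CPU copy, is_module_percpu_address() reduces to a range check over each CPU's copy. All names here (fake_module, fake_per_cpu_ptr, NR_CPUS as a constant) are illustrative stand-ins, not kernel APIs, and per-CPU copies are modeled as a fixed-stride array.

/* Userspace sketch of the percpu bookkeeping above; names are stand-ins. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct fake_module {
	void *percpu;		/* base of the per-CPU copies (mod->percpu) */
	size_t percpu_size;	/* size of one copy; 0 = no percpu data */
};

/* Stand-in for per_cpu_ptr(): copy #cpu lives at a fixed stride. */
static void *fake_per_cpu_ptr(struct fake_module *mod, int cpu)
{
	return (char *)mod->percpu + cpu * mod->percpu_size;
}

/* Mirrors is_module_percpu_address(): range-check every CPU's copy. */
static bool fake_is_percpu_address(struct fake_module *mod, void *addr)
{
	int cpu;

	if (!mod->percpu_size)
		return false;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		char *start = fake_per_cpu_ptr(mod, cpu);

		if ((char *)addr >= start &&
		    (char *)addr < start + mod->percpu_size)
			return true;
	}
	return false;
}

int main(void)
{
	struct fake_module mod = { .percpu_size = 64 };

	mod.percpu = calloc(NR_CPUS, mod.percpu_size);
	/* An address inside CPU 2's copy is recognized ... */
	printf("%d\n", fake_is_percpu_address(&mod,
			(char *)fake_per_cpu_ptr(&mod, 2) + 10));	/* 1 */
	/* ... an unrelated address is not. */
	printf("%d\n", fake_is_percpu_address(&mod, &mod));		/* 0 */
	free(mod.percpu);
	return 0;
}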
@@ -623,10 +521,13 @@ static void module_unload_init(struct module *mod)
 	int cpu;
 
 	INIT_LIST_HEAD(&mod->modules_which_use_me);
-	for_each_possible_cpu(cpu)
-		local_set(__module_ref_addr(mod, cpu), 0);
+	for_each_possible_cpu(cpu) {
+		per_cpu_ptr(mod->refptr, cpu)->incs = 0;
+		per_cpu_ptr(mod->refptr, cpu)->decs = 0;
+	}
+
 	/* Hold reference count during initialization. */
-	local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
+	__this_cpu_write(mod->refptr->incs, 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
@@ -765,12 +666,28 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 
 unsigned int module_refcount(struct module *mod)
 {
-	unsigned int total = 0;
+	unsigned int incs = 0, decs = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		total += local_read(__module_ref_addr(mod, cpu));
-	return total;
+		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
+	/*
+	 * ensure the incs are added up after the decs.
+	 * module_put ensures incs are visible before decs with smp_wmb.
+	 *
+	 * This 2-count scheme avoids the situation where the refcount
+	 * for CPU0 is read, then CPU0 increments the module refcount,
+	 * then CPU1 drops that refcount, then the refcount for CPU1 is
+	 * read. We would record a decrement but not its corresponding
+	 * increment so we would see a low count (disaster).
+	 *
+	 * Rare situation? But module_refcount can be preempted, and we
+	 * might be tallying up 4096+ CPUs. So it is not impossible.
+	 */
+	smp_rmb();
+	for_each_possible_cpu(cpu)
+		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
+	return incs - decs;
 }
 EXPORT_SYMBOL(module_refcount);
 
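The two-counter scheme in the hunk above is subtle enough to warrant a standalone illustration. The sketch below models it in userspace C11: per-CPU incs/decs counters, a release fence standing in for the smp_wmb() in module_put(), and an acquire fence standing in for the smp_rmb() between the two summation loops, so a decrement can never be tallied without its matching increment. This is an illustrative single-threaded model, not kernel code; the fake_* names are stand-ins.

/* Userspace sketch of the incs/decs refcount scheme; illustrative only. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static struct fake_ref {
	unsigned int incs;
	unsigned int decs;
} ref[NR_CPUS];

static void fake_module_get(int cpu)
{
	ref[cpu].incs++;		/* __this_cpu_inc(refptr->incs) */
}

static void fake_module_put(int cpu)
{
	/* Plays the role of smp_wmb() in module_put(): the inc that this
	 * dec balances must be visible before the dec itself. */
	atomic_thread_fence(memory_order_release);
	ref[cpu].decs++;		/* __this_cpu_inc(refptr->decs) */
}

static unsigned int fake_module_refcount(void)
{
	unsigned int incs = 0, decs = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		decs += ref[cpu].decs;
	/* Plays the role of smp_rmb(): all decs are read strictly before
	 * any incs, so the sum of incs can only be an overestimate,
	 * never an underestimate, of what the decs correspond to. */
	atomic_thread_fence(memory_order_acquire);
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		incs += ref[cpu].incs;
	return incs - decs;
}

int main(void)
{
	fake_module_get(0);
	fake_module_get(1);
	fake_module_put(1);
	printf("refcount = %u\n", fake_module_refcount());	/* 1 */
	return 0;
}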
@@ -946,14 +863,16 @@ static struct module_attribute refcnt = {
 void module_put(struct module *module)
 {
 	if (module) {
-		unsigned int cpu = get_cpu();
-		local_dec(__module_ref_addr(module, cpu));
+		preempt_disable();
+		smp_wmb(); /* see comment in module_refcount */
+		__this_cpu_inc(module->refptr->decs);
+
 		trace_module_put(module, _RET_IP_,
-				 local_read(__module_ref_addr(module, cpu)));
+				 __this_cpu_read(module->refptr->decs));
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);
-		put_cpu();
+		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(module_put);
@@ -1030,11 +949,23 @@ static int try_to_force_load(struct module *mod, const char *reason)
 }
 
 #ifdef CONFIG_MODVERSIONS
+/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
+static unsigned long maybe_relocated(unsigned long crc,
+				     const struct module *crc_owner)
+{
+#ifdef ARCH_RELOCATES_KCRCTAB
+	if (crc_owner == NULL)
+		return crc - (unsigned long)reloc_start;
+#endif
+	return crc;
+}
+
 static int check_version(Elf_Shdr *sechdrs,
 			 unsigned int versindex,
 			 const char *symname,
 			 struct module *mod,
-			 const unsigned long *crc)
+			 const unsigned long *crc,
+			 const struct module *crc_owner)
 {
 	unsigned int i, num_versions;
 	struct modversion_info *versions;
@@ -1055,10 +986,10 @@ static int check_version(Elf_Shdr *sechdrs,
 		if (strcmp(versions[i].name, symname) != 0)
 			continue;
 
-		if (versions[i].crc == *crc)
+		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
 			return 1;
 		DEBUGP("Found checksum %lX vs module %lX\n",
-		       *crc, versions[i].crc);
+		       maybe_relocated(*crc, crc_owner), versions[i].crc);
 		goto bad_version;
 	}
 
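The idea behind maybe_relocated() can be modeled with a small sketch. On an architecture that relocates the kernel at boot (and defines ARCH_RELOCATES_KCRCTAB), the relocation offset ends up baked into the CRC values stored in the kernel's own kcrctab, so the real CRC is only recovered by subtracting it back out; CRCs owned by a module (crc_owner != NULL) are never relocated and are used as-is. The userspace model below is illustrative only; reloc_offset and the flag argument are stand-ins for reloc_start and the crc_owner == NULL test.

/* Illustrative userspace model of maybe_relocated(); not kernel code. */
#include <stdio.h>

/* Pretend the kernel was relocated by this much at boot, and that the
 * relocation was unavoidably applied to the kernel's stored CRC values. */
static unsigned long reloc_offset = 0x100000;

static unsigned long maybe_relocated(unsigned long stored_crc,
				     int crc_is_kernel_owned)
{
	if (crc_is_kernel_owned)	/* crc_owner == NULL in the patch */
		return stored_crc - reloc_offset;
	return stored_crc;		/* module kcrctabs are not relocated */
}

int main(void)
{
	unsigned long real_crc = 0xdeadbeef;
	unsigned long stored_in_kernel = real_crc + reloc_offset;

	printf("kernel-owned: %#lx\n", maybe_relocated(stored_in_kernel, 1));
	printf("module-owned: %#lx\n", maybe_relocated(real_crc, 0));
	return 0;
}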
@@ -1081,7 +1012,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 	if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
 			 &crc, true, false))
 		BUG();
-	return check_version(sechdrs, versindex, "module_layout", mod, crc);
+	return check_version(sechdrs, versindex, "module_layout", mod, crc,
+			     NULL);
 }
 
 /* First part is kernel version, which we ignore if module has crcs. */
@@ -1099,7 +1031,8 @@ static inline int check_version(Elf_Shdr *sechdrs,
 			 unsigned int versindex,
 			 const char *symname,
 			 struct module *mod,
-			 const unsigned long *crc)
+			 const unsigned long *crc,
+			 const struct module *crc_owner)
 {
 	return 1;
 }
@@ -1134,8 +1067,8 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
 	/* use_module can fail due to OOM,
 	   or module initialization or unloading */
 	if (sym) {
-		if (!check_version(sechdrs, versindex, name, mod, crc) ||
-		    !use_module(mod, owner))
+		if (!check_version(sechdrs, versindex, name, mod, crc, owner)
+		    || !use_module(mod, owner))
 			sym = NULL;
 	}
 	return sym;
@@ -1146,6 +1079,12 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
  * J. Corbet <corbet@lwn.net>
  */
 #if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
+
+static inline bool sect_empty(const Elf_Shdr *sect)
+{
+	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+}
+
 struct module_sect_attr
 {
 	struct module_attribute mattr;
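The new sect_empty() helper simply folds the two open-coded tests (allocated in memory, non-zero size) that the sysfs hunks below previously repeated. A self-contained userspace sketch of the same filter, using a cut-down stand-in for Elf_Shdr (only the SHF_ALLOC value matches <elf.h>; the struct is illustrative):

/* Userspace sketch of the sect_empty() filter over fake section headers. */
#include <stdbool.h>
#include <stdio.h>

#define SHF_ALLOC 0x2	/* same value as in <elf.h> */

struct fake_shdr {
	const char *name;
	unsigned long sh_flags;
	unsigned long sh_size;
};

static bool sect_empty(const struct fake_shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

int main(void)
{
	struct fake_shdr sechdrs[] = {
		{ ".text",    SHF_ALLOC, 4096 },	/* loaded */
		{ ".comment", 0,         64   },	/* not allocated */
		{ ".empty",   SHF_ALLOC, 0    },	/* zero-sized */
	};
	unsigned int i, nloaded = 0;

	/* Same counting loop as add_sect_attrs() after the cleanup. */
	for (i = 0; i < sizeof(sechdrs) / sizeof(sechdrs[0]); i++)
		if (!sect_empty(&sechdrs[i]))
			nloaded++;
	printf("nloaded = %u\n", nloaded);	/* 1 */
	return 0;
}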
@@ -1187,8 +1126,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 
 	/* Count loaded sections and allocate structures */
 	for (i = 0; i < nsect; i++)
-		if (sechdrs[i].sh_flags & SHF_ALLOC
-		    && sechdrs[i].sh_size)
+		if (!sect_empty(&sechdrs[i]))
 			nloaded++;
 	size[0] = ALIGN(sizeof(*sect_attrs)
 			+ nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1206,9 +1144,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 	sattr = &sect_attrs->attrs[0];
 	gattr = &sect_attrs->grp.attrs[0];
 	for (i = 0; i < nsect; i++) {
-		if (! (sechdrs[i].sh_flags & SHF_ALLOC))
-			continue;
-		if (!sechdrs[i].sh_size)
+		if (sect_empty(&sechdrs[i]))
 			continue;
 		sattr->address = sechdrs[i].sh_addr;
 		sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
@@ -1216,6 +1152,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 		if (sattr->name == NULL)
 			goto out;
 		sect_attrs->nsections++;
+		sysfs_attr_init(&sattr->mattr.attr);
 		sattr->mattr.show = module_sect_show;
 		sattr->mattr.store = NULL;
 		sattr->mattr.attr.name = sattr->name;
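The sysfs_attr_init() / sysfs_bin_attr_init() calls added in this and the following hunks all follow one rule: an attribute living in dynamically allocated memory must be initialized before registration so that, under CONFIG_DEBUG_LOCK_ALLOC, it gets its own lockdep class (statically declared attributes get theirs from the declaration macros; without lockdep the call is a no-op). A kernel-style sketch of that pattern for an arbitrary kmalloc'ed attribute follows; it is illustrative (add_dynamic_attr is not a kernel function) and compiles only in-tree.

/* Kernel-style sketch: the rule the hunks above follow, applied to a
 * hypothetical dynamically allocated attribute. Not from this patch. */
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static int add_dynamic_attr(struct kobject *kobj, const char *name)
{
	struct kobj_attribute *ka;

	ka = kzalloc(sizeof(*ka), GFP_KERNEL);
	if (!ka)
		return -ENOMEM;

	sysfs_attr_init(&ka->attr);	/* give it a lockdep class */
	ka->attr.name = name;		/* name must outlive the attribute */
	ka->attr.mode = S_IRUGO;
	/* ka->show = ...; assign a show() callback before anyone reads it */

	return sysfs_create_file(kobj, &ka->attr);
}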
@@ -1292,7 +1229,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
 	/* Count notes sections and allocate structures.  */
 	notes = 0;
 	for (i = 0; i < nsect; i++)
-		if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
+		if (!sect_empty(&sechdrs[i]) &&
 		    (sechdrs[i].sh_type == SHT_NOTE))
 			++notes;
 
@@ -1308,9 +1245,10 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
 	notes_attrs->notes = notes;
 	nattr = &notes_attrs->attrs[0];
 	for (loaded = i = 0; i < nsect; ++i) {
-		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+		if (sect_empty(&sechdrs[i]))
 			continue;
 		if (sechdrs[i].sh_type == SHT_NOTE) {
+			sysfs_bin_attr_init(nattr);
 			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
 			nattr->attr.mode = S_IRUGO;
 			nattr->size = sechdrs[i].sh_size;
@@ -1383,6 +1321,7 @@ int module_add_modinfo_attrs(struct module *mod)
 		if (!attr->test ||
 		    (attr->test && attr->test(mod))) {
 			memcpy(temp_attr, attr, sizeof(*temp_attr));
+			sysfs_attr_init(&temp_attr->attr);
 			error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr);
 			++temp_attr;
 		}
@@ -1528,11 +1467,10 @@ static void free_module(struct module *mod)
 	/* This may be NULL, but that's OK */
 	module_free(mod, mod->module_init);
 	kfree(mod->args);
-	if (mod->percpu)
-		percpu_modfree(mod->percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	percpu_modfree(mod);
+#if defined(CONFIG_MODULE_UNLOAD)
 	if (mod->refptr)
-		percpu_modfree(mod->refptr);
+		free_percpu(mod->refptr);
 #endif
 	/* Free lock-classes: */
 	lockdep_free_key_range(mod->module_core, mod->core_size);
@@ -1648,7 +1586,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
 	default:
 		/* Divert to percpu allocation if a percpu var. */
 		if (sym[i].st_shndx == pcpuindex)
-			secbase = (unsigned long)mod->percpu;
+			secbase = (unsigned long)mod_percpu(mod);
 		else
 			secbase = sechdrs[sym[i].st_shndx].sh_addr;
 		sym[i].st_value += secbase;
@@ -2046,9 +1984,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 	unsigned int i;
 
 	/* only scan the sections containing data */
-	kmemleak_scan_area(mod->module_core, (unsigned long)mod -
-			   (unsigned long)mod->module_core,
-			   sizeof(struct module), GFP_KERNEL);
+	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 
 	for (i = 1; i < hdr->e_shnum; i++) {
 		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -2057,8 +1993,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
 			continue;
 
-		kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
-				   (unsigned long)mod->module_core,
-				   sechdrs[i].sh_size, GFP_KERNEL);
+		kmemleak_scan_area((void *)sechdrs[i].sh_addr,
+				   sechdrs[i].sh_size, GFP_KERNEL);
 	}
 }
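The two hunks above reflect the 2.6.34 kmemleak API change: a scan area is now described by a plain pointer and length, kmemleak_scan_area(ptr, size, gfp), instead of a base object plus offset. The usual pattern is shown in the kernel-style sketch below: once an allocated object has an explicit scan area, kmemleak scans only that area, so you point it at the part of the object that actually holds pointers. The struct and function here are hypothetical illustrations, not from this patch.

/* Kernel-style sketch of the new kmemleak_scan_area() usage; the
 * holder type and make_holder() are illustrative, not kernel code. */
#include <linux/kmemleak.h>
#include <linux/slab.h>

struct holder {
	void *objs[16];		/* references kmemleak must see */
	char payload[4096];	/* large, pointer-free tail */
};

static struct holder *make_holder(void)
{
	struct holder *h = kmalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return NULL;
	/* Restrict scanning to the pointer array: with an explicit scan
	 * area registered, kmemleak skips the pointer-free payload. */
	kmemleak_scan_area(h->objs, sizeof(h->objs), GFP_KERNEL);
	return h;
}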
@@ -2085,7 +2020,7 @@ static noinline struct module *load_module(void __user *umod,
 	unsigned int modindex, versindex, infoindex, pcpuindex;
 	struct module *mod;
 	long err = 0;
-	void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
+	void *ptr = NULL; /* Stops spurious gcc warning */
 	unsigned long symoffs, stroffs, *strmap;
 
 	mm_segment_t old_fs;
@@ -2225,15 +2160,11 @@ static noinline struct module *load_module(void __user *umod,
 
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
-		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
-					 sechdrs[pcpuindex].sh_addralign,
-					 mod->name);
-		if (!percpu) {
-			err = -ENOMEM;
+		err = percpu_modalloc(mod, sechdrs[pcpuindex].sh_size,
+				      sechdrs[pcpuindex].sh_addralign);
+		if (err)
 			goto free_mod;
-		}
 		sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
-		mod->percpu = percpu;
 	}
 
 	/* Determine total sizes, and put offsets in sh_entsize.  For now
@@ -2298,9 +2229,8 @@ static noinline struct module *load_module(void __user *umod,
 	mod = (void *)sechdrs[modindex].sh_addr;
 	kmemleak_load_module(mod, hdr, sechdrs, secstrings);
 
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
-				      mod->name);
+#if defined(CONFIG_MODULE_UNLOAD)
+	mod->refptr = alloc_percpu(struct module_ref);
 	if (!mod->refptr) {
 		err = -ENOMEM;
 		goto free_init;
@@ -2386,6 +2316,12 @@ static noinline struct module *load_module(void __user *umod,
2386 "_ftrace_events", 2316 "_ftrace_events",
2387 sizeof(*mod->trace_events), 2317 sizeof(*mod->trace_events),
2388 &mod->num_trace_events); 2318 &mod->num_trace_events);
2319 /*
2320 * This section contains pointers to allocated objects in the trace
2321 * code and not scanning it leads to false positives.
2322 */
2323 kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
2324 mod->num_trace_events, GFP_KERNEL);
2389#endif 2325#endif
2390#ifdef CONFIG_FTRACE_MCOUNT_RECORD 2326#ifdef CONFIG_FTRACE_MCOUNT_RECORD
2391 /* sechdrs[0].sh_size is always zero */ 2327 /* sechdrs[0].sh_size is always zero */
@@ -2443,7 +2379,7 @@ static noinline struct module *load_module(void __user *umod,
 	sort_extable(mod->extable, mod->extable + mod->num_exentries);
 
 	/* Finally, copy percpu area over. */
-	percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
+	percpu_modcopy(mod, (void *)sechdrs[pcpuindex].sh_addr,
 		       sechdrs[pcpuindex].sh_size);
 
 	add_kallsyms(mod, sechdrs, hdr->e_shnum, symindex, strindex,
@@ -2526,8 +2462,8 @@ static noinline struct module *load_module(void __user *umod,
 	kobject_put(&mod->mkobj.kobj);
  free_unload:
 	module_unload_free(mod);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	percpu_modfree(mod->refptr);
+#if defined(CONFIG_MODULE_UNLOAD)
+	free_percpu(mod->refptr);
  free_init:
 #endif
 	module_free(mod, mod->module_init);
@@ -2535,8 +2471,7 @@ static noinline struct module *load_module(void __user *umod,
 	module_free(mod, mod->module_core);
 	/* mod will be freed with core. Don't access it beyond this line! */
  free_percpu:
-	if (percpu)
-		percpu_modfree(percpu);
+	percpu_modfree(mod);
  free_mod:
 	kfree(args);
 	kfree(strmap);