Diffstat (limited to 'kernel/module.c')
 -rw-r--r--  kernel/module.c  909
 1 file changed, 542 insertions(+), 367 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 2d537186191f..6c562828c85c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -47,6 +47,7 @@
 #include <linux/rculist.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
 #include <linux/license.h>
 #include <asm/sections.h>
 #include <linux/tracepoint.h>
@@ -55,6 +56,9 @@
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/module.h>
+
 #if 0
 #define DEBUGP printk
 #else
@@ -68,11 +72,19 @@
 /* If this is set, the section belongs in the init part of the module */
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
 
-/* List of modules, protected by module_mutex or preempt_disable
+/*
+ * Mutex protects:
+ * 1) List of modules (also safely readable with preempt_disable),
+ * 2) module_use links,
+ * 3) module_addr_min/module_addr_max.
  * (delete uses stop_machine/add uses RCU list operations). */
 DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
+#ifdef CONFIG_KGDB_KDB
+struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
+#endif /* CONFIG_KGDB_KDB */
+
 
 /* Block module loading/unloading? */
 int modules_disabled = 0;
@@ -82,7 +94,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 
-/* Bounds of module allocation, for speeding __module_address */
+/* Bounds of module allocation, for speeding __module_address.
+ * Protected by module_mutex. */
 static unsigned long module_addr_min = -1UL, module_addr_max = 0;
 
 int register_module_notifier(struct notifier_block * nb)
@@ -172,8 +185,6 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_gpl[];
 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
-extern const struct kernel_symbol __start___ksymtab_gpl_future[];
-extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
 extern const unsigned long __start___kcrctab[];
 extern const unsigned long __start___kcrctab_gpl[];
 extern const unsigned long __start___kcrctab_gpl_future[];
@@ -323,7 +334,7 @@ static bool find_symbol_in_section(const struct symsearch *syms,
 }
 
 /* Find a symbol and return it, along with, (optional) crc and
- * (optional) module which owns it */
+ * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
 const struct kernel_symbol *find_symbol(const char *name,
 					struct module **owner,
 					const unsigned long **crc,
@@ -364,204 +375,98 @@ EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
 
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
-
-static void *percpu_modalloc(unsigned long size, unsigned long align,
-			     const char *name)
+static inline void __percpu *mod_percpu(struct module *mod)
 {
-	void *ptr;
+	return mod->percpu;
+}
 
+static int percpu_modalloc(struct module *mod,
+			   unsigned long size, unsigned long align)
+{
 	if (align > PAGE_SIZE) {
 		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
-		       name, align, PAGE_SIZE);
+		       mod->name, align, PAGE_SIZE);
 		align = PAGE_SIZE;
 	}
 
-	ptr = __alloc_reserved_percpu(size, align);
-	if (!ptr)
+	mod->percpu = __alloc_reserved_percpu(size, align);
+	if (!mod->percpu) {
 		printk(KERN_WARNING
 		       "Could not allocate %lu bytes percpu data\n", size);
-	return ptr;
-}
-
-static void percpu_modfree(void *freeme)
-{
-	free_percpu(freeme);
-}
-
-#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
-
-/* Number of blocks used and allocated. */
-static unsigned int pcpu_num_used, pcpu_num_allocated;
-/* Size of each block.  -ve means used. */
-static int *pcpu_size;
-
-static int split_block(unsigned int i, unsigned short size)
-{
-	/* Reallocation required? */
-	if (pcpu_num_used + 1 > pcpu_num_allocated) {
-		int *new;
-
-		new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2,
-			       GFP_KERNEL);
-		if (!new)
-			return 0;
-
-		pcpu_num_allocated *= 2;
-		pcpu_size = new;
+		return -ENOMEM;
 	}
-
-	/* Insert a new subblock */
-	memmove(&pcpu_size[i+1], &pcpu_size[i],
-		sizeof(pcpu_size[0]) * (pcpu_num_used - i));
-	pcpu_num_used++;
-
-	pcpu_size[i+1] -= size;
-	pcpu_size[i] = size;
-	return 1;
+	mod->percpu_size = size;
+	return 0;
 }
 
-static inline unsigned int block_size(int val)
+static void percpu_modfree(struct module *mod)
 {
-	if (val < 0)
-		return -val;
-	return val;
+	free_percpu(mod->percpu);
}
 
-static void *percpu_modalloc(unsigned long size, unsigned long align,
-			     const char *name)
+static unsigned int find_pcpusec(Elf_Ehdr *hdr,
+				 Elf_Shdr *sechdrs,
+				 const char *secstrings)
 {
-	unsigned long extra;
-	unsigned int i;
-	void *ptr;
-	int cpu;
-
-	if (align > PAGE_SIZE) {
-		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
-		       name, align, PAGE_SIZE);
-		align = PAGE_SIZE;
-	}
-
-	ptr = __per_cpu_start;
-	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-		/* Extra for alignment requirement. */
-		extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
-		BUG_ON(i == 0 && extra != 0);
-
-		if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
-			continue;
-
-		/* Transfer extra to previous block. */
-		if (pcpu_size[i-1] < 0)
-			pcpu_size[i-1] -= extra;
-		else
-			pcpu_size[i-1] += extra;
-		pcpu_size[i] -= extra;
-		ptr += extra;
-
-		/* Split block if warranted */
-		if (pcpu_size[i] - size > sizeof(unsigned long))
-			if (!split_block(i, size))
-				return NULL;
-
-		/* add the per-cpu scanning areas */
-		for_each_possible_cpu(cpu)
-			kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
-				       GFP_KERNEL);
-
-		/* Mark allocated */
-		pcpu_size[i] = -pcpu_size[i];
-		return ptr;
-	}
-
-	printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
-	       size);
-	return NULL;
+	return find_sec(hdr, sechdrs, secstrings, ".data..percpu");
 }
 
-static void percpu_modfree(void *freeme)
+static void percpu_modcopy(struct module *mod,
+			   const void *from, unsigned long size)
 {
-	unsigned int i;
-	void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
 	int cpu;
 
-	/* First entry is core kernel percpu data. */
-	for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-		if (ptr == freeme) {
-			pcpu_size[i] = -pcpu_size[i];
-			goto free;
-		}
-	}
-	BUG();
-
- free:
-	/* remove the per-cpu scanning areas */
 	for_each_possible_cpu(cpu)
-		kmemleak_free(freeme + per_cpu_offset(cpu));
-
-	/* Merge with previous? */
-	if (pcpu_size[i-1] >= 0) {
-		pcpu_size[i-1] += pcpu_size[i];
-		pcpu_num_used--;
-		memmove(&pcpu_size[i], &pcpu_size[i+1],
-			(pcpu_num_used - i) * sizeof(pcpu_size[0]));
-		i--;
-	}
-	/* Merge with next? */
-	if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
-		pcpu_size[i] += pcpu_size[i+1];
-		pcpu_num_used--;
-		memmove(&pcpu_size[i+1], &pcpu_size[i+2],
-			(pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
-	}
+		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
 }
 
-static int percpu_modinit(void)
+/**
+ * is_module_percpu_address - test whether address is from module static percpu
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to module static percpu area.
+ *
+ * RETURNS:
+ * %true if @addr is from module static percpu area
+ */
+bool is_module_percpu_address(unsigned long addr)
 {
-	pcpu_num_used = 2;
-	pcpu_num_allocated = 2;
-	pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
-			    GFP_KERNEL);
-	/* Static in-kernel percpu data (used). */
-	pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
-	/* Free room. */
-	pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
-	if (pcpu_size[1] < 0) {
-		printk(KERN_ERR "No per-cpu room for modules.\n");
-		pcpu_num_used = 1;
-	}
-
-	return 0;
-}
-__initcall(percpu_modinit);
+	struct module *mod;
+	unsigned int cpu;
 
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+	preempt_disable();
 
-static unsigned int find_pcpusec(Elf_Ehdr *hdr,
-				 Elf_Shdr *sechdrs,
-				 const char *secstrings)
-{
-	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
-}
+	list_for_each_entry_rcu(mod, &modules, list) {
+		if (!mod->percpu_size)
+			continue;
+		for_each_possible_cpu(cpu) {
+			void *start = per_cpu_ptr(mod->percpu, cpu);
 
-static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
-{
-	int cpu;
+			if ((void *)addr >= start &&
+			    (void *)addr < start + mod->percpu_size) {
+				preempt_enable();
+				return true;
+			}
+		}
+	}
 
-	for_each_possible_cpu(cpu)
-		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
+	preempt_enable();
+	return false;
 }
 
 #else /* ... !CONFIG_SMP */
 
-static inline void *percpu_modalloc(unsigned long size, unsigned long align,
-				    const char *name)
+static inline void __percpu *mod_percpu(struct module *mod)
 {
 	return NULL;
 }
-static inline void percpu_modfree(void *pcpuptr)
+static inline int percpu_modalloc(struct module *mod,
+				  unsigned long size, unsigned long align)
+{
+	return -ENOMEM;
+}
+static inline void percpu_modfree(struct module *mod)
 {
-	BUG();
 }
 static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
 					Elf_Shdr *sechdrs,
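The new is_module_percpu_address() above is just a containment test of the address against each CPU's copy of every module's static percpu block. A stand-alone sketch of that range check, with a plain array standing in for per_cpu_ptr() and the RCU-protected module list (NR_CPUS, PCPU_SIZE and pcpu_area are invented for the illustration):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NR_CPUS   4
	#define PCPU_SIZE 64

	/* One simulated per-CPU copy of a module's static percpu area. */
	static char pcpu_area[NR_CPUS][PCPU_SIZE];

	/* Same containment test as is_module_percpu_address(), minus RCU. */
	static bool in_module_percpu(uintptr_t addr)
	{
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			uintptr_t start = (uintptr_t)pcpu_area[cpu];

			if (addr >= start && addr < start + PCPU_SIZE)
				return true;
		}
		return false;
	}

	int main(void)
	{
		printf("%d\n", in_module_percpu((uintptr_t)&pcpu_area[2][10])); /* 1 */
		printf("%d\n", in_module_percpu((uintptr_t)pcpu_area - 1));    /* 0 */
		return 0;
	}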
@@ -569,12 +474,16 @@ static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
 {
 	return 0;
 }
-static inline void percpu_modcopy(void *pcpudst, const void *src,
-				  unsigned long size)
+static inline void percpu_modcopy(struct module *mod,
+				  const void *from, unsigned long size)
 {
 	/* pcpusec should be 0, and size of that section should be 0. */
 	BUG_ON(size != 0);
 }
+bool is_module_percpu_address(unsigned long addr)
+{
+	return false;
+}
 
 #endif /* CONFIG_SMP */
 
@@ -611,34 +520,34 @@ MODINFO_ATTR(srcversion);
 static char last_unloaded_module[MODULE_NAME_LEN+1];
 
 #ifdef CONFIG_MODULE_UNLOAD
+
+EXPORT_TRACEPOINT_SYMBOL(module_get);
+
 /* Init the unload section of the module. */
 static void module_unload_init(struct module *mod)
 {
 	int cpu;
 
-	INIT_LIST_HEAD(&mod->modules_which_use_me);
-	for_each_possible_cpu(cpu)
-		local_set(__module_ref_addr(mod, cpu), 0);
+	INIT_LIST_HEAD(&mod->source_list);
+	INIT_LIST_HEAD(&mod->target_list);
+	for_each_possible_cpu(cpu) {
+		per_cpu_ptr(mod->refptr, cpu)->incs = 0;
+		per_cpu_ptr(mod->refptr, cpu)->decs = 0;
+	}
+
 	/* Hold reference count during initialization. */
-	local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
+	__this_cpu_write(mod->refptr->incs, 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
 
-/* modules using other modules */
-struct module_use
-{
-	struct list_head list;
-	struct module *module_which_uses;
-};
-
 /* Does a already use b? */
 static int already_uses(struct module *a, struct module *b)
 {
 	struct module_use *use;
 
-	list_for_each_entry(use, &b->modules_which_use_me, list) {
-		if (use->module_which_uses == a) {
+	list_for_each_entry(use, &b->source_list, source_list) {
+		if (use->source == a) {
 			DEBUGP("%s uses %s!\n", a->name, b->name);
 			return 1;
 		}
@@ -647,62 +556,68 @@ static int already_uses(struct module *a, struct module *b)
 	return 0;
 }
 
-/* Module a uses b */
-int use_module(struct module *a, struct module *b)
+/*
+ * Module a uses b
+ *  - we add 'a' as a "source", 'b' as a "target" of module use
+ *  - the module_use is added to the list of 'b' sources (so
+ *    'b' can walk the list to see who sourced them), and of 'a'
+ *    targets (so 'a' can see what modules it targets).
+ */
+static int add_module_usage(struct module *a, struct module *b)
 {
 	struct module_use *use;
-	int no_warn, err;
 
-	if (b == NULL || already_uses(a, b)) return 1;
+	DEBUGP("Allocating new usage for %s.\n", a->name);
+	use = kmalloc(sizeof(*use), GFP_ATOMIC);
+	if (!use) {
+		printk(KERN_WARNING "%s: out of memory loading\n", a->name);
+		return -ENOMEM;
+	}
+
+	use->source = a;
+	use->target = b;
+	list_add(&use->source_list, &b->source_list);
+	list_add(&use->target_list, &a->target_list);
+	return 0;
+}
+
+/* Module a uses b: caller needs module_mutex() */
+int ref_module(struct module *a, struct module *b)
+{
+	int err;
 
-	/* If we're interrupted or time out, we fail. */
-	if (wait_event_interruptible_timeout(
-		    module_wq, (err = strong_try_module_get(b)) != -EBUSY,
-		    30 * HZ) <= 0) {
-		printk("%s: gave up waiting for init of module %s.\n",
-		       a->name, b->name);
+	if (b == NULL || already_uses(a, b))
 		return 0;
-	}
 
-	/* If strong_try_module_get() returned a different error, we fail. */
+	/* If module isn't available, we fail. */
+	err = strong_try_module_get(b);
 	if (err)
-		return 0;
+		return err;
 
-	DEBUGP("Allocating new usage for %s.\n", a->name);
-	use = kmalloc(sizeof(*use), GFP_ATOMIC);
-	if (!use) {
-		printk("%s: out of memory loading\n", a->name);
+	err = add_module_usage(a, b);
+	if (err) {
 		module_put(b);
-		return 0;
+		return err;
 	}
-
-	use->module_which_uses = a;
-	list_add(&use->list, &b->modules_which_use_me);
-	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
-	return 1;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(use_module);
+EXPORT_SYMBOL_GPL(ref_module);
 
 /* Clear the unload stuff of the module. */
 static void module_unload_free(struct module *mod)
 {
-	struct module *i;
-
-	list_for_each_entry(i, &modules, list) {
-		struct module_use *use;
+	struct module_use *use, *tmp;
 
-		list_for_each_entry(use, &i->modules_which_use_me, list) {
-			if (use->module_which_uses == mod) {
-				DEBUGP("%s unusing %s\n", mod->name, i->name);
-				module_put(i);
-				list_del(&use->list);
-				kfree(use);
-				sysfs_remove_link(i->holders_dir, mod->name);
-				/* There can be at most one match. */
-				break;
-			}
-		}
+	mutex_lock(&module_mutex);
+	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
+		struct module *i = use->target;
+		DEBUGP("%s unusing %s\n", mod->name, i->name);
+		module_put(i);
+		list_del(&use->source_list);
+		list_del(&use->target_list);
+		kfree(use);
 	}
+	mutex_unlock(&module_mutex);
 }
 
 #ifdef CONFIG_MODULE_FORCE_UNLOAD
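The rewritten dependency tracking above hangs each module_use node on two lists at once: the user's target_list and the used module's source_list, so either side can walk its own edges without scanning the global module list (which is what the old module_unload_free() had to do). A minimal user-space sketch of the same double linkage, using simple singly-linked chains instead of the kernel's struct list_head:

	#include <stdio.h>
	#include <stdlib.h>

	struct module;

	/* One edge "a uses b", threaded onto both a's targets and b's sources. */
	struct module_use {
		struct module *source, *target;
		struct module_use *next_source;	/* chains b's source_list */
		struct module_use *next_target;	/* chains a's target_list */
	};

	struct module {
		const char *name;
		struct module_use *source_list;	/* who uses me */
		struct module_use *target_list;	/* whom I use */
	};

	static int add_module_usage(struct module *a, struct module *b)
	{
		struct module_use *use = malloc(sizeof(*use));

		if (!use)
			return -1;
		use->source = a;
		use->target = b;
		use->next_source = b->source_list;	/* b can find its users */
		b->source_list = use;
		use->next_target = a->target_list;	/* a can find its targets */
		a->target_list = use;
		return 0;
	}

	int main(void)
	{
		struct module ext4 = { "ext4" }, crc = { "crc16" };
		struct module_use *use;

		add_module_usage(&ext4, &crc);
		for (use = crc.source_list; use; use = use->next_source)
			printf("%s is used by %s\n", crc.name, use->source->name);
		return 0;
	}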
@@ -759,12 +674,28 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 
 unsigned int module_refcount(struct module *mod)
 {
-	unsigned int total = 0;
+	unsigned int incs = 0, decs = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		total += local_read(__module_ref_addr(mod, cpu));
-	return total;
+		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
+	/*
+	 * ensure the incs are added up after the decs.
+	 * module_put ensures incs are visible before decs with smp_wmb.
+	 *
+	 * This 2-count scheme avoids the situation where the refcount
+	 * for CPU0 is read, then CPU0 increments the module refcount,
+	 * then CPU1 drops that refcount, then the refcount for CPU1 is
+	 * read. We would record a decrement but not its corresponding
+	 * increment so we would see a low count (disaster).
+	 *
+	 * Rare situation? But module_refcount can be preempted, and we
+	 * might be tallying up 4096+ CPUs. So it is not impossible.
+	 */
+	smp_rmb();
+	for_each_possible_cpu(cpu)
+		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
+	return incs - decs;
 }
 EXPORT_SYMBOL(module_refcount);
 
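The ordering in the new module_refcount() is the load-bearing detail: all decs are summed first, then a read barrier, then all incs, pairing with the smp_wmb() in module_put(). Any get/put pair racing with the tally can then only be observed as an increment without its decrement (count reads high), never the reverse. A single-threaded sketch of the tally itself (NR_CPUS and the refs array are stand-ins for the per-CPU accessors, and the barrier is elided):

	#include <stdio.h>

	#define NR_CPUS 4

	struct module_ref {
		unsigned int incs;
		unsigned int decs;
	} refs[NR_CPUS];

	/* Sum decs first, then incs: a racing get/put pair can only be seen
	 * as "inc without dec" (count too high), never the reverse. */
	static unsigned int module_refcount(void)
	{
		unsigned int incs = 0, decs = 0;
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			decs += refs[cpu].decs;
		/* smp_rmb() would go here in the kernel */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			incs += refs[cpu].incs;
		return incs - decs;
	}

	int main(void)
	{
		refs[0].incs = 3;	/* e.g. two gets on CPU0, one from init */
		refs[1].decs = 2;	/* two puts happened to land on CPU1 */
		printf("refcount = %u\n", module_refcount());	/* prints 1 */
		return 0;
	}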
@@ -800,16 +731,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
 
-	/* Create stop_machine threads since free_module relies on
-	 * a non-failing stop_machine call. */
-	ret = stop_machine_create();
-	if (ret)
-		return ret;
-
-	if (mutex_lock_interruptible(&module_mutex) != 0) {
-		ret = -EINTR;
-		goto out_stop;
-	}
+	if (mutex_lock_interruptible(&module_mutex) != 0)
+		return -EINTR;
 
 	mod = find_module(name);
 	if (!mod) {
@@ -817,7 +740,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		goto out;
 	}
 
-	if (!list_empty(&mod->modules_which_use_me)) {
+	if (!list_empty(&mod->source_list)) {
 		/* Other modules depend on us: get rid of them first. */
 		ret = -EWOULDBLOCK;
 		goto out;
@@ -861,16 +784,14 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
 	async_synchronize_full();
-	mutex_lock(&module_mutex);
+
 	/* Store the name of the last unloaded module for diagnostic purposes */
 	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
-	ddebug_remove_module(mod->name);
-	free_module(mod);
 
- out:
+	free_module(mod);
+	return 0;
+out:
 	mutex_unlock(&module_mutex);
-out_stop:
-	stop_machine_destroy();
 	return ret;
 }
 
@@ -883,9 +804,9 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
 
 	/* Always include a trailing , so userspace can differentiate
 	   between this and the old multi-field proc format. */
-	list_for_each_entry(use, &mod->modules_which_use_me, list) {
+	list_for_each_entry(use, &mod->source_list, source_list) {
 		printed_something = 1;
-		seq_printf(m, "%s,", use->module_which_uses->name);
+		seq_printf(m, "%s,", use->source->name);
 	}
 
 	if (mod->init != NULL && mod->exit == NULL) {
@@ -940,12 +861,15 @@ static struct module_attribute refcnt = {
 void module_put(struct module *module)
 {
 	if (module) {
-		unsigned int cpu = get_cpu();
-		local_dec(__module_ref_addr(module, cpu));
+		preempt_disable();
+		smp_wmb(); /* see comment in module_refcount */
+		__this_cpu_inc(module->refptr->decs);
+
+		trace_module_put(module, _RET_IP_);
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);
-		put_cpu();
+		preempt_enable();
 	}
 }
 EXPORT_SYMBOL(module_put);
@@ -961,11 +885,11 @@ static inline void module_unload_free(struct module *mod)
 {
 }
 
-int use_module(struct module *a, struct module *b)
+int ref_module(struct module *a, struct module *b)
 {
-	return strong_try_module_get(b) == 0;
+	return strong_try_module_get(b);
 }
-EXPORT_SYMBOL_GPL(use_module);
+EXPORT_SYMBOL_GPL(ref_module);
 
 static inline void module_unload_init(struct module *mod)
 {
@@ -1022,11 +946,23 @@ static int try_to_force_load(struct module *mod, const char *reason)
 }
 
 #ifdef CONFIG_MODVERSIONS
+/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
+static unsigned long maybe_relocated(unsigned long crc,
+				     const struct module *crc_owner)
+{
+#ifdef ARCH_RELOCATES_KCRCTAB
+	if (crc_owner == NULL)
+		return crc - (unsigned long)reloc_start;
+#endif
+	return crc;
+}
+
 static int check_version(Elf_Shdr *sechdrs,
 			 unsigned int versindex,
 			 const char *symname,
 			 struct module *mod,
-			 const unsigned long *crc)
+			 const unsigned long *crc,
+			 const struct module *crc_owner)
 {
 	unsigned int i, num_versions;
 	struct modversion_info *versions;
@@ -1047,10 +983,10 @@ static int check_version(Elf_Shdr *sechdrs,
 		if (strcmp(versions[i].name, symname) != 0)
 			continue;
 
-		if (versions[i].crc == *crc)
+		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
 			return 1;
 		DEBUGP("Found checksum %lX vs module %lX\n",
-		       *crc, versions[i].crc);
+		       maybe_relocated(*crc, crc_owner), versions[i].crc);
 		goto bad_version;
 	}
 
@@ -1070,10 +1006,13 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 {
 	const unsigned long *crc;
 
+	/* Since this should be found in kernel (which can't be removed),
+	 * no locking is necessary. */
 	if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
 			 &crc, true, false))
 		BUG();
-	return check_version(sechdrs, versindex, "module_layout", mod, crc);
+	return check_version(sechdrs, versindex, "module_layout", mod, crc,
+			     NULL);
 }
 
 /* First part is kernel version, which we ignore if module has crcs. */
@@ -1091,7 +1030,8 @@ static inline int check_version(Elf_Shdr *sechdrs,
 				unsigned int versindex,
 				const char *symname,
 				struct module *mod,
-				const unsigned long *crc)
+				const unsigned long *crc,
+				const struct module *crc_owner)
 {
 	return 1;
 }
@@ -1110,34 +1050,73 @@ static inline int same_magic(const char *amagic, const char *bmagic,
 }
 #endif /* CONFIG_MODVERSIONS */
 
-/* Resolve a symbol for this module.  I.e. if we find one, record usage.
-   Must be holding module_mutex. */
+/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
 static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
 						  unsigned int versindex,
 						  const char *name,
-						  struct module *mod)
+						  struct module *mod,
+						  char ownername[])
 {
 	struct module *owner;
 	const struct kernel_symbol *sym;
 	const unsigned long *crc;
+	int err;
 
+	mutex_lock(&module_mutex);
 	sym = find_symbol(name, &owner, &crc,
 			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
-	/* use_module can fail due to OOM,
-	   or module initialization or unloading */
-	if (sym) {
-		if (!check_version(sechdrs, versindex, name, mod, crc) ||
-		    !use_module(mod, owner))
-			sym = NULL;
+	if (!sym)
+		goto unlock;
+
+	if (!check_version(sechdrs, versindex, name, mod, crc, owner)) {
+		sym = ERR_PTR(-EINVAL);
+		goto getname;
+	}
+
+	err = ref_module(mod, owner);
+	if (err) {
+		sym = ERR_PTR(err);
+		goto getname;
 	}
+
+getname:
+	/* We must make copy under the lock if we failed to get ref. */
+	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
+unlock:
+	mutex_unlock(&module_mutex);
 	return sym;
 }
 
+static const struct kernel_symbol *resolve_symbol_wait(Elf_Shdr *sechdrs,
+						       unsigned int versindex,
+						       const char *name,
+						       struct module *mod)
+{
+	const struct kernel_symbol *ksym;
+	char ownername[MODULE_NAME_LEN];
+
+	if (wait_event_interruptible_timeout(module_wq,
+			!IS_ERR(ksym = resolve_symbol(sechdrs, versindex, name,
+						      mod, ownername)) ||
+			PTR_ERR(ksym) != -EBUSY,
+					     30 * HZ) <= 0) {
+		printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
+		       mod->name, ownername);
+	}
+	return ksym;
+}
+
 /*
  * /sys/module/foo/sections stuff
  * J. Corbet <corbet@lwn.net>
 */
 #if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
+
+static inline bool sect_empty(const Elf_Shdr *sect)
+{
+	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+}
+
 struct module_sect_attr
 {
 	struct module_attribute mattr;
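resolve_symbol() now reports three distinct outcomes through one pointer: NULL for "not found", a valid pointer on success, and an errno encoded with ERR_PTR() for "found but unusable" — which is what lets resolve_symbol_wait() retry only on -EBUSY and simplify_symbols() report the real error. A compact sketch of that idiom outside the kernel (these simplified macros mirror, but are not, the ones in include/linux/err.h):

	#include <stdio.h>

	/* Simplified ERR_PTR()/IS_ERR()/PTR_ERR(): errno values occupy the
	 * top (never valid) addresses of the address space. */
	#define MAX_ERRNO	4095
	#define ERR_PTR(err)	((void *)(long)(err))
	#define PTR_ERR(ptr)	((long)(ptr))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

	#define EBUSY	16
	#define EINVAL	22

	static int table_ready;

	static const int *lookup(int key)
	{
		static const int value = 42;

		if (!table_ready)
			return ERR_PTR(-EBUSY);	/* owner still initializing: retry */
		if (key < 0)
			return ERR_PTR(-EINVAL);	/* permanent failure */
		return &value;
	}

	int main(void)
	{
		const int *p = lookup(1);

		if (IS_ERR(p))
			printf("error %ld\n", PTR_ERR(p));	/* error -16 */
		else
			printf("value %d\n", *p);
		return 0;
	}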
@@ -1179,7 +1158,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 
 	/* Count loaded sections and allocate structures */
 	for (i = 0; i < nsect; i++)
-		if (sechdrs[i].sh_flags & SHF_ALLOC)
+		if (!sect_empty(&sechdrs[i]))
 			nloaded++;
 	size[0] = ALIGN(sizeof(*sect_attrs)
 			+ nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1197,7 +1176,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 	sattr = &sect_attrs->attrs[0];
 	gattr = &sect_attrs->grp.attrs[0];
 	for (i = 0; i < nsect; i++) {
-		if (! (sechdrs[i].sh_flags & SHF_ALLOC))
+		if (sect_empty(&sechdrs[i]))
 			continue;
 		sattr->address = sechdrs[i].sh_addr;
 		sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
@@ -1205,6 +1184,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 		if (sattr->name == NULL)
 			goto out;
 		sect_attrs->nsections++;
+		sysfs_attr_init(&sattr->mattr.attr);
 		sattr->mattr.show = module_sect_show;
 		sattr->mattr.store = NULL;
 		sattr->mattr.attr.name = sattr->name;
@@ -1244,7 +1224,7 @@ struct module_notes_attrs {
 	struct bin_attribute attrs[0];
 };
 
-static ssize_t module_notes_read(struct kobject *kobj,
+static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
 				 struct bin_attribute *bin_attr,
 				 char *buf, loff_t pos, size_t count)
 {
@@ -1281,7 +1261,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
 	/* Count notes sections and allocate structures.  */
 	notes = 0;
 	for (i = 0; i < nsect; i++)
-		if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
+		if (!sect_empty(&sechdrs[i]) &&
 		    (sechdrs[i].sh_type == SHT_NOTE))
 			++notes;
 
@@ -1297,9 +1277,10 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
 	notes_attrs->notes = notes;
 	nattr = &notes_attrs->attrs[0];
 	for (loaded = i = 0; i < nsect; ++i) {
-		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+		if (sect_empty(&sechdrs[i]))
 			continue;
 		if (sechdrs[i].sh_type == SHT_NOTE) {
+			sysfs_bin_attr_init(nattr);
 			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
 			nattr->attr.mode = S_IRUGO;
 			nattr->size = sechdrs[i].sh_size;
@@ -1354,7 +1335,34 @@ static inline void remove_notes_attrs(struct module *mod)
 #endif
 
 #ifdef CONFIG_SYSFS
-int module_add_modinfo_attrs(struct module *mod)
+static void add_usage_links(struct module *mod)
+{
+#ifdef CONFIG_MODULE_UNLOAD
+	struct module_use *use;
+	int nowarn;
+
+	mutex_lock(&module_mutex);
+	list_for_each_entry(use, &mod->target_list, target_list) {
+		nowarn = sysfs_create_link(use->target->holders_dir,
+					   &mod->mkobj.kobj, mod->name);
+	}
+	mutex_unlock(&module_mutex);
+#endif
+}
+
+static void del_usage_links(struct module *mod)
+{
+#ifdef CONFIG_MODULE_UNLOAD
+	struct module_use *use;
+
+	mutex_lock(&module_mutex);
+	list_for_each_entry(use, &mod->target_list, target_list)
+		sysfs_remove_link(use->target->holders_dir, mod->name);
+	mutex_unlock(&module_mutex);
+#endif
+}
+
+static int module_add_modinfo_attrs(struct module *mod)
 {
 	struct module_attribute *attr;
 	struct module_attribute *temp_attr;
@@ -1372,6 +1380,7 @@ int module_add_modinfo_attrs(struct module *mod)
 		if (!attr->test ||
 		    (attr->test && attr->test(mod))) {
 			memcpy(temp_attr, attr, sizeof(*temp_attr));
+			sysfs_attr_init(&temp_attr->attr);
 			error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr);
 			++temp_attr;
 		}
@@ -1379,7 +1388,7 @@ int module_add_modinfo_attrs(struct module *mod)
 	return error;
 }
 
-void module_remove_modinfo_attrs(struct module *mod)
+static void module_remove_modinfo_attrs(struct module *mod)
 {
 	struct module_attribute *attr;
 	int i;
@@ -1395,7 +1404,7 @@ void module_remove_modinfo_attrs(struct module *mod)
 	kfree(mod->modinfo_attrs);
 }
 
-int mod_sysfs_init(struct module *mod)
+static int mod_sysfs_init(struct module *mod)
 {
 	int err;
 	struct kobject *kobj;
@@ -1429,12 +1438,16 @@ out:
 	return err;
 }
 
-int mod_sysfs_setup(struct module *mod,
+static int mod_sysfs_setup(struct module *mod,
 			   struct kernel_param *kparam,
 			   unsigned int num_params)
 {
 	int err;
 
+	err = mod_sysfs_init(mod);
+	if (err)
+		goto out;
+
 	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
 	if (!mod->holders_dir) {
 		err = -ENOMEM;
@@ -1449,6 +1462,8 @@ int mod_sysfs_setup(struct module *mod,
 	if (err)
 		goto out_unreg_param;
 
+	add_usage_links(mod);
+
 	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
 	return 0;
 
@@ -1458,6 +1473,7 @@ out_unreg_holders:
 	kobject_put(mod->holders_dir);
 out_unreg:
 	kobject_put(&mod->mkobj.kobj);
+out:
 	return err;
 }
 
@@ -1468,14 +1484,40 @@ static void mod_sysfs_fini(struct module *mod)
 
 #else /* CONFIG_SYSFS */
 
+static inline int mod_sysfs_init(struct module *mod)
+{
+	return 0;
+}
+
+static inline int mod_sysfs_setup(struct module *mod,
+				  struct kernel_param *kparam,
+				  unsigned int num_params)
+{
+	return 0;
+}
+
+static inline int module_add_modinfo_attrs(struct module *mod)
+{
+	return 0;
+}
+
+static inline void module_remove_modinfo_attrs(struct module *mod)
+{
+}
+
 static void mod_sysfs_fini(struct module *mod)
 {
 }
 
+static void del_usage_links(struct module *mod)
+{
+}
+
 #endif /* CONFIG_SYSFS */
 
 static void mod_kobject_remove(struct module *mod)
 {
+	del_usage_links(mod);
 	module_remove_modinfo_attrs(mod);
 	module_param_sysfs_remove(mod);
 	kobject_put(mod->mkobj.drivers_dir);
@@ -1494,15 +1536,22 @@ static int __unlink_module(void *_mod)
 	return 0;
 }
 
-/* Free a module, remove from lists, etc (must hold module_mutex). */
+/* Free a module, remove from lists, etc. */
 static void free_module(struct module *mod)
 {
+	trace_module_free(mod);
+
 	/* Delete from various lists */
+	mutex_lock(&module_mutex);
 	stop_machine(__unlink_module, mod, NULL);
+	mutex_unlock(&module_mutex);
 	remove_notes_attrs(mod);
 	remove_sect_attrs(mod);
 	mod_kobject_remove(mod);
 
+	/* Remove dynamic debug info */
+	ddebug_remove_module(mod->name);
+
 	/* Arch-specific cleanup. */
 	module_arch_cleanup(mod);
 
@@ -1515,17 +1564,20 @@ static void free_module(struct module *mod)
 	/* This may be NULL, but that's OK */
 	module_free(mod, mod->module_init);
 	kfree(mod->args);
-	if (mod->percpu)
-		percpu_modfree(mod->percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	percpu_modfree(mod);
+#if defined(CONFIG_MODULE_UNLOAD)
 	if (mod->refptr)
-		percpu_modfree(mod->refptr);
+		free_percpu(mod->refptr);
 #endif
 	/* Free lock-classes: */
 	lockdep_free_key_range(mod->module_core, mod->core_size);
 
 	/* Finally, free the core (containing the module structure) */
 	module_free(mod, mod->module_core);
+
+#ifdef CONFIG_MPU
+	update_protections(current->mm);
+#endif
 }
 
 void *__symbol_get(const char *symbol)
@@ -1546,6 +1598,8 @@ EXPORT_SYMBOL_GPL(__symbol_get);
 /*
  * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
+ *
+ * You must hold the module_mutex.
 */
 static int verify_export_symbols(struct module *mod)
 {
@@ -1611,27 +1665,29 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
 			break;
 
 		case SHN_UNDEF:
-			ksym = resolve_symbol(sechdrs, versindex,
-					      strtab + sym[i].st_name, mod);
+			ksym = resolve_symbol_wait(sechdrs, versindex,
+						   strtab + sym[i].st_name,
+						   mod);
 			/* Ok if resolved.  */
-			if (ksym) {
+			if (ksym && !IS_ERR(ksym)) {
 				sym[i].st_value = ksym->value;
 				break;
 			}
 
 			/* Ok if weak.  */
-			if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
+			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
 				break;
 
-			printk(KERN_WARNING "%s: Unknown symbol %s\n",
-			       mod->name, strtab + sym[i].st_name);
-			ret = -ENOENT;
+			printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n",
+			       mod->name, strtab + sym[i].st_name,
+			       PTR_ERR(ksym));
+			ret = PTR_ERR(ksym) ?: -ENOENT;
 			break;
 
 		default:
 			/* Divert to percpu allocation if a percpu var. */
 			if (sym[i].st_shndx == pcpuindex)
-				secbase = (unsigned long)mod->percpu;
+				secbase = (unsigned long)mod_percpu(mod);
 			else
 				secbase = sechdrs[sym[i].st_shndx].sh_addr;
 			sym[i].st_value += secbase;
@@ -1783,6 +1839,17 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
 	}
 }
 
+static void free_modinfo(struct module *mod)
+{
+	struct module_attribute *attr;
+	int i;
+
+	for (i = 0; (attr = modinfo_attrs[i]); i++) {
+		if (attr->free)
+			attr->free(mod);
+	}
+}
+
 #ifdef CONFIG_KALLSYMS
 
 /* lookup symbol in given range of kernel_symbols */
@@ -1848,13 +1915,93 @@ static char elf_type(const Elf_Sym *sym,
 	return '?';
 }
 
+static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
+			   unsigned int shnum)
+{
+	const Elf_Shdr *sec;
+
+	if (src->st_shndx == SHN_UNDEF
+	    || src->st_shndx >= shnum
+	    || !src->st_name)
+		return false;
+
+	sec = sechdrs + src->st_shndx;
+	if (!(sec->sh_flags & SHF_ALLOC)
+#ifndef CONFIG_KALLSYMS_ALL
+	    || !(sec->sh_flags & SHF_EXECINSTR)
+#endif
+	    || (sec->sh_entsize & INIT_OFFSET_MASK))
+		return false;
+
+	return true;
+}
+
+static unsigned long layout_symtab(struct module *mod,
+				   Elf_Shdr *sechdrs,
+				   unsigned int symindex,
+				   unsigned int strindex,
+				   const Elf_Ehdr *hdr,
+				   const char *secstrings,
+				   unsigned long *pstroffs,
+				   unsigned long *strmap)
+{
+	unsigned long symoffs;
+	Elf_Shdr *symsect = sechdrs + symindex;
+	Elf_Shdr *strsect = sechdrs + strindex;
+	const Elf_Sym *src;
+	const char *strtab;
+	unsigned int i, nsrc, ndst;
+
+	/* Put symbol section at end of init part of module. */
+	symsect->sh_flags |= SHF_ALLOC;
+	symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
+					 symindex) | INIT_OFFSET_MASK;
+	DEBUGP("\t%s\n", secstrings + symsect->sh_name);
+
+	src = (void *)hdr + symsect->sh_offset;
+	nsrc = symsect->sh_size / sizeof(*src);
+	strtab = (void *)hdr + strsect->sh_offset;
+	for (ndst = i = 1; i < nsrc; ++i, ++src)
+		if (is_core_symbol(src, sechdrs, hdr->e_shnum)) {
+			unsigned int j = src->st_name;
+
+			while(!__test_and_set_bit(j, strmap) && strtab[j])
+				++j;
+			++ndst;
+		}
+
+	/* Append room for core symbols at end of core part. */
+	symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
+	mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
+
+	/* Put string table section at end of init part of module. */
+	strsect->sh_flags |= SHF_ALLOC;
+	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
+					 strindex) | INIT_OFFSET_MASK;
+	DEBUGP("\t%s\n", secstrings + strsect->sh_name);
+
+	/* Append room for core symbols' strings at end of core part. */
+	*pstroffs = mod->core_size;
+	__set_bit(0, strmap);
+	mod->core_size += bitmap_weight(strmap, strsect->sh_size);
+
+	return symoffs;
+}
+
 static void add_kallsyms(struct module *mod,
 			 Elf_Shdr *sechdrs,
+			 unsigned int shnum,
 			 unsigned int symindex,
 			 unsigned int strindex,
-			 const char *secstrings)
+			 unsigned long symoffs,
+			 unsigned long stroffs,
+			 const char *secstrings,
+			 unsigned long *strmap)
 {
-	unsigned int i;
+	unsigned int i, ndst;
+	const Elf_Sym *src;
+	Elf_Sym *dst;
+	char *s;
 
 	mod->symtab = (void *)sechdrs[symindex].sh_addr;
 	mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
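layout_symtab() above sets a bit in strmap for every string-table byte that a core symbol needs; add_kallsyms() (next hunk) then relocates each kept symbol's st_name to bitmap_weight(strmap, old offset) — a rank query over the bitmap, which is exactly that byte's position in the compacted table. A stand-alone sketch of the compaction (bitmap_weight() is approximated with a byte-array loop; the strings are invented):

	#include <stdio.h>

	/* Compact strtab, keeping only bytes marked in 'keep'.  The new offset
	 * of a kept byte is the number of kept bytes before it (its "rank"). */
	static size_t rank(const unsigned char *keep, size_t upto)
	{
		size_t i, w = 0;

		for (i = 0; i < upto; i++)
			w += keep[i];	/* bitmap_weight(strmap, upto) */
		return w;
	}

	int main(void)
	{
		const char strtab[] = "\0init_fn\0core_fn\0";	/* names at 1 and 9 */
		unsigned char keep[sizeof(strtab)] = { 1 };	/* keep leading NUL */
		char out[sizeof(strtab)];
		size_t i, n = 0;

		/* Mark "core_fn" (offset 9) plus its NUL, as layout_symtab does. */
		for (i = 9; strtab[i]; i++)
			keep[i] = 1;
		keep[i] = 1;

		for (i = 0; i < sizeof(strtab); i++)
			if (keep[i])
				out[n++] = strtab[i];

		printf("new st_name = %zu, string = \"%s\"\n",
		       rank(keep, 9), out + rank(keep, 9));	/* 1, "core_fn" */
		return 0;
	}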
@@ -1864,13 +2011,46 @@ static void add_kallsyms(struct module *mod,
 	for (i = 0; i < mod->num_symtab; i++)
 		mod->symtab[i].st_info
 			= elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
+
+	mod->core_symtab = dst = mod->module_core + symoffs;
+	src = mod->symtab;
+	*dst = *src;
+	for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
+		if (!is_core_symbol(src, sechdrs, shnum))
+			continue;
+		dst[ndst] = *src;
+		dst[ndst].st_name = bitmap_weight(strmap, dst[ndst].st_name);
+		++ndst;
+	}
+	mod->core_num_syms = ndst;
+
+	mod->core_strtab = s = mod->module_core + stroffs;
+	for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
+		if (test_bit(i, strmap))
+			*++s = mod->strtab[i];
 }
 #else
+static inline unsigned long layout_symtab(struct module *mod,
+					  Elf_Shdr *sechdrs,
+					  unsigned int symindex,
+					  unsigned int strindex,
+					  const Elf_Ehdr *hdr,
+					  const char *secstrings,
+					  unsigned long *pstroffs,
+					  unsigned long *strmap)
+{
+	return 0;
+}
+
 static inline void add_kallsyms(struct module *mod,
 				Elf_Shdr *sechdrs,
+				unsigned int shnum,
 				unsigned int symindex,
 				unsigned int strindex,
-				const char *secstrings)
+				unsigned long symoffs,
+				unsigned long stroffs,
+				const char *secstrings,
+				const unsigned long *strmap)
 {
 }
 #endif /* CONFIG_KALLSYMS */
@@ -1884,16 +2064,24 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
 #endif
 }
 
+static void dynamic_debug_remove(struct _ddebug *debug)
+{
+	if (debug)
+		ddebug_remove_module(debug->modname);
+}
+
 static void *module_alloc_update_bounds(unsigned long size)
 {
 	void *ret = module_alloc(size);
 
 	if (ret) {
+		mutex_lock(&module_mutex);
 		/* Update module bounds. */
 		if ((unsigned long)ret < module_addr_min)
 			module_addr_min = (unsigned long)ret;
 		if ((unsigned long)ret + size > module_addr_max)
 			module_addr_max = (unsigned long)ret + size;
+		mutex_unlock(&module_mutex);
 	}
 	return ret;
 }
@@ -1905,9 +2093,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 	unsigned int i;
 
 	/* only scan the sections containing data */
-	kmemleak_scan_area(mod->module_core, (unsigned long)mod -
-			   (unsigned long)mod->module_core,
-			   sizeof(struct module), GFP_KERNEL);
+	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 
 	for (i = 1; i < hdr->e_shnum; i++) {
 		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -1916,8 +2102,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
 			continue;
 
-		kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
-				   (unsigned long)mod->module_core,
+		kmemleak_scan_area((void *)sechdrs[i].sh_addr,
 				   sechdrs[i].sh_size, GFP_KERNEL);
 	}
 }
@@ -1944,7 +2129,12 @@ static noinline struct module *load_module(void __user *umod,
 	unsigned int modindex, versindex, infoindex, pcpuindex;
 	struct module *mod;
 	long err = 0;
-	void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
+	void *ptr = NULL; /* Stops spurious gcc warning */
+	unsigned long symoffs, stroffs, *strmap;
+	void __percpu *percpu;
+	struct _ddebug *debug = NULL;
+	unsigned int num_debug = 0;
+
 	mm_segment_t old_fs;
 
 	DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
@@ -2026,11 +2216,6 @@ static noinline struct module *load_module(void __user *umod,
 	/* Don't keep modinfo and version sections. */
 	sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
 	sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
-#ifdef CONFIG_KALLSYMS
-	/* Keep symbol and string tables for decoding later. */
-	sechdrs[symindex].sh_flags |= SHF_ALLOC;
-	sechdrs[strindex].sh_flags |= SHF_ALLOC;
-#endif
 
 	/* Check module struct version now, before we try to use module. */
 	if (!check_modstruct_version(sechdrs, versindex, mod)) {
@@ -2066,8 +2251,10 @@ static noinline struct module *load_module(void __user *umod,
 		goto free_hdr;
 	}
 
-	if (find_module(mod->name)) {
-		err = -EEXIST;
+	strmap = kzalloc(BITS_TO_LONGS(sechdrs[strindex].sh_size)
+			 * sizeof(long), GFP_KERNEL);
+	if (!strmap) {
+		err = -ENOMEM;
 		goto free_mod;
 	}
 
@@ -2080,21 +2267,21 @@ static noinline struct module *load_module(void __user *umod,
 
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
-		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
-					 sechdrs[pcpuindex].sh_addralign,
-					 mod->name);
-		if (!percpu) {
-			err = -ENOMEM;
+		err = percpu_modalloc(mod, sechdrs[pcpuindex].sh_size,
+				      sechdrs[pcpuindex].sh_addralign);
+		if (err)
 			goto free_mod;
-		}
 		sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
-		mod->percpu = percpu;
 	}
+	/* Keep this around for failure path. */
+	percpu = mod_percpu(mod);
 
 	/* Determine total sizes, and put offsets in sh_entsize.  For now
 	   this is done generically; there doesn't appear to be any
 	   special cases for the architectures. */
 	layout_sections(mod, hdr, sechdrs, secstrings);
+	symoffs = layout_symtab(mod, sechdrs, symindex, strindex, hdr,
+				secstrings, &stroffs, strmap);
 
 	/* Do the allocs. */
 	ptr = module_alloc_update_bounds(mod->core_size);
@@ -2151,9 +2338,8 @@ static noinline struct module *load_module(void __user *umod,
 	mod = (void *)sechdrs[modindex].sh_addr;
 	kmemleak_load_module(mod, hdr, sechdrs, secstrings);
 
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
-				      mod->name);
+#if defined(CONFIG_MODULE_UNLOAD)
+	mod->refptr = alloc_percpu(struct module_ref);
 	if (!mod->refptr) {
 		err = -ENOMEM;
 		goto free_init;
@@ -2162,11 +2348,6 @@ static noinline struct module *load_module(void __user *umod,
 	/* Now we've moved module, initialize linked lists, etc. */
 	module_unload_init(mod);
 
-	/* add kobject, so we can reference it. */
-	err = mod_sysfs_init(mod);
-	if (err)
-		goto free_unload;
-
 	/* Set up license info based on the info section */
 	set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
 
@@ -2228,10 +2409,6 @@ static noinline struct module *load_module(void __user *umod,
 				  sizeof(*mod->ctors), &mod->num_ctors);
 #endif
 
-#ifdef CONFIG_MARKERS
-	mod->markers = section_objs(hdr, sechdrs, secstrings, "__markers",
-				    sizeof(*mod->markers), &mod->num_markers);
-#endif
 #ifdef CONFIG_TRACEPOINTS
 	mod->tracepoints = section_objs(hdr, sechdrs, secstrings,
 					"__tracepoints",
@@ -2243,6 +2420,12 @@ static noinline struct module *load_module(void __user *umod,
 					 "_ftrace_events",
 					 sizeof(*mod->trace_events),
 					 &mod->num_trace_events);
+	/*
+	 * This section contains pointers to allocated objects in the trace
+	 * code and not scanning it leads to false positives.
+	 */
+	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
+			   mod->num_trace_events, GFP_KERNEL);
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 	/* sechdrs[0].sh_size is always zero */
@@ -2289,31 +2472,23 @@ static noinline struct module *load_module(void __user *umod,
 		goto cleanup;
 	}
 
-	/* Find duplicate symbols */
-	err = verify_export_symbols(mod);
-	if (err < 0)
-		goto cleanup;
-
 	/* Set up and sort exception table */
 	mod->extable = section_objs(hdr, sechdrs, secstrings, "__ex_table",
 				    sizeof(*mod->extable), &mod->num_exentries);
 	sort_extable(mod->extable, mod->extable + mod->num_exentries);
 
 	/* Finally, copy percpu area over. */
-	percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
+	percpu_modcopy(mod, (void *)sechdrs[pcpuindex].sh_addr,
 		       sechdrs[pcpuindex].sh_size);
 
-	add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
-
-	if (!mod->taints) {
-		struct _ddebug *debug;
-		unsigned int num_debug;
+	add_kallsyms(mod, sechdrs, hdr->e_shnum, symindex, strindex,
+		     symoffs, stroffs, secstrings, strmap);
+	kfree(strmap);
+	strmap = NULL;
 
+	if (!mod->taints)
 		debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
 				     sizeof(*debug), &num_debug);
-		if (debug)
-			dynamic_debug_setup(debug, num_debug);
-	}
 
 	err = module_finalize(hdr, sechdrs, mod);
 	if (err < 0)
@@ -2349,7 +2524,22 @@ static noinline struct module *load_module(void __user *umod,
 	 * function to insert in a way safe to concurrent readers.
 	 * The mutex protects against concurrent writers.
 	 */
+	mutex_lock(&module_mutex);
+	if (find_module(mod->name)) {
+		err = -EEXIST;
+		goto unlock;
+	}
+
+	if (debug)
+		dynamic_debug_setup(debug, num_debug);
+
+	/* Find duplicate symbols */
+	err = verify_export_symbols(mod);
+	if (err < 0)
+		goto ddebug;
+
 	list_add_rcu(&mod->list, &modules);
+	mutex_unlock(&module_mutex);
 
 	err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, NULL);
 	if (err < 0)
@@ -2358,38 +2548,44 @@ static noinline struct module *load_module(void __user *umod,
 	err = mod_sysfs_setup(mod, mod->kp, mod->num_kp);
 	if (err < 0)
 		goto unlink;
+
 	add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
 	add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
 
 	/* Get rid of temporary copy */
 	vfree(hdr);
 
+	trace_module_load(mod);
+
 	/* Done! */
 	return mod;
 
  unlink:
+	mutex_lock(&module_mutex);
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
+ ddebug:
+	dynamic_debug_remove(debug);
+ unlock:
+	mutex_unlock(&module_mutex);
 	synchronize_sched();
 	module_arch_cleanup(mod);
  cleanup:
-	kobject_del(&mod->mkobj.kobj);
-	kobject_put(&mod->mkobj.kobj);
- free_unload:
+	free_modinfo(mod);
 	module_unload_free(mod);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+#if defined(CONFIG_MODULE_UNLOAD)
+	free_percpu(mod->refptr);
  free_init:
-	percpu_modfree(mod->refptr);
 #endif
 	module_free(mod, mod->module_init);
  free_core:
 	module_free(mod, mod->module_core);
 	/* mod will be freed with core. Don't access it beyond this line! */
 free_percpu:
-	if (percpu)
-		percpu_modfree(percpu);
+	free_percpu(percpu);
 free_mod:
 	kfree(args);
+	kfree(strmap);
 free_hdr:
 	vfree(hdr);
 	return ERR_PTR(err);
@@ -2422,19 +2618,10 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 	if (!capable(CAP_SYS_MODULE) || modules_disabled)
 		return -EPERM;
 
-	/* Only one module load at a time, please */
-	if (mutex_lock_interruptible(&module_mutex) != 0)
-		return -EINTR;
-
 	/* Do all the hard work */
 	mod = load_module(umod, len, uargs);
-	if (IS_ERR(mod)) {
-		mutex_unlock(&module_mutex);
+	if (IS_ERR(mod))
 		return PTR_ERR(mod);
-	}
-
-	/* Drop lock so they can recurse */
-	mutex_unlock(&module_mutex);
 
 	blocking_notifier_call_chain(&module_notify_list,
 			MODULE_STATE_COMING, mod);
@@ -2451,9 +2638,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 		module_put(mod);
 		blocking_notifier_call_chain(&module_notify_list,
 					     MODULE_STATE_GOING, mod);
-		mutex_lock(&module_mutex);
 		free_module(mod);
-		mutex_unlock(&module_mutex);
 		wake_up(&module_wq);
 		return ret;
 	}
@@ -2479,6 +2664,11 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 	/* Drop initial reference. */
 	module_put(mod);
 	trim_init_extable(mod);
+#ifdef CONFIG_KALLSYMS
+	mod->num_symtab = mod->core_num_syms;
+	mod->symtab = mod->core_symtab;
+	mod->strtab = mod->core_strtab;
+#endif
 	module_free(mod, mod->module_init);
 	mod->module_init = NULL;
 	mod->init_size = 0;
@@ -2940,27 +3130,12 @@ void module_layout(struct module *mod,
 		   struct modversion_info *ver,
 		   struct kernel_param *kp,
 		   struct kernel_symbol *ks,
-		   struct marker *marker,
 		   struct tracepoint *tp)
 {
 }
 EXPORT_SYMBOL(module_layout);
 #endif
 
-#ifdef CONFIG_MARKERS
-void module_update_markers(void)
-{
-	struct module *mod;
-
-	mutex_lock(&module_mutex);
-	list_for_each_entry(mod, &modules, list)
-		if (!mod->taints)
-			marker_update_probe_range(mod->markers,
-				mod->markers + mod->num_markers);
-	mutex_unlock(&module_mutex);
-}
-#endif
-
 #ifdef CONFIG_TRACEPOINTS
 void module_update_tracepoints(void)
 {