Diffstat (limited to 'kernel/module.c')

 -rw-r--r--  kernel/module.c | 66
 1 file changed, 26 insertions(+), 40 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index af5ebd21d77b..f5a3b1e8ec51 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -641,8 +641,6 @@ static int module_unload_init(struct module *mod)
 
 	/* Hold reference count during initialization. */
 	__this_cpu_write(mod->refptr->incs, 1);
-	/* Backwards compatibility macros put refcount during init. */
-	mod->waiter = current;
 
 	return 0;
 }
@@ -768,16 +766,9 @@ static int __try_stop_module(void *_sref)
 
 static int try_stop_module(struct module *mod, int flags, int *forced)
 {
-	if (flags & O_NONBLOCK) {
-		struct stopref sref = { mod, flags, forced };
+	struct stopref sref = { mod, flags, forced };
 
-		return stop_machine(__try_stop_module, &sref, NULL);
-	} else {
-		/* We don't need to stop the machine for this. */
-		mod->state = MODULE_STATE_GOING;
-		synchronize_sched();
-		return 0;
-	}
+	return stop_machine(__try_stop_module, &sref, NULL);
 }
 
 unsigned long module_refcount(struct module *mod)
@@ -810,21 +801,6 @@ EXPORT_SYMBOL(module_refcount);
 /* This exists whether we can unload or not */
 static void free_module(struct module *mod);
 
-static void wait_for_zero_refcount(struct module *mod)
-{
-	/* Since we might sleep for some time, release the mutex first */
-	mutex_unlock(&module_mutex);
-	for (;;) {
-		pr_debug("Looking at refcount...\n");
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (module_refcount(mod) == 0)
-			break;
-		schedule();
-	}
-	current->state = TASK_RUNNING;
-	mutex_lock(&module_mutex);
-}
-
 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		unsigned int, flags)
 {
@@ -839,6 +815,11 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
 
+	if (!(flags & O_NONBLOCK)) {
+		printk(KERN_WARNING
+		       "waiting module removal not supported: please upgrade");
+	}
+
 	if (mutex_lock_interruptible(&module_mutex) != 0)
 		return -EINTR;
 
@@ -856,8 +837,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 
 	/* Doing init or already dying? */
 	if (mod->state != MODULE_STATE_LIVE) {
-		/* FIXME: if (force), slam module count and wake up
-                   waiter --RR */
+		/* FIXME: if (force), slam module count damn the torpedoes */
 		pr_debug("%s already dying\n", mod->name);
 		ret = -EBUSY;
 		goto out;
@@ -873,18 +853,11 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		}
 	}
 
-	/* Set this up before setting mod->state */
-	mod->waiter = current;
-
 	/* Stop the machine so refcounts can't move and disable module. */
 	ret = try_stop_module(mod, flags, &forced);
 	if (ret != 0)
 		goto out;
 
-	/* Never wait if forced. */
-	if (!forced && module_refcount(mod) != 0)
-		wait_for_zero_refcount(mod);
-
 	mutex_unlock(&module_mutex);
 	/* Final destruction now no one is using it. */
 	if (mod->exit != NULL)
@@ -1002,9 +975,6 @@ void module_put(struct module *module)
 		__this_cpu_inc(module->refptr->decs);
 
 		trace_module_put(module, _RET_IP_);
-		/* Maybe they're waiting for us to drop reference? */
-		if (unlikely(!module_is_live(module)))
-			wake_up_process(module->waiter);
 		preempt_enable();
 	}
 }
@@ -2728,7 +2698,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
 	return 0;
 }
 
-static void find_module_sections(struct module *mod, struct load_info *info)
+static int find_module_sections(struct module *mod, struct load_info *info)
 {
 	mod->kp = section_objs(info, "__param",
 			       sizeof(*mod->kp), &mod->num_kp);
@@ -2758,6 +2728,18 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 #ifdef CONFIG_CONSTRUCTORS
 	mod->ctors = section_objs(info, ".ctors",
 				  sizeof(*mod->ctors), &mod->num_ctors);
+	if (!mod->ctors)
+		mod->ctors = section_objs(info, ".init_array",
+				sizeof(*mod->ctors), &mod->num_ctors);
+	else if (find_sec(info, ".init_array")) {
+		/*
+		 * This shouldn't happen with same compiler and binutils
+		 * building all parts of the module.
+		 */
+		printk(KERN_WARNING "%s: has both .ctors and .init_array.\n",
+		       mod->name);
+		return -EINVAL;
+	}
 #endif
 
 #ifdef CONFIG_TRACEPOINTS
@@ -2795,6 +2777,8 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 
 	info->debug = section_objs(info, "__verbose",
 				   sizeof(*info->debug), &info->num_debug);
+
+	return 0;
 }
 
 static int move_module(struct module *mod, struct load_info *info)
@@ -3248,7 +3232,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
 	/* Now we've got everything in the final locations, we can
 	 * find optional sections. */
-	find_module_sections(mod, info);
+	err = find_module_sections(mod, info);
+	if (err)
+		goto free_unload;
 
 	err = check_module_license_and_versions(mod);
 	if (err)
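
For context, a minimal userspace sketch (not part of the patch) of how delete_module(2) is typically driven once blocking removal is gone: callers are expected to pass O_NONBLOCK and handle EWOULDBLOCK/EBUSY themselves, since the kernel no longer waits for the refcount to reach zero. The module name "example_mod" below is hypothetical, and glibc provides no wrapper for this syscall, so it is invoked via syscall(2).

/*
 * Hypothetical illustration, not part of the kernel patch: remove a
 * module non-blockingly from userspace.  With this patch applied,
 * O_NONBLOCK is the only supported mode; omitting it merely triggers
 * the new "please upgrade" warning and still does not block.
 */
#include <errno.h>
#include <fcntl.h>		/* O_NONBLOCK */
#include <stdio.h>
#include <sys/syscall.h>	/* SYS_delete_module */
#include <unistd.h>		/* syscall() */

int main(void)
{
	/* "example_mod" is a placeholder module name. */
	if (syscall(SYS_delete_module, "example_mod", O_NONBLOCK) != 0) {
		if (errno == EWOULDBLOCK)
			fprintf(stderr, "module still in use\n");
		else
			perror("delete_module");
		return 1;
	}
	return 0;
}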