diff options
| author | Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> | 2014-11-09 18:00:29 -0500 |
|---|---|---|
| committer | Rusty Russell <rusty@rustcorp.com.au> | 2014-11-11 01:37:46 -0500 |
| commit | e513cc1c07e2ab93a4514eec9833e031df3e30bb (patch) | |
| tree | 77f3db46fd317bb4074adee61ec4beb2a43a79cc | |
| parent | 2f35c41f58a978dfa44ffa102249d556caa99eeb (diff) | |
module: Remove stop_machine from module unloading
Remove stop_machine from module unloading by adding new reference
counting algorithm.
This atomic refcounter works like a semaphore, it can get (be
incremented) only when the counter is not 0. When loading a module,
kmodule subsystem sets the counter to MODULE_REF_BASE (= 1). And when
unloading the module, it subtracts MODULE_REF_BASE from the counter.
If no one refers to the module, the refcounter becomes 0 and we can
remove the module safely. If someone refers to it, we try to recover
the counter by adding MODULE_REF_BASE unless the counter becomes 0,
because the referrer can put the module right before recovering.
If the recovery fails, we can get a 0 refcount, and since it will
never be incremented again, the module can be removed safely too.
Note that __module_get() forcibly increments the module refcounter;
users should use try_module_get() instead.
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
| -rw-r--r-- | kernel/module.c | 67 |
1 files changed, 39 insertions, 28 deletions
diff --git a/kernel/module.c b/kernel/module.c index b1d485df5ac1..e772595d73db 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -42,7 +42,6 @@ | |||
| 42 | #include <linux/vermagic.h> | 42 | #include <linux/vermagic.h> |
| 43 | #include <linux/notifier.h> | 43 | #include <linux/notifier.h> |
| 44 | #include <linux/sched.h> | 44 | #include <linux/sched.h> |
| 45 | #include <linux/stop_machine.h> | ||
| 46 | #include <linux/device.h> | 45 | #include <linux/device.h> |
| 47 | #include <linux/string.h> | 46 | #include <linux/string.h> |
| 48 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
| @@ -98,7 +97,7 @@ | |||
| 98 | * 1) List of modules (also safely readable with preempt_disable), | 97 | * 1) List of modules (also safely readable with preempt_disable), |
| 99 | * 2) module_use links, | 98 | * 2) module_use links, |
| 100 | * 3) module_addr_min/module_addr_max. | 99 | * 3) module_addr_min/module_addr_max. |
| 101 | * (delete uses stop_machine/add uses RCU list operations). */ | 100 | * (delete and add uses RCU list operations). */ |
| 102 | DEFINE_MUTEX(module_mutex); | 101 | DEFINE_MUTEX(module_mutex); |
| 103 | EXPORT_SYMBOL_GPL(module_mutex); | 102 | EXPORT_SYMBOL_GPL(module_mutex); |
| 104 | static LIST_HEAD(modules); | 103 | static LIST_HEAD(modules); |
| @@ -628,14 +627,23 @@ static char last_unloaded_module[MODULE_NAME_LEN+1]; | |||
| 628 | 627 | ||
| 629 | EXPORT_TRACEPOINT_SYMBOL(module_get); | 628 | EXPORT_TRACEPOINT_SYMBOL(module_get); |
| 630 | 629 | ||
| 630 | /* MODULE_REF_BASE is the base reference count by kmodule loader. */ | ||
| 631 | #define MODULE_REF_BASE 1 | ||
| 632 | |||
| 631 | /* Init the unload section of the module. */ | 633 | /* Init the unload section of the module. */ |
| 632 | static int module_unload_init(struct module *mod) | 634 | static int module_unload_init(struct module *mod) |
| 633 | { | 635 | { |
| 636 | /* | ||
| 637 | * Initialize reference counter to MODULE_REF_BASE. | ||
| 638 | * refcnt == 0 means module is going. | ||
| 639 | */ | ||
| 640 | atomic_set(&mod->refcnt, MODULE_REF_BASE); | ||
| 641 | |||
| 634 | INIT_LIST_HEAD(&mod->source_list); | 642 | INIT_LIST_HEAD(&mod->source_list); |
| 635 | INIT_LIST_HEAD(&mod->target_list); | 643 | INIT_LIST_HEAD(&mod->target_list); |
| 636 | 644 | ||
| 637 | /* Hold reference count during initialization. */ | 645 | /* Hold reference count during initialization. */ |
| 638 | atomic_set(&mod->refcnt, 1); | 646 | atomic_inc(&mod->refcnt); |
| 639 | 647 | ||
| 640 | return 0; | 648 | return 0; |
| 641 | } | 649 | } |
| @@ -734,39 +742,39 @@ static inline int try_force_unload(unsigned int flags) | |||
| 734 | } | 742 | } |
| 735 | #endif /* CONFIG_MODULE_FORCE_UNLOAD */ | 743 | #endif /* CONFIG_MODULE_FORCE_UNLOAD */ |
| 736 | 744 | ||
| 737 | struct stopref | 745 | /* Try to release refcount of module, 0 means success. */ |
| 746 | static int try_release_module_ref(struct module *mod) | ||
| 738 | { | 747 | { |
| 739 | struct module *mod; | 748 | int ret; |
| 740 | int flags; | ||
| 741 | int *forced; | ||
| 742 | }; | ||
| 743 | 749 | ||
| 744 | /* Whole machine is stopped with interrupts off when this runs. */ | 750 | /* Try to decrement refcnt which we set at loading */ |
| 745 | static int __try_stop_module(void *_sref) | 751 | ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt); |
| 746 | { | 752 | BUG_ON(ret < 0); |
| 747 | struct stopref *sref = _sref; | 753 | if (ret) |
| 754 | /* Someone can put this right now, recover with checking */ | ||
| 755 | ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0); | ||
| 756 | |||
| 757 | return ret; | ||
| 758 | } | ||
| 748 | 759 | ||
| 760 | static int try_stop_module(struct module *mod, int flags, int *forced) | ||
| 761 | { | ||
| 749 | /* If it's not unused, quit unless we're forcing. */ | 762 | /* If it's not unused, quit unless we're forcing. */ |
| 750 | if (module_refcount(sref->mod) != 0) { | 763 | if (try_release_module_ref(mod) != 0) { |
| 751 | if (!(*sref->forced = try_force_unload(sref->flags))) | 764 | *forced = try_force_unload(flags); |
| 765 | if (!(*forced)) | ||
| 752 | return -EWOULDBLOCK; | 766 | return -EWOULDBLOCK; |
| 753 | } | 767 | } |
| 754 | 768 | ||
| 755 | /* Mark it as dying. */ | 769 | /* Mark it as dying. */ |
| 756 | sref->mod->state = MODULE_STATE_GOING; | 770 | mod->state = MODULE_STATE_GOING; |
| 757 | return 0; | ||
| 758 | } | ||
| 759 | |||
| 760 | static int try_stop_module(struct module *mod, int flags, int *forced) | ||
| 761 | { | ||
| 762 | struct stopref sref = { mod, flags, forced }; | ||
| 763 | 771 | ||
| 764 | return stop_machine(__try_stop_module, &sref, NULL); | 772 | return 0; |
| 765 | } | 773 | } |
| 766 | 774 | ||
| 767 | unsigned long module_refcount(struct module *mod) | 775 | unsigned long module_refcount(struct module *mod) |
| 768 | { | 776 | { |
| 769 | return (unsigned long)atomic_read(&mod->refcnt); | 777 | return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE; |
| 770 | } | 778 | } |
| 771 | EXPORT_SYMBOL(module_refcount); | 779 | EXPORT_SYMBOL(module_refcount); |
| 772 | 780 | ||
| @@ -921,11 +929,11 @@ bool try_module_get(struct module *module) | |||
| 921 | 929 | ||
| 922 | if (module) { | 930 | if (module) { |
| 923 | preempt_disable(); | 931 | preempt_disable(); |
| 924 | 932 | /* Note: here, we can fail to get a reference */ | |
| 925 | if (likely(module_is_live(module))) { | 933 | if (likely(module_is_live(module) && |
| 926 | atomic_inc(&module->refcnt); | 934 | atomic_inc_not_zero(&module->refcnt) != 0)) |
| 927 | trace_module_get(module, _RET_IP_); | 935 | trace_module_get(module, _RET_IP_); |
| 928 | } else | 936 | else |
| 929 | ret = false; | 937 | ret = false; |
| 930 | 938 | ||
| 931 | preempt_enable(); | 939 | preempt_enable(); |
| @@ -936,9 +944,12 @@ EXPORT_SYMBOL(try_module_get); | |||
| 936 | 944 | ||
| 937 | void module_put(struct module *module) | 945 | void module_put(struct module *module) |
| 938 | { | 946 | { |
| 947 | int ret; | ||
| 948 | |||
| 939 | if (module) { | 949 | if (module) { |
| 940 | preempt_disable(); | 950 | preempt_disable(); |
| 941 | atomic_dec(&module->refcnt); | 951 | ret = atomic_dec_if_positive(&module->refcnt); |
| 952 | WARN_ON(ret < 0); /* Failed to put refcount */ | ||
| 942 | trace_module_put(module, _RET_IP_); | 953 | trace_module_put(module, _RET_IP_); |
| 943 | preempt_enable(); | 954 | preempt_enable(); |
| 944 | } | 955 | } |
