author     Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>  2014-11-09 17:59:29 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>  2014-11-11 01:37:46 -0500
commit     2f35c41f58a978dfa44ffa102249d556caa99eeb (patch)
tree       871fdf31787639112af15e3532bb6617660f2e47 /kernel
parent     0286b5ea125e58b4797747f688949c05394412e8 (diff)
module: Replace module_ref with atomic_t refcnt
Replace the complex module_ref per-cpu reference counter with
a simple atomic_t refcnt. This is a pure code simplification.
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
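The matching include/linux/module.h change is not shown below (the diffstat is limited to 'kernel'), so the following sketch of the struct module side is a reconstruction from the accessors used in the diff, not the verbatim header hunk:

/* Reconstruction of the header side of this patch; include/linux/module.h
 * is outside the 'kernel' diffstat below.  Field names follow the diff. */

/* Before: a pair of monotonic per-cpu tallies, reachable from
 * struct module through a pointer that had to be alloc_percpu()'d. */
struct module_ref {
        unsigned long incs;     /* total gets, summed in module_refcount() */
        unsigned long decs;     /* total puts */
};
/* in struct module (under CONFIG_MODULE_UNLOAD): */
struct module_ref __percpu *refptr;

/* After: one counter embedded directly in struct module,
 * so there is nothing to allocate and nothing to free. */
/* in struct module (under CONFIG_MODULE_UNLOAD): */
atomic_t refcnt;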
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/module.c  39
1 file changed, 5 insertions(+), 34 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index d596a306b0a1..b1d485df5ac1 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -631,15 +631,11 @@ EXPORT_TRACEPOINT_SYMBOL(module_get);
 /* Init the unload section of the module. */
 static int module_unload_init(struct module *mod)
 {
-        mod->refptr = alloc_percpu(struct module_ref);
-        if (!mod->refptr)
-                return -ENOMEM;
-
         INIT_LIST_HEAD(&mod->source_list);
         INIT_LIST_HEAD(&mod->target_list);
 
         /* Hold reference count during initialization. */
-        raw_cpu_write(mod->refptr->incs, 1);
+        atomic_set(&mod->refcnt, 1);
 
         return 0;
 }
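Because refcnt is now embedded in struct module, there is nothing to allocate here: the alloc_percpu() call and its -ENOMEM path vanish, and module_unload_init() is reduced to list setup plus a single atomic_set(). It still returns int, but after this patch it can no longer actually fail.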
@@ -721,8 +717,6 @@ static void module_unload_free(struct module *mod)
                 kfree(use);
         }
         mutex_unlock(&module_mutex);
-
-        free_percpu(mod->refptr);
 }
 
 #ifdef CONFIG_MODULE_FORCE_UNLOAD
@@ -772,28 +766,7 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 
 unsigned long module_refcount(struct module *mod)
 {
-        unsigned long incs = 0, decs = 0;
-        int cpu;
-
-        for_each_possible_cpu(cpu)
-                decs += per_cpu_ptr(mod->refptr, cpu)->decs;
-        /*
-         * ensure the incs are added up after the decs.
-         * module_put ensures incs are visible before decs with smp_wmb.
-         *
-         * This 2-count scheme avoids the situation where the refcount
-         * for CPU0 is read, then CPU0 increments the module refcount,
-         * then CPU1 drops that refcount, then the refcount for CPU1 is
-         * read. We would record a decrement but not its corresponding
-         * increment so we would see a low count (disaster).
-         *
-         * Rare situation? But module_refcount can be preempted, and we
-         * might be tallying up 4096+ CPUs. So it is not impossible.
-         */
-        smp_rmb();
-        for_each_possible_cpu(cpu)
-                incs += per_cpu_ptr(mod->refptr, cpu)->incs;
-        return incs - decs;
+        return (unsigned long)atomic_read(&mod->refcnt);
 }
 EXPORT_SYMBOL(module_refcount);
 
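The deleted comment is worth dwelling on: with split incs/decs tallies, a get on one CPU and its matching put on another landed in different counters, so a reader summing them in the wrong order could see the put without the get and report an impossibly low count. A single atomic_t closes that hole by construction, since every get and put is a modification of the same variable in one total order. A minimal user-space analogue (C11 atomics plus pthreads; all names here are invented for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcnt = 1;           /* the init-time reference */

/* Each worker pairs a get with a put, as a module user would. */
static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                atomic_fetch_add(&refcnt, 1);   /* __module_get() analogue */
                atomic_fetch_sub(&refcnt, 1);   /* module_put() analogue */
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);

        /* A concurrent snapshot can never observe a put without its
         * earlier matching get: both hit the same variable, whose
         * modification order is total.  No barriers, no tallying. */
        printf("snapshot: %d\n", atomic_load(&refcnt));

        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("final: %d\n", atomic_load(&refcnt));    /* prints 1 */
        return 0;
}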
@@ -935,7 +908,7 @@ void __module_get(struct module *module)
 {
         if (module) {
                 preempt_disable();
-                __this_cpu_inc(module->refptr->incs);
+                atomic_inc(&module->refcnt);
                 trace_module_get(module, _RET_IP_);
                 preempt_enable();
         }
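One subtlety survives the conversion: the preempt_disable()/preempt_enable() pair stays, even though atomic_inc(), unlike __this_cpu_inc(), is safe to run with preemption enabled. Removing it would be a separate cleanup outside the scope of this patch.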
@@ -950,7 +923,7 @@ bool try_module_get(struct module *module)
                 preempt_disable();
 
                 if (likely(module_is_live(module))) {
-                        __this_cpu_inc(module->refptr->incs);
+                        atomic_inc(&module->refcnt);
                         trace_module_get(module, _RET_IP_);
                 } else
                         ret = false;
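For context, this is the usual shape of a caller of these two entry points, sketched with an invented ops table (struct some_ops, its fields, and use_ops() are hypothetical; try_module_get() and module_put() are the real interfaces). try_module_get() fails once the owning module is no longer live, which is what makes the pin safe:

/* Hypothetical caller: pin the module that owns a callback table
 * before calling through it, and unpin when done. */
struct some_ops {
        struct module *owner;           /* typically set to THIS_MODULE */
        int (*do_something)(void);
};

static int use_ops(struct some_ops *ops)
{
        int err;

        if (!try_module_get(ops->owner))
                return -ENODEV;         /* owner already unloading */

        err = ops->do_something();

        module_put(ops->owner);         /* drop the pinning reference */
        return err;
}

The unlocked module_is_live() test followed by a plain atomic_inc() is not itself atomic, but at this point in the tree unloading still marks the module dead from stop_machine() context, which cannot interleave with a preemption-disabled section; a later patch in this series dropped that dependency and switched to an atomic test-and-increment.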
@@ -965,9 +938,7 @@ void module_put(struct module *module)
 {
         if (module) {
                 preempt_disable();
-                smp_wmb(); /* see comment in module_refcount */
-                __this_cpu_inc(module->refptr->decs);
-
+                atomic_dec(&module->refcnt);
                 trace_module_put(module, _RET_IP_);
                 preempt_enable();
         }
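The smp_wmb() here and the smp_rmb() deleted from module_refcount() above were two halves of one ordering contract between the incs and decs counters; with a single atomic_t there is no second counter left to order against, so both barriers go away with nothing to replace them.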