author     Rusty Russell <rusty@rustcorp.com.au>    2012-09-28 01:01:03 -0400
committer  Rusty Russell <rusty@rustcorp.com.au>    2012-09-28 01:01:03 -0400
commit     9bb9c3be56834653878f766f471fa1c20e562f4c
tree       5b84043b800db520f551c86f10418e2e2a852cf0    /kernel/module.c
parent     6f13909f4fe9652f189b462c6c98767309000321
module: wait when loading a module which is currently initializing.
The original module-init-tools module loader used an fcntl lock on the .ko file to avoid attempts to load the same module simultaneously. Unfortunately, you can't get an exclusive fcntl lock on a read-only fd, so this doesn't work on read-only mounted filesystems; module-init-tools now has a hacky sleep-and-retry loop for that case.

It's not that hard to wait in the kernel instead, and only return -EEXIST once the first module has finished loading (or to continue loading the module ourselves if the first one failed to initialize for some reason). It's also consistent with what we already do for dependent modules which are still loading.

Suggested-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
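
As a side note, here is a minimal userspace sketch (not part of this commit; the file name is a placeholder) of why the old fcntl approach cannot work on a read-only fd: POSIX only grants an exclusive (F_WRLCK) lock on a descriptor open for writing, so F_SETLK fails with EBADF on an O_RDONLY descriptor, which is all you can get on a read-only mount.

/* Hypothetical illustration, not kernel code: taking an exclusive
 * fcntl lock on a read-only .ko, as old module-init-tools tried to. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* "example.ko" is a placeholder path. */
        int fd = open("example.ko", O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        struct flock fl = {
                .l_type   = F_WRLCK,    /* exclusive lock */
                .l_whence = SEEK_SET,
                .l_start  = 0,
                .l_len    = 0,          /* whole file */
        };

        /* Exclusive locks need a descriptor open for writing, so this
         * fails (EBADF) on a read-only fd, e.g. on a read-only mount. */
        if (fcntl(fd, F_SETLK, &fl) == -1)
                printf("F_SETLK failed: %s\n", strerror(errno));
        return 0;
}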
Diffstat (limited to 'kernel/module.c')
-rw-r--r--    kernel/module.c    28
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 63cf6e7f1394..74bc19562ca3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2845,6 +2845,20 @@ static int post_relocation(struct module *mod, const struct load_info *info)
         return module_finalize(info->hdr, info->sechdrs, mod);
 }
 
+/* Is this module of this name done loading? No locks held. */
+static bool finished_loading(const char *name)
+{
+        struct module *mod;
+        bool ret;
+
+        mutex_lock(&module_mutex);
+        mod = find_module(name);
+        ret = !mod || mod->state != MODULE_STATE_COMING;
+        mutex_unlock(&module_mutex);
+
+        return ret;
+}
+
 /* Allocate and load the module: note that size of section 0 is always
    zero, and we rely on this for optional sections. */
 static struct module *load_module(void __user *umod,
@@ -2852,7 +2866,7 @@ static struct module *load_module(void __user *umod,
                   const char __user *uargs)
 {
         struct load_info info = { NULL, };
-        struct module *mod;
+        struct module *mod, *old;
         long err;
 
         pr_debug("load_module: umod=%p, len=%lu, uargs=%p\n",
@@ -2918,8 +2932,18 @@ static struct module *load_module(void __user *umod,
          * function to insert in a way safe to concurrent readers.
          * The mutex protects against concurrent writers.
          */
+again:
         mutex_lock(&module_mutex);
-        if (find_module(mod->name)) {
+        if ((old = find_module(mod->name)) != NULL) {
+                if (old->state == MODULE_STATE_COMING) {
+                        /* Wait in case it fails to load. */
+                        mutex_unlock(&module_mutex);
+                        err = wait_event_interruptible(module_wq,
+                                        finished_loading(mod->name));
+                        if (err)
+                                goto free_arch_cleanup;
+                        goto again;
+                }
                 err = -EEXIST;
                 goto unlock;
         }
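
For illustration, a hedged userspace-side sketch (not from this commit; it assumes the raw init_module(2) syscall and that the caller has already read the .ko image into memory) of what the new semantics mean for a loader: a racing load of the same module name now blocks in the kernel until the first load settles, so EEXIST means the module is already loaded (or at least no longer initializing) and the old sleep-and-retry loop is unnecessary.

/* Hypothetical example, not part of the commit: calling the raw
 * init_module(2) syscall and treating EEXIST as "already loaded".
 * Reading the .ko image into image/len is elided. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int load_image(const void *image, unsigned long len, const char *params)
{
        if (syscall(SYS_init_module, image, len, params) == 0)
                return 0;       /* we loaded it */
        if (errno == EEXIST)
                return 0;       /* someone else finished loading it first */
        fprintf(stderr, "init_module: %s\n", strerror(errno));
        return -1;
}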