author     Peter Zijlstra <peterz@infradead.org>   2015-05-26 21:39:35 -0400
committer  Rusty Russell <rusty@rustcorp.com.au>   2015-05-27 22:01:52 -0400
commit     0be964be0d45084245673c971d72a4b51690231d
tree       c3d0f0497d325c28f344adda9c3305e75e550c60
parent     bed831f9a251968272dae10a83b512c7db256ef0
module: Sanitize RCU usage and locking
Currently the RCU usage in module is an inconsistent mess of RCU and
RCU-sched; this is broken for CONFIG_PREEMPT, where synchronize_rcu()
does not imply synchronize_sched().
Most usage sites use preempt_{dis,en}able(), which is RCU-sched, but
(most of) the modification sites use synchronize_rcu(), with the
exception of the module bug list, which actually uses RCU.
Convert everything over to RCU-sched.
Furthermore, add lockdep asserts to all sites, because it's not at all
clear to me that the required locking is observed, especially on exported
functions.
Cc: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
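
For context, here is a minimal sketch (not part of the patch; the struct,
list, and function names are invented) of the reader/updater pairing the
conversion relies on: readers that merely disable preemption are RCU-sched
read-side critical sections, so the update side must wait with
synchronize_sched() (or use call_rcu_sched()); under CONFIG_PREEMPT,
synchronize_rcu() alone does not wait for such readers.

	/* needs <linux/list.h>, <linux/rcupdate.h>, <linux/mutex.h>, <linux/slab.h> */

	struct item {
		struct list_head list;
		unsigned long key;
	};

	static LIST_HEAD(item_list);
	static DEFINE_MUTEX(item_mutex);

	/* Reader: disabling preemption marks an RCU-sched read-side section. */
	static bool item_present(unsigned long key)
	{
		struct item *it;
		bool found = false;

		preempt_disable();	/* equivalent to rcu_read_lock_sched() */
		list_for_each_entry_rcu(it, &item_list, list) {
			if (it->key == key) {
				found = true;
				break;
			}
		}
		preempt_enable();
		return found;
	}

	/* Updater: must wait for RCU-sched readers before freeing. */
	static void remove_item(struct item *it)
	{
		mutex_lock(&item_mutex);
		list_del_rcu(&it->list);
		mutex_unlock(&item_mutex);
		synchronize_sched();	/* waits out all preempt-disabled readers */
		kfree(it);
	}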
-rw-r--r--  include/linux/module.h | 12
-rw-r--r--  kernel/module.c        | 40
-rw-r--r--  lib/bug.c              |  7
3 files changed, 47 insertions(+), 12 deletions(-)
diff --git a/include/linux/module.h b/include/linux/module.h
index c883b86ea964..fb56dd85a862 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -421,14 +421,22 @@ struct symsearch {
 	bool unused;
 };
 
-/* Search for an exported symbol by name. */
+/*
+ * Search for an exported symbol by name.
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
 const struct kernel_symbol *find_symbol(const char *name,
 					struct module **owner,
 					const unsigned long **crc,
 					bool gplok,
 					bool warn);
 
-/* Walk the exported symbol table */
+/*
+ * Walk the exported symbol table
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
 bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 				     struct module *owner,
 				     void *data), void *data);
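
A hypothetical caller sketch (the function and its use are invented, not from
the patch) showing one of the two calling conventions the new comment
requires, namely disabling preemption around find_symbol():

	/* Hedged sketch: look up an exported symbol without holding module_mutex. */
	static unsigned long lookup_symbol_addr(const char *name)
	{
		const struct kernel_symbol *sym;
		struct module *owner;
		const unsigned long *crc;
		unsigned long addr = 0;

		preempt_disable();	/* RCU-sched read side, as the comment requires */
		sym = find_symbol(name, &owner, &crc, true, true);
		if (sym)
			addr = sym->value;	/* address of the exported symbol */
		preempt_enable();

		return addr;
	}

The alternative is to hold module_mutex across the call instead of disabling
preemption.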
diff --git a/kernel/module.c b/kernel/module.c
index 1150d5239205..a15899e00ca9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -105,6 +105,22 @@ static LIST_HEAD(modules);
 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 #endif /* CONFIG_KGDB_KDB */
 
+static void module_assert_mutex(void)
+{
+	lockdep_assert_held(&module_mutex);
+}
+
+static void module_assert_mutex_or_preempt(void)
+{
+#ifdef CONFIG_LOCKDEP
+	if (unlikely(!debug_locks))
+		return;
+
+	WARN_ON(!rcu_read_lock_sched_held() &&
+		!lockdep_is_held(&module_mutex));
+#endif
+}
+
 #ifdef CONFIG_MODULE_SIG
 #ifdef CONFIG_MODULE_SIG_FORCE
 static bool sig_enforce = true;
@@ -318,6 +334,8 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 #endif
 	};
 
+	module_assert_mutex_or_preempt();
+
 	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
 		return true;
 
@@ -457,6 +475,8 @@ static struct module *find_module_all(const char *name, size_t len,
 {
 	struct module *mod;
 
+	module_assert_mutex();
+
 	list_for_each_entry(mod, &modules, list) {
 		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
 			continue;
@@ -1860,8 +1880,8 @@ static void free_module(struct module *mod)
 	list_del_rcu(&mod->list);
 	/* Remove this module from bug list, this uses list_del_rcu */
 	module_bug_cleanup(mod);
-	/* Wait for RCU synchronizing before releasing mod->list and buglist. */
-	synchronize_rcu();
+	/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
+	synchronize_sched();
 	mutex_unlock(&module_mutex);
 
 	/* This may be NULL, but that's OK */
@@ -3133,11 +3153,11 @@ static noinline int do_init_module(struct module *mod)
 	mod->init_text_size = 0;
 	/*
 	 * We want to free module_init, but be aware that kallsyms may be
-	 * walking this with preempt disabled. In all the failure paths,
-	 * we call synchronize_rcu/synchronize_sched, but we don't want
-	 * to slow down the success path, so use actual RCU here.
+	 * walking this with preempt disabled. In all the failure paths, we
+	 * call synchronize_sched(), but we don't want to slow down the success
+	 * path, so use actual RCU here.
 	 */
-	call_rcu(&freeinit->rcu, do_free_init);
+	call_rcu_sched(&freeinit->rcu, do_free_init);
 	mutex_unlock(&module_mutex);
 	wake_up_all(&module_wq);
 
@@ -3395,8 +3415,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
 	wake_up_all(&module_wq);
-	/* Wait for RCU synchronizing before releasing mod->list. */
-	synchronize_rcu();
+	/* Wait for RCU-sched synchronizing before releasing mod->list. */
+	synchronize_sched();
 	mutex_unlock(&module_mutex);
  free_module:
 	/* Free lock-classes; relies on the preceding sync_rcu() */
@@ -3663,6 +3683,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
 	unsigned int i;
 	int ret;
 
+	module_assert_mutex();
+
 	list_for_each_entry(mod, &modules, list) {
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
@@ -3837,6 +3859,8 @@ struct module *__module_address(unsigned long addr)
 	if (addr < module_addr_min || addr > module_addr_max)
 		return NULL;
 
+	module_assert_mutex_or_preempt();
+
 	list_for_each_entry_rcu(mod, &modules, list) {
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
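
__module_address() walks the module list with list_for_each_entry_rcu(), so
with the new assertion a caller must hold module_mutex or be inside an
RCU-sched read-side section. A hedged sketch with an invented caller:

	/* Hedged sketch: check whether an address belongs to a named module. */
	static bool addr_in_module(unsigned long addr, const char *modname)
	{
		struct module *mod;
		bool match = false;

		preempt_disable();	/* satisfies module_assert_mutex_or_preempt() */
		mod = __module_address(addr);
		if (mod && strcmp(mod->name, modname) == 0)
			match = true;
		preempt_enable();

		return match;
	}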
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -66,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 	struct module *mod;
 	const struct bug_entry *bug = NULL;
 
-	rcu_read_lock();
+	rcu_read_lock_sched();
 	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
 		unsigned i;
 
@@ -77,7 +77,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 	}
 	bug = NULL;
 out:
-	rcu_read_unlock();
+	rcu_read_unlock_sched();
 
 	return bug;
 }
@@ -88,6 +88,8 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 	char *secstrings;
 	unsigned int i;
 
+	lockdep_assert_held(&module_mutex);
+
 	mod->bug_table = NULL;
 	mod->num_bugs = 0;
 
@@ -113,6 +115,7 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 
 void module_bug_cleanup(struct module *mod)
 {
+	lockdep_assert_held(&module_mutex);
 	list_del_rcu(&mod->bug_list);
 }
 
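
Finally, a simplified ordering sketch (condensed from the free_module() hunk
above, not new code in this patch) of why the RCU-sched reader in
module_find_bug() is now safe against module unload:

	static void unload_module_sketch(struct module *mod)
	{
		mutex_lock(&module_mutex);
		list_del_rcu(&mod->list);
		module_bug_cleanup(mod);	/* list_del_rcu(&mod->bug_list) under module_mutex */
		synchronize_sched();		/* waits out rcu_read_lock_sched() in module_find_bug() */
		mutex_unlock(&module_mutex);
		/* only after the grace period is it safe to free mod and its bug_table */
	}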