author	Andi Kleen <andi@firstfloor.org>	2008-08-30 04:09:00 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2008-10-21 19:00:22 -0400
commit	d72b37513cdfbd3f53f3d485a8c403cc96d2c95f (patch)
tree	bdc0ad7aa94bc52290b42e325ac59c9140faf090 /kernel
parent	5e458cc0f4770eea45d3c07110f01b3a94c72aa5 (diff)
Remove stop_machine during module load v2
Module loading currently does a stop_machine() on each module load to insert the module into the global module list. Especially on larger systems this can be quite expensive.

It does that to handle concurrent lockless module list readers such as kallsyms.

I don't think stop_machine() is actually needed to insert something into a list, though. There are no concurrent writers because the module mutex is taken. And the RCU list functions know how to insert a node into a list with the right memory ordering, so that concurrent readers don't go off into the woods.

So remove the stop_machine() for the module list insert and just do a list_add_rcu() instead.

Module removal will still do a stop_machine() of course; it needs that for other reasons.

v2: Revised readers based on Paul's comments. All readers that rely only on disabled preemption needed to be changed to list_for_each_entry_rcu(). Done that. The others are ok because they hold the module mutex. Also added a possibly missing preempt_disable() for print_modules().

[cc Paul McKenney for review. It's not RCU, but quite similar.]

Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
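The pattern the patch relies on, reduced to a minimal self-contained sketch. The struct, list, and function names below are illustrative only (not taken from kernel/module.c): writers serialize on a mutex and publish with list_add_rcu(), while lockless readers disable preemption and walk the list with list_for_each_entry_rcu(), just as the diff below converts the module-list readers.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mutex.h>
#include <linux/preempt.h>

/* Illustrative list mirroring the modules list: the mutex serializes
 * writers, preempt_disable() is the read-side for lockless readers. */
struct item {
	struct list_head list;
	const char *name;
};

static LIST_HEAD(item_list);
static DEFINE_MUTEX(item_mutex);

/* Writer side: no stop_machine() needed.  The mutex excludes other
 * writers; list_add_rcu() publishes the node with the memory ordering
 * that concurrent readers depend on. */
static void add_item(struct item *it)
{
	mutex_lock(&item_mutex);
	list_add_rcu(&it->list, &item_list);
	mutex_unlock(&item_mutex);
}

/* Reader side: runs locklessly, the way kallsyms walks the module list. */
static bool item_exists(const char *name)
{
	struct item *it;
	bool found = false;

	preempt_disable();
	list_for_each_entry_rcu(it, &item_list, list) {
		if (!strcmp(it->name, name)) {
			found = true;
			break;
		}
	}
	preempt_enable();
	return found;
}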
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/module.c	47
1 file changed, 22 insertions(+), 25 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 3d256681ab64..c0f1826e2d9e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -42,6 +42,7 @@
 #include <linux/string.h>
 #include <linux/mutex.h>
 #include <linux/unwind.h>
+#include <linux/rculist.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <linux/license.h>
@@ -63,7 +64,7 @@
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
 
 /* List of modules, protected by module_mutex or preempt_disable
- * (add/delete uses stop_machine). */
+ * (delete uses stop_machine/add uses RCU list operations). */
 static DEFINE_MUTEX(module_mutex);
 static LIST_HEAD(modules);
 
@@ -241,7 +242,7 @@ static bool each_symbol(bool (*fn)(const struct symsearch *arr,
 	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
 		return true;
 
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		struct symsearch arr[] = {
 			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
 			  NOT_GPL_ONLY, false },
@@ -1417,17 +1418,6 @@ static void mod_kobject_remove(struct module *mod)
 }
 
 /*
- * link the module with the whole machine is stopped with interrupts off
- * - this defends against kallsyms not taking locks
- */
-static int __link_module(void *_mod)
-{
-	struct module *mod = _mod;
-	list_add(&mod->list, &modules);
-	return 0;
-}
-
-/*
  * unlink the module with the whole machine is stopped with interrupts off
  * - this defends against kallsyms not taking locks
  */
@@ -2239,9 +2229,13 @@ static noinline struct module *load_module(void __user *umod,
 		       mod->name);
 
 	/* Now sew it into the lists so we can get lockdep and oops
-	 * info during argument parsing.  Noone should access us, since
-	 * strong_try_module_get() will fail. */
-	stop_machine(__link_module, mod, NULL);
+	 * info during argument parsing.  Noone should access us, since
+	 * strong_try_module_get() will fail.
+	 * lockdep/oops can run asynchronous, so use the RCU list insertion
+	 * function to insert in a way safe to concurrent readers.
+	 * The mutex protects against concurrent writers.
+	 */
+	list_add_rcu(&mod->list, &modules);
 
 	err = parse_args(mod->name, mod->args, kp, num_kp, NULL);
 	if (err < 0)
@@ -2436,7 +2430,7 @@ const char *module_address_lookup(unsigned long addr,
 	const char *ret = NULL;
 
 	preempt_disable();
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (within(addr, mod->module_init, mod->init_size)
 		    || within(addr, mod->module_core, mod->core_size)) {
 			if (modname)
@@ -2459,7 +2453,7 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
 	struct module *mod;
 
 	preempt_disable();
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (within(addr, mod->module_init, mod->init_size) ||
 		    within(addr, mod->module_core, mod->core_size)) {
 			const char *sym;
@@ -2483,7 +2477,7 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
 	struct module *mod;
 
 	preempt_disable();
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (within(addr, mod->module_init, mod->init_size) ||
 		    within(addr, mod->module_core, mod->core_size)) {
 			const char *sym;
@@ -2510,7 +2504,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 	struct module *mod;
 
 	preempt_disable();
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (symnum < mod->num_symtab) {
 			*value = mod->symtab[symnum].st_value;
 			*type = mod->symtab[symnum].st_info;
@@ -2553,7 +2547,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
 		ret = mod_find_symname(mod, colon+1);
 		*colon = ':';
 	} else {
-		list_for_each_entry(mod, &modules, list)
+		list_for_each_entry_rcu(mod, &modules, list)
 			if ((ret = mod_find_symname(mod, name)) != 0)
 				break;
 	}
@@ -2656,7 +2650,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
 	struct module *mod;
 
 	preempt_disable();
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (mod->num_exentries == 0)
 			continue;
 
@@ -2682,7 +2676,7 @@ int is_module_address(unsigned long addr)
 
 	preempt_disable();
 
-	list_for_each_entry(mod, &modules, list) {
+	list_for_each_entry_rcu(mod, &modules, list) {
 		if (within(addr, mod->module_core, mod->core_size)) {
 			preempt_enable();
 			return 1;
@@ -2703,7 +2697,7 @@ struct module *__module_text_address(unsigned long addr)
 	if (addr < module_addr_min || addr > module_addr_max)
 		return NULL;
 
-	list_for_each_entry(mod, &modules, list)
+	list_for_each_entry_rcu(mod, &modules, list)
 		if (within(addr, mod->module_init, mod->init_text_size)
 		    || within(addr, mod->module_core, mod->core_text_size))
 			return mod;
@@ -2728,8 +2722,11 @@ void print_modules(void)
 	char buf[8];
 
 	printk("Modules linked in:");
-	list_for_each_entry(mod, &modules, list)
+	/* Most callers should already have preempt disabled, but make sure */
+	preempt_disable();
+	list_for_each_entry_rcu(mod, &modules, list)
 		printk(" %s%s", mod->name, module_flags(mod, buf));
+	preempt_enable();
 	if (last_unloaded_module[0])
 		printk(" [last unloaded: %s]", last_unloaded_module);
 	printk("\n");