author    Linus Torvalds <torvalds@linux-foundation.org>    2014-12-18 23:55:41 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-12-18 23:55:41 -0500
commit    d790be3863b28fd22e0781c1a3ddefcbfd5f7086 (patch)
tree      56a9f83b66f336df73ff81d13a14a2d63ed4c0db /lib/bug.c
parent    64ec45bff6b3dade2643ed4c0f688a15ecf46ea2 (diff)
parent    b0a65b0cccd477b2fd8b7adad0ac39433df54829 (diff)
Merge tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull module updates from Rusty Russell:
 "The exciting thing here is the getting rid of stop_machine on module
  removal. This is possible by using a simple atomic_t for the counter,
  rather than our fancy per-cpu counter: it turns out that no one is
  doing a module increment per net packet, so the slowdown should be in
  the noise"

* tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux:
  param: do not set store func without write perm
  params: cleanup sysfs allocation
  kernel:module Fix coding style errors and warnings.
  module: Remove stop_machine from module unloading
  module: Replace module_ref with atomic_t refcnt
  lib/bug: Use RCU list ops for module_bug_list
  module: Unlink module with RCU synchronizing instead of stop_machine
  module: Wait for RCU synchronizing before releasing a module
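The counter change the message refers to swaps per-cpu module reference counts for a single shared atomic. Below is a minimal userspace C sketch of the single-atomic scheme, for illustration only: struct mod_ref and the mod_refcnt_* helpers are made-up names, and this is not the kernel's actual module.c code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a module's reference counter. */
struct mod_ref {
	atomic_int refcnt;              /* one shared counter, like atomic_t */
};

/* Take a reference unless the count has already dropped to zero
 * (mirrors the kernel's atomic_inc_not_zero() idiom). */
static bool mod_refcnt_tryget(struct mod_ref *m)
{
	int old = atomic_load(&m->refcnt);
	while (old != 0) {
		if (atomic_compare_exchange_weak(&m->refcnt, &old, old + 1))
			return true;    /* got a reference */
	}
	return false;                   /* module is going away */
}

/* Drop a reference; the last put lets the remover proceed. */
static void mod_refcnt_put(struct mod_ref *m)
{
	if (atomic_fetch_sub(&m->refcnt, 1) == 1)
		printf("last reference dropped, safe to unload\n");
}

int main(void)
{
	struct mod_ref m = { .refcnt = 1 }; /* initial reference held by the "loader" */

	if (mod_refcnt_tryget(&m))
		mod_refcnt_put(&m);         /* a user comes and goes */
	mod_refcnt_put(&m);                 /* loader drops its reference */
	return 0;
}

Roughly, the per-cpu scheme kept the get/put fast path cache-local but needed stop_machine() to obtain a consistent view at unload time; a single atomic_t is marginally more contended yet can be tested for zero directly, which is what lets the stop_machine() call go away, as the pull message says.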
Diffstat (limited to 'lib/bug.c')
-rw-r--r--  lib/bug.c  20
1 file changed, 14 insertions, 6 deletions
diff --git a/lib/bug.c b/lib/bug.c
index d1d7c7878900..0c3bd9552b6f 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -64,16 +64,22 @@ static LIST_HEAD(module_bug_list);
 static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	struct module *mod;
+	const struct bug_entry *bug = NULL;
 
-	list_for_each_entry(mod, &module_bug_list, bug_list) {
-		const struct bug_entry *bug = mod->bug_table;
+	rcu_read_lock();
+	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
 		unsigned i;
 
+		bug = mod->bug_table;
 		for (i = 0; i < mod->num_bugs; ++i, ++bug)
 			if (bugaddr == bug_addr(bug))
-				return bug;
+				goto out;
 	}
-	return NULL;
+	bug = NULL;
+out:
+	rcu_read_unlock();
+
+	return bug;
 }
 
 void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
@@ -99,13 +105,15 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 	 * Strictly speaking this should have a spinlock to protect against
 	 * traversals, but since we only traverse on BUG()s, a spinlock
 	 * could potentially lead to deadlock and thus be counter-productive.
+	 * Thus, this uses RCU to safely manipulate the bug list, since BUG
+	 * must run in non-interruptive state.
 	 */
-	list_add(&mod->bug_list, &module_bug_list);
+	list_add_rcu(&mod->bug_list, &module_bug_list);
 }
 
 void module_bug_cleanup(struct module *mod)
 {
-	list_del(&mod->bug_list);
+	list_del_rcu(&mod->bug_list);
 }
 
 #else
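For context on the pattern the two hunks above apply: readers walk the list inside rcu_read_lock()/rcu_read_unlock() using list_for_each_entry_rcu(), writers publish and unlink entries with list_add_rcu()/list_del_rcu(), and whoever frees an unlinked entry first waits out a grace period with synchronize_rcu(). The following is a condensed kernel-style sketch under assumed names: struct foo and the foo_lookup/foo_add/foo_del helpers are invented, and the spinlock merely stands in for whatever already serializes writers (for module_bug_list that is module loading itself, not a dedicated lock). It is not the actual lib/bug.c code.

#include <linux/errno.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int key;
	int value;
	struct list_head list;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);	/* serializes writers only */

/* Reader: the whole walk sits inside one RCU read-side critical
 * section, so a concurrent foo_del() cannot free entries under us. */
static int foo_lookup(int key, int *value)
{
	struct foo *f;
	int ret = -ENOENT;

	rcu_read_lock();
	list_for_each_entry_rcu(f, &foo_list, list) {
		if (f->key == key) {
			*value = f->value;
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/* Writer: publish a new entry; readers may see it immediately. */
static void foo_add(struct foo *f)
{
	spin_lock(&foo_lock);
	list_add_rcu(&f->list, &foo_list);
	spin_unlock(&foo_lock);
}

/* Writer: unlink, then wait for every pre-existing reader to finish
 * before the memory is reused. */
static void foo_del(struct foo *f)
{
	spin_lock(&foo_lock);
	list_del_rcu(&f->list);
	spin_unlock(&foo_lock);
	synchronize_rcu();
	kfree(f);
}

In the commit above, the reader side is module_find_bug(), which runs from BUG() handling where taking a lock could deadlock; the writers are module_bug_finalize() and module_bug_cleanup(); and the grace-period wait comes from the companion change that has module unload synchronize RCU before the module is released ("module: Wait for RCU synchronizing before releasing a module").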