author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 23:55:41 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 23:55:41 -0500
commit     d790be3863b28fd22e0781c1a3ddefcbfd5f7086 (patch)
tree       56a9f83b66f336df73ff81d13a14a2d63ed4c0db /include/linux/module.h
parent     64ec45bff6b3dade2643ed4c0f688a15ecf46ea2 (diff)
parent     b0a65b0cccd477b2fd8b7adad0ac39433df54829 (diff)
Merge tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull module updates from Rusty Russell:
 "The exciting thing here is the getting rid of stop_machine on module
  removal.  This is possible by using a simple atomic_t for the counter,
  rather than our fancy per-cpu counter: it turns out that no one is
  doing a module increment per net packet, so the slowdown should be in
  the noise"

* tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux:
  param: do not set store func without write perm
  params: cleanup sysfs allocation
  kernel:module Fix coding style errors and warnings.
  module: Remove stop_machine from module unloading
  module: Replace module_ref with atomic_t refcnt
  lib/bug: Use RCU list ops for module_bug_list
  module: Unlink module with RCU synchronizing instead of stop_machine
  module: Wait for RCU synchronizing before releasing a module
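To make the change concrete, here is a minimal userspace sketch (C11, <stdatomic.h>) of the scheme this merge moves to: a single shared atomic counter, where a "get" succeeds only while the module is still live. The names module_sketch, try_get, and put are invented for this illustration; the real kernel implementation is try_module_get()/module_put() in kernel/module.c, and its exact semantics differ in detail.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct module_sketch {
	atomic_int refcnt;		/* plays the role of mod->refcnt */
};

/* Increment the count only if the module is still live (refcnt > 0). */
static bool try_get(struct module_sketch *mod)
{
	int old = atomic_load(&mod->refcnt);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&mod->refcnt, &old, old + 1))
			return true;
		/* A failed CAS reloads 'old'; retry unless the module died. */
	}
	return false;
}

static void put(struct module_sketch *mod)
{
	atomic_fetch_sub(&mod->refcnt, 1);
}

int main(void)
{
	struct module_sketch mod;

	atomic_init(&mod.refcnt, 1);	/* 1 = loaded, no users yet */

	if (try_get(&mod))
		printf("got ref, refcnt now %d\n", atomic_load(&mod.refcnt));
	put(&mod);
	printf("after put, refcnt %d\n", atomic_load(&mod.refcnt));
	return 0;
}

Because every get and put is a single atomic read-modify-write on one shared counter, unload can observe the count directly instead of freezing all CPUs; the commit message's point is that this contention is acceptable since nothing takes a module reference per network packet.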
Diffstat (limited to 'include/linux/module.h')
-rw-r--r--  include/linux/module.h  16
1 file changed, 1 insertion(+), 15 deletions(-)
diff --git a/include/linux/module.h b/include/linux/module.h
index 71f282a4e307..ebfb0e153c6a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -210,20 +210,6 @@ enum module_state {
 	MODULE_STATE_UNFORMED,	/* Still setting it up. */
 };
 
-/**
- * struct module_ref - per cpu module reference counts
- * @incs: number of module get on this cpu
- * @decs: number of module put on this cpu
- *
- * We force an alignment on 8 or 16 bytes, so that alloc_percpu()
- * put @incs/@decs in same cache line, with no extra memory cost,
- * since alloc_percpu() is fine grained.
- */
-struct module_ref {
-	unsigned long incs;
-	unsigned long decs;
-} __attribute((aligned(2 * sizeof(unsigned long))));
-
 struct module {
 	enum module_state state;
 
@@ -367,7 +353,7 @@ struct module {
 	/* Destruction function. */
 	void (*exit)(void);
 
-	struct module_ref __percpu *refptr;
+	atomic_t refcnt;
 #endif
 
 #ifdef CONFIG_CONSTRUCTORS
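For contrast, below is a userspace sketch of the per-cpu scheme the diff removes. Each CPU owns an incs/decs pair, aligned so both counters land in one cache line (mirroring the removed comment about alloc_percpu()), and the net reference count only exists as a sum over all CPUs. That sum is racy against concurrent gets and puts, which is why unloading previously relied on stop_machine to quiesce every CPU before trusting it. NR_CPUS_SKETCH and the plain static array are illustrative stand-ins for the kernel's real per-cpu allocation.

#include <stdio.h>

#define NR_CPUS_SKETCH 4

/* Mirrors the removed struct module_ref: one counter pair per CPU. */
struct module_ref_sketch {
	unsigned long incs;		/* module gets on this CPU */
	unsigned long decs;		/* module puts on this CPU */
} __attribute__((aligned(2 * sizeof(unsigned long))));

static struct module_ref_sketch refs[NR_CPUS_SKETCH];

/*
 * Net count = sum(incs) - sum(decs).  Only safe to trust when no CPU
 * can be mid-update, which is what stop_machine used to guarantee.
 */
static unsigned long module_refcount_sketch(void)
{
	unsigned long incs = 0, decs = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++) {
		incs += refs[cpu].incs;
		decs += refs[cpu].decs;
	}
	return incs - decs;
}

int main(void)
{
	refs[0].incs = 3;		/* three gets on CPU 0 */
	refs[2].decs = 2;		/* two puts on CPU 2 */
	printf("net refcount: %lu\n", module_refcount_sketch());
	return 0;
}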