author    | Dan Magenheimer <dan.magenheimer@oracle.com>   | 2013-04-30 18:27:00 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-30 20:04:01 -0400
commit    | 10a7a0771399a57a297fca9615450dbb3f88081a (patch)
tree      | 7c8564daf7ce2698277a4a38edb9ef0f636cb429 /drivers/xen/tmem.c
parent    | ff610a1d55da22bf95bbc6a8b193e052169b34b7 (diff)
xen: tmem: enable Xen tmem shim to be built/loaded as a module
Allow the Xen tmem shim to be built and loaded as a module. Xen self-ballooning
and frontswap-selfshrinking are now also "lazily" initialized when the
Xen tmem shim is loaded as a module, unless explicitly disabled by
module parameters.
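When built as a module, the disable switches become ordinary boolean module
parameters (readable under /sys/module/tmem/parameters/). Below is a minimal,
self-contained sketch of that pattern using one of the flag names from this
patch; the demo init function is hypothetical and not part of the shim:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* Same flag name as in the patch; 0 (enabled) by default. */
	static bool disable_selfballooning __read_mostly;
	module_param(disable_selfballooning, bool, S_IRUGO);
	MODULE_PARM_DESC(disable_selfballooning, "Disable Xen self-ballooning");

	static int __init tmem_param_demo_init(void)
	{
		/* e.g. "modprobe <module> disable_selfballooning=1" flips this. */
		pr_info("demo: selfballooning %sabled\n",
			disable_selfballooning ? "dis" : "en");
		return 0;
	}
	module_init(tmem_param_demo_init);

	MODULE_LICENSE("GPL");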
Note that a runtime dependency disallows loading the module if the
cleancache/frontswap lazy-initialization patches are not present.
If built-in (not built as a module), the original mechanism of enabling
via a kernel boot parameter is retained, but this should be considered
deprecated.
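For the built-in case, the legacy boot-parameter path is kept behind the new
CONFIG_XEN_TMEM_MODULE guard, using a __setup handler instead of module_param.
Roughly (a sketch reconstructed from the hunks below; the handler body is an
assumption, since only the guard and the __setup registration appear in the diff):

	#ifndef CONFIG_XEN_TMEM_MODULE
	bool __read_mostly tmem_enabled = false;

	/* Handles "tmem" on the kernel command line when the shim is built in;
	 * deprecated in favour of loading the shim as a module. */
	static int __init enable_tmem(char *s)
	{
		tmem_enabled = true;	/* assumed body */
		return 1;
	}
	__setup("tmem", enable_tmem);
	#endif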
Note that module unload is explicitly not yet supported.
[v1: Removed the [CLEANCACHE|FRONTSWAP]_HAS_LAZY_INIT ifdef]
[v2: Squashed the xen/tmem: Remove the subsys call patch in]
[akpm@linux-foundation.org: fix build (disable_frontswap_selfshrinking undeclared)]
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Andor Daam <andor.daam@googlemail.com>
Cc: Florian Schmaus <fschmaus@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Stefan Hengelein <ilendir@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/xen/tmem.c')
-rw-r--r-- | drivers/xen/tmem.c | 40
1 file changed, 31 insertions(+), 9 deletions(-)
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index fd79eab08368..8adde8e8428c 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -5,6 +5,7 @@
  * Author: Dan Magenheimer
  */
 
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -128,6 +129,7 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
 	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
 }
 
+#ifndef CONFIG_XEN_TMEM_MODULE
 bool __read_mostly tmem_enabled = false;
 
 static int __init enable_tmem(char *s)
@@ -136,6 +138,7 @@ static int __init enable_tmem(char *s)
 	return 1;
 }
 __setup("tmem", enable_tmem);
+#endif
 
 #ifdef CONFIG_CLEANCACHE
 static int xen_tmem_destroy_pool(u32 pool_id)
@@ -227,14 +230,19 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
 	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
-static bool __initdata use_cleancache = true;
-
+static bool disable_cleancache __read_mostly;
+static bool disable_selfballooning __read_mostly;
+#ifdef CONFIG_XEN_TMEM_MODULE
+module_param(disable_cleancache, bool, S_IRUGO);
+module_param(disable_selfballooning, bool, S_IRUGO);
+#else
 static int __init no_cleancache(char *s)
 {
-	use_cleancache = false;
+	disable_cleancache = true;
 	return 1;
 }
 __setup("nocleancache", no_cleancache);
+#endif
 
 static struct cleancache_ops tmem_cleancache_ops = {
 	.put_page = tmem_cleancache_put_page,
@@ -353,14 +361,19 @@ static void tmem_frontswap_init(unsigned ignored)
 	xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
 }
 
-static bool __initdata use_frontswap = true;
-
+static bool disable_frontswap __read_mostly;
+static bool disable_frontswap_selfshrinking __read_mostly;
+#ifdef CONFIG_XEN_TMEM_MODULE
+module_param(disable_frontswap, bool, S_IRUGO);
+module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
+#else
 static int __init no_frontswap(char *s)
 {
-	use_frontswap = false;
+	disable_frontswap = true;
 	return 1;
 }
 __setup("nofrontswap", no_frontswap);
+#endif
 
 static struct frontswap_ops tmem_frontswap_ops = {
 	.store = tmem_frontswap_store,
@@ -369,14 +382,16 @@ static struct frontswap_ops tmem_frontswap_ops = {
 	.invalidate_area = tmem_frontswap_flush_area,
 	.init = tmem_frontswap_init
 };
+#else /* CONFIG_FRONTSWAP */
+#define disable_frontswap_selfshrinking 1
 #endif
 
-static int __init xen_tmem_init(void)
+static int xen_tmem_init(void)
 {
 	if (!xen_domain())
 		return 0;
 #ifdef CONFIG_FRONTSWAP
-	if (tmem_enabled && use_frontswap) {
+	if (tmem_enabled && !disable_frontswap) {
 		char *s = "";
 		struct frontswap_ops *old_ops =
 			frontswap_register_ops(&tmem_frontswap_ops);
@@ -390,7 +405,7 @@ static int __init xen_tmem_init(void)
 #endif
 #ifdef CONFIG_CLEANCACHE
 	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
-	if (tmem_enabled && use_cleancache) {
+	if (tmem_enabled && !disable_cleancache) {
 		char *s = "";
 		struct cleancache_ops *old_ops =
 			cleancache_register_ops(&tmem_cleancache_ops);
@@ -400,7 +415,14 @@ static int __init xen_tmem_init(void)
 			"Xen Transcendent Memory%s\n", s);
 	}
 #endif
+#ifdef CONFIG_XEN_SELFBALLOONING
+	xen_selfballoon_init(!disable_selfballooning,
+			     !disable_frontswap_selfshrinking);
+#endif
 	return 0;
 }
 
 module_init(xen_tmem_init)
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
+MODULE_DESCRIPTION("Shim to Xen transcendent memory");