Diffstat (limited to 'drivers/xen/tmem.c')
-rw-r--r--  drivers/xen/tmem.c | 93
1 file changed, 43 insertions(+), 50 deletions(-)
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index e3600be4e7fa..0f0493c63371 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -11,11 +11,7 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/cleancache.h>
-
-/* temporary ifdef until include/linux/frontswap.h is upstream */
-#ifdef CONFIG_FRONTSWAP
 #include <linux/frontswap.h>
-#endif
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -24,6 +20,36 @@
 #include <asm/xen/hypervisor.h>
 #include <xen/tmem.h>
 
+#ifndef CONFIG_XEN_TMEM_MODULE
+bool __read_mostly tmem_enabled = false;
+
+static int __init enable_tmem(char *s)
+{
+	tmem_enabled = true;
+	return 1;
+}
+__setup("tmem", enable_tmem);
+#endif
+
+#ifdef CONFIG_CLEANCACHE
+static bool cleancache __read_mostly = true;
+module_param(cleancache, bool, S_IRUGO);
+static bool selfballooning __read_mostly = true;
+module_param(selfballooning, bool, S_IRUGO);
+#endif /* CONFIG_CLEANCACHE */
+
+#ifdef CONFIG_FRONTSWAP
+static bool frontswap __read_mostly = true;
+module_param(frontswap, bool, S_IRUGO);
+#else /* CONFIG_FRONTSWAP */
+#define frontswap (0)
+#endif /* CONFIG_FRONTSWAP */
+
+#ifdef CONFIG_XEN_SELFBALLOONING
+static bool selfshrinking __read_mostly = true;
+module_param(selfshrinking, bool, S_IRUGO);
+#endif /* CONFIG_XEN_SELFBALLOONING */
+
 #define TMEM_CONTROL               0
 #define TMEM_NEW_POOL              1
 #define TMEM_DESTROY_POOL          2
@@ -129,16 +155,6 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
 	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
 }
 
-#ifndef CONFIG_XEN_TMEM_MODULE
-bool __read_mostly tmem_enabled = false;
-
-static int __init enable_tmem(char *s)
-{
-	tmem_enabled = true;
-	return 1;
-}
-__setup("tmem", enable_tmem);
-#endif
 
 #ifdef CONFIG_CLEANCACHE
 static int xen_tmem_destroy_pool(u32 pool_id)
@@ -230,20 +246,6 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
 	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
-static bool disable_cleancache __read_mostly;
-static bool disable_selfballooning __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_cleancache, bool, S_IRUGO);
-module_param(disable_selfballooning, bool, S_IRUGO);
-#else
-static int __init no_cleancache(char *s)
-{
-	disable_cleancache = true;
-	return 1;
-}
-__setup("nocleancache", no_cleancache);
-#endif
-
 static struct cleancache_ops tmem_cleancache_ops = {
 	.put_page = tmem_cleancache_put_page,
 	.get_page = tmem_cleancache_get_page,
@@ -361,20 +363,6 @@ static void tmem_frontswap_init(unsigned ignored)
 	xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
 }
 
-static bool disable_frontswap __read_mostly;
-static bool disable_frontswap_selfshrinking __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_frontswap, bool, S_IRUGO);
-module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
-#else
-static int __init no_frontswap(char *s)
-{
-	disable_frontswap = true;
-	return 1;
-}
-__setup("nofrontswap", no_frontswap);
-#endif
-
 static struct frontswap_ops tmem_frontswap_ops = {
 	.store = tmem_frontswap_store,
 	.load = tmem_frontswap_load,
@@ -382,8 +370,6 @@ static struct frontswap_ops tmem_frontswap_ops = {
 	.invalidate_area = tmem_frontswap_flush_area,
 	.init = tmem_frontswap_init
 };
-#else /* CONFIG_FRONTSWAP */
-#define disable_frontswap_selfshrinking 1
 #endif
 
 static int xen_tmem_init(void)
@@ -391,12 +377,12 @@ static int xen_tmem_init(void)
 	if (!xen_domain())
 		return 0;
 #ifdef CONFIG_FRONTSWAP
-	if (tmem_enabled && !disable_frontswap) {
+	if (tmem_enabled && frontswap) {
 		char *s = "";
-		struct frontswap_ops *old_ops =
-			frontswap_register_ops(&tmem_frontswap_ops);
+		struct frontswap_ops *old_ops;
 
 		tmem_frontswap_poolid = -1;
+		old_ops = frontswap_register_ops(&tmem_frontswap_ops);
 		if (IS_ERR(old_ops) || old_ops) {
 			if (IS_ERR(old_ops))
 				return PTR_ERR(old_ops);
@@ -408,7 +394,7 @@ static int xen_tmem_init(void)
 #endif
 #ifdef CONFIG_CLEANCACHE
 	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
-	if (tmem_enabled && !disable_cleancache) {
+	if (tmem_enabled && cleancache) {
 		char *s = "";
 		struct cleancache_ops *old_ops =
 			cleancache_register_ops(&tmem_cleancache_ops);
@@ -419,8 +405,15 @@ static int xen_tmem_init(void)
 	}
 #endif
 #ifdef CONFIG_XEN_SELFBALLOONING
-	xen_selfballoon_init(!disable_selfballooning,
-			     !disable_frontswap_selfshrinking);
+	/*
+	 * There is no point in driving pages to the swap system if they
+	 * aren't going anywhere in the tmem universe.
+	 */
+	if (!frontswap) {
+		selfshrinking = false;
+		selfballooning = false;
+	}
+	xen_selfballoon_init(selfballooning, selfshrinking);
 #endif
 	return 0;
 }
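
For reference, the option idiom this patch converges on can be shown in a
minimal, self-contained form. The sketch below is illustrative only: the
module and parameter names (demo, feature) are invented for the example and
are not part of the patch. It mirrors the pattern the diff introduces: a
positive boolean defaulting to on, exported read-only via module_param(),
with a __setup() hook compiled in only for non-modular builds (the patch
keys this off CONFIG_XEN_TMEM_MODULE; the generic equivalent is MODULE).

	#include <linux/cache.h>
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/moduleparam.h>
	#include <linux/stat.h>

	/* Positive option, default on; S_IRUGO exposes it read-only in
	 * /sys/module/demo/parameters/feature. */
	static bool feature __read_mostly = true;
	module_param(feature, bool, S_IRUGO);

	#ifndef MODULE
	/* __setup() only fires for built-in code, so keep it out of
	 * modular builds. */
	static int __init enable_feature(char *s)
	{
		feature = true;
		return 1;	/* non-zero: the option was consumed */
	}
	__setup("feature", enable_feature);
	#endif

	static int __init demo_init(void)
	{
		pr_info("demo: feature is %s\n", feature ? "on" : "off");
		return 0;
	}
	module_init(demo_init);

	static void __exit demo_exit(void)
	{
	}
	module_exit(demo_exit);

	MODULE_LICENSE("GPL");

The readability win is visible in the hunks above: the old double negatives
(if (tmem_enabled && !disable_frontswap)) become plain positives
(if (tmem_enabled && frontswap)), and the CONFIG_FRONTSWAP=n stub collapses
from a disable_frontswap_selfshrinking define to a single
#define frontswap (0).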
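
Usage note, assuming the kernel's standard parameter conventions (which this
diff itself does not show): built as CONFIG_XEN_TMEM_MODULE, the new knobs
are ordinary module parameters, e.g. modprobe tmem frontswap=0
selfshrinking=0; built in, module parameters take the module-name prefix on
the kernel command line, e.g. tmem.frontswap=0, while the bare tmem token
handled by __setup("tmem", enable_tmem) remains the master enable.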