author	Balbir Singh <balbir@linux.vnet.ibm.com>	2008-02-07 03:13:59 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 11:42:19 -0500
commit	8697d33194faae6fdd6b2e799f6308aa00cfdf67 (patch)
tree	edf6b3e4698b80aac6f1d1f2b9e5698ce8dfa6e5
parent	c7ba5c9e8176704bfac0729875fa62798037584d (diff)
Memory controller: add switch to control what type of pages to limit
Choose if we want cached pages to be accounted or not.  By default both
are accounted for.  A new set of tunables is added.

echo -n 1 > mem_control_type

switches the accounting to account for only mapped pages

echo -n 3 > mem_control_type

switches the behaviour back

[bunk@kernel.org: mm/memcontrol.c: cleanups]
[akpm@linux-foundation.org: fix sparc32 build]

Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
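[Editorial note: a hedged usage sketch of the new tunable.  The /cgroups
mount point and the child group "0" below are assumptions for illustration;
the patch registers the cftype as "control_type", so depending on the cgroup
core version the file may appear with a subsystem prefix such as
memory.control_type rather than the "mem_control_type" named above.

	# mount the memory controller, if not already mounted (path assumed)
	mount -t cgroup -o memory none /cgroups
	cd /cgroups/0			# a pre-existing child group, assumed here
	cat control_type		# prints 3 (MEM_CGROUP_TYPE_ALL), the default
	echo -n 1 > control_type	# account only mapped (RSS) pages
	echo -n 3 > control_type	# account RSS + page cache again

Per the write handler below, any value strictly between MEM_CGROUP_TYPE_UNSPEC
and MEM_CGROUP_TYPE_MAX is accepted, but mem_cgroup_cache_charge() only charges
page-cache pages when control_type is MEM_CGROUP_TYPE_ALL.]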
-rw-r--r--	include/linux/memcontrol.h	9
-rw-r--r--	mm/filemap.c	2
-rw-r--r--	mm/memcontrol.c	98
-rw-r--r--	mm/swap_state.c	2
4 files changed, 106 insertions(+), 5 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9bbbf524ba8f..bb6f5105401b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -22,6 +22,8 @@
 
 struct mem_cgroup;
 struct page_cgroup;
+struct page;
+struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_CONT
 
@@ -40,6 +42,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct mem_cgroup *mem_cont,
 					int active);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
+extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm);
 
 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
@@ -84,6 +87,12 @@ static inline void mem_cgroup_move_lists(struct page_cgroup *pc,
 {
 }
 
+static inline int mem_cgroup_cache_charge(struct page *page,
+						struct mm_struct *mm)
+{
+	return 0;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/filemap.c b/mm/filemap.c
index b7a01e927953..8ae171cc2811 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -464,7 +464,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 
 	if (error == 0) {
 
-		error = mem_cgroup_charge(page, current->mm);
+		error = mem_cgroup_cache_charge(page, current->mm);
 		if (error)
 			goto out;
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5260658c90aa..10833d969e3f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -29,6 +29,8 @@
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 
+#include <asm/uaccess.h>
+
 struct cgroup_subsys mem_cgroup_subsys;
 static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
 
@@ -60,6 +62,7 @@ struct mem_cgroup {
 	 * spin_lock to protect the per cgroup LRU
 	 */
 	spinlock_t lru_lock;
+	unsigned long control_type;	/* control RSS or RSS+Pagecache */
 };
 
 /*
@@ -82,6 +85,15 @@ struct page_cgroup {
 					/* mapped and cached states */
 };
 
+enum {
+	MEM_CGROUP_TYPE_UNSPEC = 0,
+	MEM_CGROUP_TYPE_MAPPED,
+	MEM_CGROUP_TYPE_CACHED,
+	MEM_CGROUP_TYPE_ALL,
+	MEM_CGROUP_TYPE_MAX,
+};
+
+static struct mem_cgroup init_mem_cgroup;
 
 static inline
 struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
@@ -139,18 +151,18 @@ struct page_cgroup *page_get_page_cgroup(struct page *page)
 					(page->page_cgroup & ~PAGE_CGROUP_LOCK);
 }
 
-void __always_inline lock_page_cgroup(struct page *page)
+static void __always_inline lock_page_cgroup(struct page *page)
 {
 	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 	VM_BUG_ON(!page_cgroup_locked(page));
 }
 
-void __always_inline unlock_page_cgroup(struct page *page)
+static void __always_inline unlock_page_cgroup(struct page *page)
 {
 	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 {
 	if (active)
 		list_move(&pc->lru, &pc->mem_cgroup->active_list);
@@ -366,6 +378,22 @@ err:
 }
 
 /*
+ * See if the cached pages should be charged at all?
+ */
+int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm)
+{
+	struct mem_cgroup *mem;
+	if (!mm)
+		mm = &init_mm;
+
+	mem = rcu_dereference(mm->mem_cgroup);
+	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
+		return mem_cgroup_charge(page, mm);
+	else
+		return 0;
+}
+
+/*
  * Uncharging is always a welcome operation, we never complain, simply
  * uncharge.
  */
@@ -375,6 +403,10 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
 	struct page *page;
 	unsigned long flags;
 
+	/*
+	 * This can handle cases when a page is not charged at all and we
+	 * are switching between handling the control_type.
+	 */
 	if (!pc)
 		return;
 
@@ -425,6 +457,60 @@ static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
 				mem_cgroup_write_strategy);
 }
 
+static ssize_t mem_control_type_write(struct cgroup *cont,
+			struct cftype *cft, struct file *file,
+			const char __user *userbuf,
+			size_t nbytes, loff_t *pos)
+{
+	int ret;
+	char *buf, *end;
+	unsigned long tmp;
+	struct mem_cgroup *mem;
+
+	mem = mem_cgroup_from_cont(cont);
+	buf = kmalloc(nbytes + 1, GFP_KERNEL);
+	ret = -ENOMEM;
+	if (buf == NULL)
+		goto out;
+
+	buf[nbytes] = 0;
+	ret = -EFAULT;
+	if (copy_from_user(buf, userbuf, nbytes))
+		goto out_free;
+
+	ret = -EINVAL;
+	tmp = simple_strtoul(buf, &end, 10);
+	if (*end != '\0')
+		goto out_free;
+
+	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
+		goto out_free;
+
+	mem->control_type = tmp;
+	ret = nbytes;
+out_free:
+	kfree(buf);
+out:
+	return ret;
+}
+
+static ssize_t mem_control_type_read(struct cgroup *cont,
+				struct cftype *cft,
+				struct file *file, char __user *userbuf,
+				size_t nbytes, loff_t *ppos)
+{
+	unsigned long val;
+	char buf[64], *s;
+	struct mem_cgroup *mem;
+
+	mem = mem_cgroup_from_cont(cont);
+	s = buf;
+	val = mem->control_type;
+	s += sprintf(s, "%lu\n", val);
+	return simple_read_from_buffer((void __user *)userbuf, nbytes,
+			ppos, buf, s - buf);
+}
+
 static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "usage_in_bytes",
@@ -442,6 +528,11 @@ static struct cftype mem_cgroup_files[] = {
 		.private = RES_FAILCNT,
 		.read = mem_cgroup_read,
 	},
+	{
+		.name = "control_type",
+		.write = mem_control_type_write,
+		.read = mem_control_type_read,
+	},
 };
 
 static struct mem_cgroup init_mem_cgroup;
@@ -464,6 +555,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	INIT_LIST_HEAD(&mem->active_list);
 	INIT_LIST_HEAD(&mem->inactive_list);
 	spin_lock_init(&mem->lru_lock);
+	mem->control_type = MEM_CGROUP_TYPE_ALL;
 	return &mem->css;
 }
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f96e3ff1e791..88258869c8e7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -78,7 +78,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 	error = radix_tree_preload(gfp_mask);
 	if (!error) {
 
-		error = mem_cgroup_charge(page, current->mm);
+		error = mem_cgroup_cache_charge(page, current->mm);
 		if (error)
 			goto out;
 