author		KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2008-02-07 03:14:17 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 11:42:20 -0500
commit		217bc3194d57150549e9234e6ddfee30de28cc78 (patch)
tree		1de36e60115164b9c3d86b176ce45989cafbc2f2 /mm
parent		cc8475822f8a4b17e9b76e7fadb6b9a341860422 (diff)
memory cgroup enhancements: remember "a page is charged as page cache"
Add a flag to page_cgroup to remember "this page is charged as cache."
Cache here includes both page cache and swap cache. This is useful for
implementing precise accounting in the memory cgroup.
TODO:
distinguish page-cache and swap-cache
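
For illustration only (not part of the patch): a minimal user-space sketch of
the flag logic this change introduces. The charge_type enum, the
PAGE_CGROUP_FLAG_CACHE bit, and the flag assignment mirror the diff below,
while page_cgroup_is_cache() is a hypothetical helper that later statistics
code might use to tell cache charges from mapped (RSS) charges.

	#include <assert.h>

	#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */

	enum charge_type {
		MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
		MEM_CGROUP_CHARGE_TYPE_MAPPED,
	};

	struct page_cgroup {
		int flags;	/* mapped and cached states */
	};

	/* Mirrors the flag assignment done in mem_cgroup_charge_common(). */
	static void set_charge_flags(struct page_cgroup *pc, enum charge_type ctype)
	{
		pc->flags = 0;
		if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
			pc->flags |= PAGE_CGROUP_FLAG_CACHE;
	}

	/* Hypothetical helper: lets accounting code test the new flag. */
	static int page_cgroup_is_cache(const struct page_cgroup *pc)
	{
		return pc->flags & PAGE_CGROUP_FLAG_CACHE;
	}

	int main(void)
	{
		struct page_cgroup pc;

		set_charge_flags(&pc, MEM_CGROUP_CHARGE_TYPE_CACHE);
		assert(page_cgroup_is_cache(&pc));

		set_charge_flags(&pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
		assert(!page_cgroup_is_cache(&pc));
		return 0;
	}

Keeping mem_cgroup_charge() and mem_cgroup_cache_charge() as thin wrappers
around a common helper means the charge type is decided at the call site and
recorded once, at charge time, in pc->flags.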
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	24
1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c867612d9c04..975e89935d52 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -83,7 +83,9 @@ struct page_cgroup {
 	struct mem_cgroup *mem_cgroup;
 	atomic_t ref_cnt;		/* Helpful when pages move b/w  */
 					/* mapped and cached states     */
+	int	 flags;
 };
+#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
 
 enum {
 	MEM_CGROUP_TYPE_UNSPEC = 0,
@@ -93,6 +95,11 @@ enum {
 	MEM_CGROUP_TYPE_MAX,
 };
 
+enum charge_type {
+	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
+	MEM_CGROUP_CHARGE_TYPE_MAPPED,
+};
+
 static struct mem_cgroup init_mem_cgroup;
 
 static inline
@@ -306,8 +313,8 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
  * 0 if the charge was successful
  * < 0 if the cgroup is over its limit
  */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-				gfp_t gfp_mask)
+static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
+				gfp_t gfp_mask, enum charge_type ctype)
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc;
@@ -409,6 +416,9 @@ noreclaim:
 	atomic_set(&pc->ref_cnt, 1);
 	pc->mem_cgroup = mem;
 	pc->page = page;
+	pc->flags = 0;
+	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
+		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
 	if (page_cgroup_assign_new_page_cgroup(page, pc)) {
 		/*
 		 * an another charge is added to this page already.
@@ -433,6 +443,13 @@ err:
 	return -ENOMEM;
 }
 
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+			gfp_t gfp_mask)
+{
+	return mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
 /*
  * See if the cached pages should be charged at all?
  */
@@ -445,7 +462,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 
 	mem = rcu_dereference(mm->mem_cgroup);
 	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
-		return mem_cgroup_charge(page, mm, gfp_mask);
+		return mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_CACHE);
 	else
 		return 0;
 }