author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>    2008-02-07 03:14:32 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-07 11:42:21 -0500
commit     58ae83db2a40dea15d4277d499a11dadc823c388
tree       18e43609ab67a9577c01188d39714999d5ed03d0
parent     6d12e2d8ddbe653d80ea4f71578481c1bc933025
per-zone and reclaim enhancements for memory controller: calculate mapped_ratio per cgroup

Define a function for calculating mapped_ratio in the memory cgroup.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Paul Menage <menage@google.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/memcontrol.h  11
-rw-r--r--  mm/memcontrol.c             17
2 files changed, 27 insertions(+), 1 deletion(-)
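The arithmetic this patch adds is easy to sanity-check outside the kernel. The following is a minimal userspace sketch of the same calculation; the struct, field names, and sample numbers are mock-ups chosen for illustration, not the kernel's mem_cgroup or res_counter types.

/*
 * Userspace mock of the mapped-ratio arithmetic added by this patch.
 * Types, names and sample values are illustrative only.
 */
#include <stdio.h>

#define PAGE_SHIFT 12				/* assume 4 KiB pages */

struct memcg_mock {
	unsigned long long usage_bytes;		/* stands in for mem->res.usage */
	long rss_pages;				/* stands in for MEM_CGROUP_STAT_RSS */
};

static int calc_mapped_ratio(const struct memcg_mock *mem)
{
	/* usage is in bytes; shift to pages, +1 guards against division by zero */
	long total = (long)(mem->usage_bytes >> PAGE_SHIFT) + 1L;

	return (int)((mem->rss_pages * 100L) / total);
}

int main(void)
{
	struct memcg_mock mem = {
		.usage_bytes = 200ULL << 20,	/* 200 MiB charged to the group */
		.rss_pages   = 25600,		/* 100 MiB of those pages are mapped */
	};

	/* prints 49: roughly half of the charged memory is mapped */
	printf("mapped ratio: %d%%\n", calc_mapped_ratio(&mem));
	return 0;
}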
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4ec712967f7c..085cdcd817b0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -64,6 +64,12 @@ extern int mem_cgroup_prepare_migration(struct page *page);
 extern void mem_cgroup_end_migration(struct page *page);
 extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
 
+/*
+ * For memory reclaim.
+ */
+extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
+
+
 #else /* CONFIG_CGROUP_MEM_CONT */
 static inline void mm_init_cgroup(struct mm_struct *mm,
 				struct task_struct *p)
@@ -135,7 +141,10 @@ mem_cgroup_page_migration(struct page *page, struct page *newpage)
 {
 }
 
-
+static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+	return 0;
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
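The static inline stub in the #else branch is what lets generic reclaim code call mem_cgroup_calc_mapped_ratio() unconditionally: with the controller compiled out, the ratio simply reads as 0 and any threshold test falls through. The sketch below mimics that pattern in userspace; the swappiness-based threshold, names and values are assumptions made for illustration, not something this patch defines.

/*
 * Userspace sketch of the #if/#else stub pattern used above. Flip the
 * define to 0 to mimic a build without the memory controller; the caller
 * compiles unchanged and the reclaim hint never fires.
 */
#include <stdio.h>

#define CONFIG_CGROUP_MEM_CONT 1	/* set to 0 to mimic the stub branch */

struct memcg_mock {
	long total_pages;
	long rss_pages;
};

#if CONFIG_CGROUP_MEM_CONT
static int calc_mapped_ratio(const struct memcg_mock *mem)
{
	return (int)((mem->rss_pages * 100L) / (mem->total_pages + 1L));
}
#else
static int calc_mapped_ratio(const struct memcg_mock *mem)
{
	(void)mem;
	return 0;			/* controller compiled out: ratio reads as zero */
}
#endif

int main(void)
{
	struct memcg_mock mem = { .total_pages = 51200, .rss_pages = 40960 };
	int swappiness = 60;		/* hypothetical tunable for the threshold */

	/* caller needs no #ifdef; with the stub the condition is never true */
	if (calc_mapped_ratio(&mem) > 100 - swappiness)
		printf("mostly mapped: reclaim would have to target mapped pages\n");
	return 0;
}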
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1637575d3339..2ef214ed5cf8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -420,6 +420,23 @@ void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 	spin_unlock(&mem->lru_lock);
 }
 
+/*
+ * Calculate mapped_ratio under memory controller. This will be used in
+ * vmscan.c for deteremining we have to reclaim mapped pages.
+ */
+int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+	long total, rss;
+
+	/*
+	 * usage is recorded in bytes. But, here, we assume the number of
+	 * physical pages can be represented by "long" on any arch.
+	 */
+	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
+	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+	return (int)((rss * 100L) / total);
+}
+
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,