 include/linux/memcontrol.h | 11 ++++++++++-
 mm/memcontrol.c            | 17 +++++++++++++++++
 2 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4ec712967f7c..085cdcd817b0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -64,6 +64,12 @@ extern int mem_cgroup_prepare_migration(struct page *page);
 extern void mem_cgroup_end_migration(struct page *page);
 extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
 
+/*
+ * For memory reclaim.
+ */
+extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
+
+
 #else /* CONFIG_CGROUP_MEM_CONT */
 static inline void mm_init_cgroup(struct mm_struct *mm,
 					struct task_struct *p)
@@ -135,7 +141,10 @@ mem_cgroup_page_migration(struct page *page, struct page *newpage)
 {
 }
 
-
+static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+	return 0;
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1637575d3339..2ef214ed5cf8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -420,6 +420,23 @@ void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 	spin_unlock(&mem->lru_lock);
 }
 
+/*
+ * Calculate mapped_ratio under the memory controller. This will be used in
+ * vmscan.c for determining whether we have to reclaim mapped pages.
+ */
+int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+	long total, rss;
+
+	/*
+	 * usage is recorded in bytes. But, here, we assume the number of
+	 * physical pages can be represented by "long" on any arch.
+	 */
+	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
+	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+	return (int)((rss * 100L) / total);
+}
+
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
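
The new helper converts the byte-based usage counter to a page count and returns the group's RSS page count as a percentage of it; the "+ 1" keeps the division safe for a freshly created, empty group. Below is a minimal userspace sketch of the same arithmetic, not kernel code: the 4 KiB PAGE_SHIFT and the sample numbers are assumptions chosen only to illustrate the calculation.

/* Standalone sketch of the mapped-ratio arithmetic above (not kernel code). */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for this example */

static int calc_mapped_ratio(unsigned long long usage_bytes, long rss_pages)
{
	/* usage is charged in bytes; convert to pages, +1 avoids div-by-zero */
	long total = (long)(usage_bytes >> PAGE_SHIFT) + 1L;

	return (int)((rss_pages * 100L) / total);
}

int main(void)
{
	/* 400 MiB charged to the group, 200 MiB (51200 pages) of it mapped RSS */
	unsigned long long usage = 400ULL << 20;
	long rss = 51200;

	/* prints 49: integer division plus the +1 guard round 50% down */
	printf("mapped ratio: %d%%\n", calc_mapped_ratio(usage, rss));
	return 0;
}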