 include/linux/memcontrol.h |  8 ++++++++
 mm/memcontrol.c            | 14 ++++++++++++++
 2 files changed, 22 insertions(+), 0 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 085cdcd817b0..bb9c079eeb0c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -68,6 +68,8 @@ extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
  * For memory reclaim.
  */
 extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
+extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);
+
 
 
 #else /* CONFIG_CGROUP_MEM_CONT */
@@ -145,6 +147,12 @@ static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
 {
 	return 0;
 }
+
+static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
+{
+	return 0;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2ef214ed5cf8..78a928d90267 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -436,6 +436,20 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
 	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
 	return (int)((rss * 100L) / total);
 }
+/*
+ * This function is called from vmscan.c. In the page reclaiming loop the
+ * balance between the active and inactive lists is calculated. For memory
+ * controller page reclaiming we should use the mem_cgroup's imbalance
+ * rather than the zone's global LRU imbalance.
+ */
+long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
+{
+	unsigned long active, inactive;
+	/* active and inactive are numbers of pages; 'long' is wide enough */
+	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
+	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
+	return (long) (active / (inactive + 1));
+}
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
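
As a rough illustration of how this helper could be consumed (not part of this patch; the caller name and the threshold below are hypothetical), a reclaim path working on behalf of a memory cgroup might consult the cgroup's own ratio instead of the zone's global LRU imbalance:

/* Hypothetical caller, for illustration only -- not from this patch. */
static int mem_cgroup_needs_inactive_refill(struct mem_cgroup *mem)
{
	/*
	 * mem_cgroup_reclaim_imbalance() returns active / (inactive + 1),
	 * so a value above 1 means the cgroup's inactive list is short
	 * relative to its active list.
	 */
	long imbalance = mem_cgroup_reclaim_imbalance(mem);

	return imbalance > 1;	/* threshold chosen only for this sketch */
}

For example, a cgroup with 3000 active and 1000 inactive pages yields 3000 / 1001 = 2 (integer division), while one with more inactive than active pages yields 0.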