Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2ef214ed5cf8..78a928d90267 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -436,6 +436,20 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
 	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
 	return (int)((rss * 100L) / total);
 }
+/*
+ * This function is called from vmscan.c.  In the page reclaim loop, the
+ * balance between the active and inactive lists is calculated.  For memory
+ * controller page reclaim, we should use the mem_cgroup's imbalance rather
+ * than the zone's global lru imbalance.
+ */
+long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
+{
+	unsigned long active, inactive;
+	/* active and inactive are numbers of pages, so 'long' is wide enough. */
+	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
+	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
+	return (long) (active / (inactive + 1));
+}
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
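
For context: mem_cgroup_reclaim_imbalance() reports the ratio of active to inactive pages charged to the cgroup, and the "+ 1" in the divisor guards against division by zero when the inactive list is empty. Below is a minimal, purely illustrative sketch of how a reclaim path could consume that ratio; the helper name and the threshold are hypothetical, and the actual caller in mm/vmscan.c is not part of this hunk.

/*
 * Illustrative sketch, not part of this patch: the helper name and the
 * threshold are made up to show intent.  The real consumer of
 * mem_cgroup_reclaim_imbalance() is the reclaim loop in mm/vmscan.c.
 */
static inline int mem_cgroup_active_list_dominates(struct mem_cgroup *mem)
{
	/*
	 * mem_cgroup_reclaim_imbalance() returns roughly
	 * active / (inactive + 1); a result greater than 1 means the
	 * cgroup's active list is larger than its inactive list, so
	 * reclaim should favour deactivating this cgroup's pages.
	 */
	return mem_cgroup_reclaim_imbalance(mem) > 1;
}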