author		KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2009-01-07 21:08:21 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-08 11:31:08 -0500
commit		9439c1c95b5c25b8031b2a7eb7e1590eb84be7f5 (patch)
tree		79eaae56278407e0a223e562a2d0079834ca5529
parent		3e2f41f1f64744f7942980d93cc93dd3e5924560 (diff)
memcg: remove mem_cgroup_calc_reclaim()
Now that get_scan_ratio() returns the correct value even during memcg reclaim, mem_cgroup_calc_reclaim() can be removed. As a result, memcg reclaim gains the same anon/file reclaim balancing capability as global reclaim.

Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
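For illustration only, the following is a condensed sketch (not the verbatim kernel code) of the scan-target loop in shrink_zone() as it looks after this patch, based on the mm/vmscan.c hunk below: both global and memcg reclaim now derive the per-LRU scan count from get_scan_ratio()'s anon/file percentages, and only the global path touches zone->lru[l].nr_scan.

	/*
	 * Condensed sketch of the post-patch loop in shrink_zone();
	 * see the mm/vmscan.c hunk below for the actual change.
	 */
	for_each_evictable_lru(l) {
		int file = is_file_lru(l);
		int scan = zone_page_state(zone, NR_LRU_BASE + l);

		if (priority) {
			scan >>= priority;			/* scale by reclaim priority */
			scan = (scan * percent[file]) / 100;	/* anon/file balance from get_scan_ratio() */
		}
		if (scan_global_lru(sc)) {
			/* global reclaim: accumulate until a full batch is ready */
			zone->lru[l].nr_scan += scan;
			nr[l] = zone->lru[l].nr_scan;
			if (nr[l] >= swap_cluster_max)
				zone->lru[l].nr_scan = 0;
			else
				nr[l] = 0;
		} else {
			/* memcg limit reclaim: use the balanced count directly */
			nr[l] = scan;
		}
	}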
-rw-r--r--	include/linux/memcontrol.h	10
-rw-r--r--	mm/memcontrol.c	21
-rw-r--r--	mm/vmscan.c	27
3 files changed, 10 insertions, 48 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 36b8ebb39b82..8752052da8df 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -97,9 +97,6 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
 							int priority);
 extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 							int priority);
-
-extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-					int priority, enum lru_list lru);
 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
 				       struct zone *zone);
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
@@ -244,13 +241,6 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 {
 }
 
-static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
-					struct zone *zone, int priority,
-					enum lru_list lru)
-{
-	return 0;
-}
-
 static inline bool mem_cgroup_disabled(void)
 {
 	return true;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7b7f4dc05035..b8c1e5acc25a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -414,27 +414,6 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
 	mem->prev_priority = priority;
 }
 
-/*
- * Calculate # of pages to be scanned in this priority/zone.
- * See also vmscan.c
- *
- * priority starts from "DEF_PRIORITY" and decremented in each loop.
- * (see include/linux/mmzone.h)
- */
-
-long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-					int priority, enum lru_list lru)
-{
-	long nr_pages;
-	int nid = zone->zone_pgdat->node_id;
-	int zid = zone_idx(zone);
-	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
-
-	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
-
-	return (nr_pages >> priority);
-}
-
 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
 {
 	unsigned long active;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 56fc7abe4d23..66bb6ef44b5f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1466,30 +1466,23 @@ static void shrink_zone(int priority, struct zone *zone,
 	get_scan_ratio(zone, sc, percent);
 
 	for_each_evictable_lru(l) {
-		if (scan_global_lru(sc)) {
-			int file = is_file_lru(l);
-			int scan;
+		int file = is_file_lru(l);
+		int scan;
 
-			scan = zone_page_state(zone, NR_LRU_BASE + l);
-			if (priority) {
-				scan >>= priority;
-				scan = (scan * percent[file]) / 100;
-			}
+		scan = zone_page_state(zone, NR_LRU_BASE + l);
+		if (priority) {
+			scan >>= priority;
+			scan = (scan * percent[file]) / 100;
+		}
+		if (scan_global_lru(sc)) {
 			zone->lru[l].nr_scan += scan;
 			nr[l] = zone->lru[l].nr_scan;
 			if (nr[l] >= swap_cluster_max)
 				zone->lru[l].nr_scan = 0;
 			else
 				nr[l] = 0;
-		} else {
-			/*
-			 * This reclaim occurs not because zone memory shortage
-			 * but because memory controller hits its limit.
-			 * Don't modify zone reclaim related data.
-			 */
-			nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
-							priority, l);
-		}
+		} else
+			nr[l] = scan;
 	}
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||