aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorMinchan Kim <minchan.kim@gmail.com>2010-10-26 17:21:31 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-10-26 19:52:06 -0400
commit74e3f3c3391d81a959f58a1191a560703a4415b4 (patch)
treeb4688926ebe2c40b422bd6df0989ec09ea0f7046 /mm/vmscan.c
parent49ac825587f33afec8841b7fab2eb4db775014e6 (diff)
vmscan: prevent background aging of anon page in no swap system
Ying Han reported that background aging of anon pages in a no-swap system causes unnecessary TLB flushes. When I sent an earlier patch (69c8548175), I wanted this behavior, but Rik pointed out that aging of anon pages should be allowed, to give them a chance to be promoted from the inactive to the active LRU. There are two problems with that: 1) In a non-swap system, it never makes sense to age anon pages. 2) When swap is configured but swapon has not yet been run, it doesn't make sense to age anon pages until swap-on time. This is arguable, though: if we have aged anon pages before swapon, the VM will already have moved anon pages from the active to the inactive list, so when the admin eventually runs swapon, the VM can avoid reclaiming hot pages and thus protect them from being swapped out. But let's think about it. When does swap-on happen? It depends on the admin; we can't predict it. Nonetheless, we have been aging anon pages just to protect hot pages from swapout. That means we pay runtime overhead while below the high watermark in exchange for avoiding hot-page swap-in/out overhead if and when the VM decides to swap. Is that trade-off real? Let's look at it in more detail. We don't promote anon pages in the non-swap case, so even though the VM ages anon pages, those pages stay on the inactive LRU for a long time, and many of them will have their accessed bit set again. Accessed-bit-based hot/cold separation would therefore be pointless. This patch prevents unnecessary demotion of anon pages in not-yet-swapped-on and swap-unconfigured systems. Moreover, in a system with swap compiled out, inactive_anon_is_low() can be compiled away entirely. A possible side effect is that hot anon pages could be swapped out right after the admin runs swapon, but the system should reach a steady state sooner or later, so it's not a big problem. We lose something small but gain more (fewer TLB flushes and no unnecessary function calls to demote anon pages).
Signed-off-by: Ying Han <yinghan@google.com> Signed-off-by: Minchan Kim <minchan.kim@gmail.com> Reviewed-by: Rik van Riel <riel@redhat.com> Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c17
1 file changed, 16 insertions, 1 deletion
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f5871ee50000..0c33a0997907 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1505,6 +1505,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1505 spin_unlock_irq(&zone->lru_lock); 1505 spin_unlock_irq(&zone->lru_lock);
1506} 1506}
1507 1507
1508#ifdef CONFIG_SWAP
1508static int inactive_anon_is_low_global(struct zone *zone) 1509static int inactive_anon_is_low_global(struct zone *zone)
1509{ 1510{
1510 unsigned long active, inactive; 1511 unsigned long active, inactive;
@@ -1530,12 +1531,26 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1530{ 1531{
1531 int low; 1532 int low;
1532 1533
1534 /*
1535 * If we don't have swap space, anonymous page deactivation
1536 * is pointless.
1537 */
1538 if (!total_swap_pages)
1539 return 0;
1540
1533 if (scanning_global_lru(sc)) 1541 if (scanning_global_lru(sc))
1534 low = inactive_anon_is_low_global(zone); 1542 low = inactive_anon_is_low_global(zone);
1535 else 1543 else
1536 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup); 1544 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1537 return low; 1545 return low;
1538} 1546}
1547#else
1548static inline int inactive_anon_is_low(struct zone *zone,
1549 struct scan_control *sc)
1550{
1551 return 0;
1552}
1553#endif
1539 1554
1540static int inactive_file_is_low_global(struct zone *zone) 1555static int inactive_file_is_low_global(struct zone *zone)
1541{ 1556{
@@ -1781,7 +1796,7 @@ static void shrink_zone(int priority, struct zone *zone,
1781 * Even if we did not try to evict anon pages at all, we want to 1796 * Even if we did not try to evict anon pages at all, we want to
1782 * rebalance the anon lru active/inactive ratio. 1797 * rebalance the anon lru active/inactive ratio.
1783 */ 1798 */
1784 if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0) 1799 if (inactive_anon_is_low(zone, sc))
1785 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); 1800 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1786 1801
1787 throttle_vm_writeout(sc->gfp_mask); 1802 throttle_vm_writeout(sc->gfp_mask);