aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorMinchan Kim <minchan.kim@gmail.com>2011-10-31 20:06:55 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-10-31 20:30:44 -0400
commitf80c0673610e36ae29d63e3297175e22f70dde5f (patch)
tree0a6aab3b637fa75961224e9261eb544156672c34 /mm/vmscan.c
parent39deaf8585152f1a35c1676d3d7dc6ae0fb65967 (diff)
mm: zone_reclaim: make isolate_lru_page() filter-aware
In the __zone_reclaim case, we don't want to shrink mapped pages. Nonetheless, we have been isolating mapped pages and re-adding them to the LRU's head. That is unnecessary CPU overhead and causes LRU churn. Of course, when we isolate a page it might be mapped, but by the time we try to migrate the page it may no longer be mapped, so it could still be migrated. But that race is rare, and even when it happens it's no big deal. Signed-off-by: Minchan Kim <minchan.kim@gmail.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Reviewed-by: Michal Hocko <mhocko@suse.cz> Cc: Mel Gorman <mgorman@suse.de> Cc: Rik van Riel <riel@redhat.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c20
1 file changed, 18 insertions, 2 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c007e78d7078..b68a9342d5a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1048,6 +1048,9 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
 	if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page)))
 		return ret;
 
+	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
+		return ret;
+
 	if (likely(get_page_unless_zero(page))) {
 		/*
 		 * Be careful not to clear PageLRU until after we're
@@ -1471,6 +1474,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 		reclaim_mode |= ISOLATE_ACTIVE;
 
 	lru_add_drain();
+
+	if (!sc->may_unmap)
+		reclaim_mode |= ISOLATE_UNMAPPED;
+	if (!sc->may_writepage)
+		reclaim_mode |= ISOLATE_CLEAN;
+
 	spin_lock_irq(&zone->lru_lock);
 
 	if (scanning_global_lru(sc)) {
@@ -1588,19 +1597,26 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	struct page *page;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 	unsigned long nr_rotated = 0;
+	isolate_mode_t reclaim_mode = ISOLATE_ACTIVE;
 
 	lru_add_drain();
+
+	if (!sc->may_unmap)
+		reclaim_mode |= ISOLATE_UNMAPPED;
+	if (!sc->may_writepage)
+		reclaim_mode |= ISOLATE_CLEAN;
+
 	spin_lock_irq(&zone->lru_lock);
 	if (scanning_global_lru(sc)) {
 		nr_taken = isolate_pages_global(nr_pages, &l_hold,
 						&pgscanned, sc->order,
-						ISOLATE_ACTIVE, zone,
+						reclaim_mode, zone,
 						1, file);
 		zone->pages_scanned += pgscanned;
 	} else {
 		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
 						&pgscanned, sc->order,
-						ISOLATE_ACTIVE, zone,
+						reclaim_mode, zone,
 						sc->mem_cgroup, 1, file);
 		/*
 		 * mem_cgroup_isolate_pages() keeps track of