path: root/mm/vmscan.c
author		KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2010-05-24 17:32:37 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-25 11:07:00 -0400
commit		5f53e76299ceebd68bdf9495e8ff80db77711236
tree		2ecb8324a6593a49868161d85511cc14d474900a /mm/vmscan.c
parent		bf8abe8b926f7546eb763fd2a088fe461dde6317
vmscan: page_check_references(): check low order lumpy reclaim properly
If vmscan is in lumpy reclaim mode, it has to ignore the referenced bit in order to build contiguous free pages, but the current page_check_references() doesn't. Fix it.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 41 ++++++++++++++++++++++++++---------------
 1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8e1d72333e8a..cd4a5edf5be2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -77,6 +77,12 @@ struct scan_control {
 
 	int order;
 
+	/*
+	 * Intend to reclaim enough contenious memory rather than to reclaim
+	 * enough amount memory. I.e, it's the mode for high order allocation.
+	 */
+	bool lumpy_reclaim_mode;
+
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
 
@@ -575,7 +581,7 @@ static enum page_references page_check_references(struct page *page,
 	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+	if (sc->lumpy_reclaim_mode)
 		return PAGEREF_RECLAIM;
 
 	/*
@@ -1125,7 +1131,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	int lumpy_reclaim = 0;
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1135,17 +1140,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		return SWAP_CLUSTER_MAX;
 	}
 
-	/*
-	 * If we need a large contiguous chunk of memory, or have
-	 * trouble getting a small set of contiguous pages, we
-	 * will reclaim both active and inactive pages.
-	 *
-	 * We use the same threshold as pageout congestion_wait below.
-	 */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		lumpy_reclaim = 1;
-	else if (sc->order && priority < DEF_PRIORITY - 2)
-		lumpy_reclaim = 1;
-
 	pagevec_init(&pvec, 1);
 
@@ -1158,7 +1152,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_freed;
 		unsigned long nr_active;
 		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+		int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
 		unsigned long nr_anon;
 		unsigned long nr_file;
 
@@ -1211,7 +1205,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		 * but that should be acceptable to the caller
 		 */
 		if (nr_freed < nr_taken && !current_is_kswapd() &&
-		    lumpy_reclaim) {
+		    sc->lumpy_reclaim_mode) {
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 			/*
@@ -1639,6 +1633,21 @@ out:
 	}
 }
 
+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
+{
+	/*
+	 * If we need a large contiguous chunk of memory, or have
+	 * trouble getting a small set of contiguous pages, we
+	 * will reclaim both active and inactive pages.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		sc->lumpy_reclaim_mode = 1;
+	else if (sc->order && priority < DEF_PRIORITY - 2)
+		sc->lumpy_reclaim_mode = 1;
+	else
+		sc->lumpy_reclaim_mode = 0;
+}
+
 /*
  * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
  */
@@ -1653,6 +1662,8 @@ static void shrink_zone(int priority, struct zone *zone,
 
 	get_scan_count(zone, sc, nr, priority);
 
+	set_lumpy_reclaim_mode(priority, sc);
+
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 		nr[LRU_INACTIVE_FILE]) {
 		for_each_evictable_lru(l) {
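
For readers skimming the change, here is a minimal, self-contained user-space sketch of the logic the hunks above introduce; it is an illustration, not the kernel code. The values of PAGE_ALLOC_COSTLY_ORDER and DEF_PRIORITY match the usual kernel defaults, but struct scan_control_sketch, ignore_references() and the main() driver are simplified stand-ins invented for this example. It demonstrates the behavior being fixed: lumpy reclaim mode is now decided once per shrink_zone() pass from both sc->order and the reclaim priority, and page_check_references() consults that flag, so low-order lumpy reclaim also ignores the referenced bit instead of only orders above PAGE_ALLOC_COSTLY_ORDER.

/*
 * Minimal user-space sketch of the decision logic added by this patch.
 * The struct, ignore_references() and main() are simplified stand-ins,
 * not the real mm/vmscan.c definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER	3	/* usual kernel value */
#define DEF_PRIORITY		12	/* usual kernel value */

struct scan_control_sketch {
	int  order;			/* allocation order reclaim is working for */
	bool lumpy_reclaim_mode;	/* field added by this patch */
};

/* Mirrors set_lumpy_reclaim_mode(): decide the mode once per shrink_zone() pass. */
static void set_lumpy_reclaim_mode(int priority, struct scan_control_sketch *sc)
{
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		sc->lumpy_reclaim_mode = true;
	else if (sc->order && priority < DEF_PRIORITY - 2)
		sc->lumpy_reclaim_mode = true;
	else
		sc->lumpy_reclaim_mode = false;
}

/*
 * Mirrors the fixed check in page_check_references(): under lumpy reclaim the
 * referenced bit is ignored so contiguous ranges can be freed, regardless of
 * whether the order is above PAGE_ALLOC_COSTLY_ORDER.
 */
static bool ignore_references(const struct scan_control_sketch *sc)
{
	return sc->lumpy_reclaim_mode;
}

int main(void)
{
	struct scan_control_sketch sc = { .order = 2, .lumpy_reclaim_mode = false };

	/*
	 * Order 2 is below the costly threshold, but once the reclaim priority
	 * has dropped far enough, lumpy mode kicks in and references are ignored.
	 */
	set_lumpy_reclaim_mode(DEF_PRIORITY - 3, &sc);
	printf("ignore referenced bit: %s\n",
	       ignore_references(&sc) ? "yes" : "no");
	return 0;
}

Computing the mode once and storing it in scan_control keeps page_check_references() and shrink_inactive_list() in agreement; before this patch the two sites used different conditions, so low-order lumpy reclaim still honoured the referenced bit and could fail to free a contiguous range.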