about summary refs log tree commit diff stats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
author	Wu Fengguang <fengguang.wu@intel.com>	2009-06-16 18:32:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:39 -0400
commit	af166777cf451f0373b952ce6766dc1c25385686 (patch)
tree	2051064ffccb223e1f29b8af52ef28651eff9062 /mm/vmscan.c
parent	08d9ae7cbbd0c5c07573d072ec771e997a9a39e0 (diff)
vmscan: ZVC updates in shrink_active_list() can be done once
This effectively lifts the unit of updates to nr_inactive_* and
pgdeactivate from PAGEVEC_SIZE=14 to SWAP_CLUSTER_MAX=32, or
MAX_ORDER_NR_PAGES=1024 for reclaim_zone().

Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	11
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d4da097533ce..7592d8eb1148 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1223,7 +1223,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 			struct scan_control *sc, int priority, int file)
 {
 	unsigned long pgmoved;
-	int pgdeactivate = 0;
 	unsigned long pgscanned;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_inactive);
@@ -1252,7 +1251,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	__mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 
-	pgmoved = 0;
+	pgmoved = 0;  /* count referenced (mapping) mapped pages */
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
@@ -1286,7 +1285,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 */
 	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
-	pgmoved = 0;
+	pgmoved = 0;  /* count pages moved to inactive list */
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1299,10 +1298,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		mem_cgroup_add_lru_list(page, lru);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
-			pgdeactivate += pgmoved;
-			pgmoved = 0;
 			if (buffer_heads_over_limit)
 				pagevec_strip(&pvec);
 			__pagevec_release(&pvec);
@@ -1310,9 +1306,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		}
 	}
 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-	pgdeactivate += pgmoved;
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
-	__count_vm_events(PGDEACTIVATE, pgdeactivate);
+	__count_vm_events(PGDEACTIVATE, pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 	if (buffer_heads_over_limit)
 		pagevec_strip(&pvec);