path: root/mm/vmscan.c
author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2009-09-21 20:01:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-09-22 10:17:27 -0400
commit    44c241f166b31999482c3c40448f4bbb2157a804 (patch)
tree      1bfa7e8940de223e0ac35e0598c942f4e4404e20 /mm/vmscan.c
parent    b259fbde0a86085264c89aa2ce9c6e35792a1aad (diff)
mm: rename pgmoved variable in shrink_active_list()
Currently the pgmoved variable has two meanings, which makes the code
harder to review. This patch separates the two roles into distinct
variables.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
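To make the reviewing problem concrete, here is a minimal stand-alone C
sketch (not kernel code; take_items() and shrink_sketch() are
hypothetical stand-ins for sc->isolate_pages() and shrink_active_list())
contrasting the old single-variable pattern with the renamed counters:

#include <stdio.h>

/*
 * Hypothetical stand-in for sc->isolate_pages(): pretends to isolate
 * up to "max" items and returns how many were actually taken.
 */
static unsigned long take_items(unsigned long max)
{
	return max;
}

/*
 * Before the patch a single counter played both roles:
 *
 *	pgmoved = take_items(n);	meaning 1: pages taken off the list
 *	...
 *	pgmoved = 0;
 *	pgmoved++;  (in the scan loop)	meaning 2: pages rotated back
 *
 * After the patch each statistic has its own name, so a reviewer can
 * see at a glance which counter feeds recent_scanned and which feeds
 * recent_rotated.
 */
static void shrink_sketch(unsigned long nr_items, const int *referenced)
{
	unsigned long nr_taken;		/* items isolated from the list */
	unsigned long nr_rotated = 0;	/* items given another trip */
	unsigned long i;

	nr_taken = take_items(nr_items);

	for (i = 0; i < nr_taken; i++)
		if (referenced[i])
			nr_rotated++;

	printf("taken=%lu rotated=%lu\n", nr_taken, nr_rotated);
}

int main(void)
{
	const int referenced[4] = { 1, 0, 1, 1 };

	shrink_sketch(4, referenced);	/* prints: taken=4 rotated=3 */
	return 0;
}

The point of the sketch is only that each statistic now has exactly one
name, which is what the diff below does for the real function.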
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  16
1 file changed, 8 insertions, 8 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ba8228e0a806..45a150a3a442 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1244,7 +1244,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 			struct scan_control *sc, int priority, int file)
 {
-	unsigned long pgmoved;
+	unsigned long nr_taken;
 	unsigned long pgscanned;
 	unsigned long vm_flags;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
@@ -1252,10 +1252,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	LIST_HEAD(l_inactive);
 	struct page *page;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+	unsigned long nr_rotated = 0;
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
-	pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
+	nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
 					ISOLATE_ACTIVE, zone,
 					sc->mem_cgroup, 1, file);
 	/*
@@ -1265,16 +1266,15 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	if (scanning_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
 	}
-	reclaim_stat->recent_scanned[!!file] += pgmoved;
+	reclaim_stat->recent_scanned[!!file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	if (file)
-		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
+		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
 	else
-		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
+		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
 
-	pgmoved = 0;  /* count referenced (mapping) mapped pages */
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
@@ -1288,7 +1288,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		/* page_referenced clears PageReferenced */
 		if (page_mapping_inuse(page) &&
 		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-			pgmoved++;
+			nr_rotated++;
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So
@@ -1317,7 +1317,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * helps balance scan pressure between file and anonymous pages in
 	 * get_scan_ratio.
 	 */
-	reclaim_stat->recent_rotated[!!file] += pgmoved;
+	reclaim_stat->recent_rotated[!!file] += nr_rotated;
 
 	move_active_pages_to_lru(zone, &l_active,
 					LRU_ACTIVE + file * LRU_FILE);