path: root/mm/vmscan.c
author		Wu Fengguang <fengguang.wu@intel.com>	2009-06-16 18:33:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:45 -0400
commit		3eb4140f0389bdada022d5e8efd88504ad30df14
tree		6f3f229a2a3bee1bd0a5846c7e7f7987a5cc602b	/mm/vmscan.c
parent		8cab4754d24a0f2e05920170c845bd84472814c6
vmscan: merge duplicate code in shrink_active_list()
The "move pages to active list" and "move pages to inactive list" code blocks are mostly identical and can be served by a function. Thanks to Andrew Morton for pointing this out. Note that buffer_heads_over_limit check will also be carried out for re-activated pages, which is slightly different from pre-2.6.28 kernels. Also, Rik's "vmscan: evict use-once pages first" patch could totally stop scans of active file list when memory pressure is low. So the net effect could be, the number of buffer heads is now more likely to grow large. However that's fine according to Johannes' comments: I don't think that this could be harmful. We just preserve the buffer mappings of what we consider the working set and with low memory pressure, as you say, this set is not big. As to stripping of reactivated pages: the only pages we re-activate for now are those VM_EXEC mapped ones. Since we don't expect IO from or to these pages, removing the buffer mappings in case they grow too large should be okay, I guess. Cc: Pekka Enberg <penberg@cs.helsinki.fi> Acked-by: Peter Zijlstra <peterz@infradead.org> Reviewed-by: Rik van Riel <riel@redhat.com> Reviewed-by: Minchan Kim <minchan.kim@gmail.com> Reviewed-by: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Wu Fengguang <fengguang.wu@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	95
1 file changed, 42 insertions(+), 53 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1024979d6589..f3a55d1f9ab7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1204,6 +1204,43 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority)
  * But we had to alter page->flags anyway.
  */
 
+static void move_active_pages_to_lru(struct zone *zone,
+				     struct list_head *list,
+				     enum lru_list lru)
+{
+	unsigned long pgmoved = 0;
+	struct pagevec pvec;
+	struct page *page;
+
+	pagevec_init(&pvec, 1);
+
+	while (!list_empty(list)) {
+		page = lru_to_page(list);
+		prefetchw_prev_lru_page(page, list, flags);
+
+		VM_BUG_ON(PageLRU(page));
+		SetPageLRU(page);
+
+		VM_BUG_ON(!PageActive(page));
+		if (!is_active_lru(lru))
+			ClearPageActive(page);	/* we are de-activating */
+
+		list_move(&page->lru, &zone->lru[lru].list);
+		mem_cgroup_add_lru_list(page, lru);
+		pgmoved++;
+
+		if (!pagevec_add(&pvec, page) || list_empty(list)) {
+			spin_unlock_irq(&zone->lru_lock);
+			if (buffer_heads_over_limit)
+				pagevec_strip(&pvec);
+			__pagevec_release(&pvec);
+			spin_lock_irq(&zone->lru_lock);
+		}
+	}
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
+	if (!is_active_lru(lru))
+		__count_vm_events(PGDEACTIVATE, pgmoved);
+}
 
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 			       struct scan_control *sc, int priority, int file)
@@ -1215,8 +1252,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	LIST_HEAD(l_active);
 	LIST_HEAD(l_inactive);
 	struct page *page;
-	struct pagevec pvec;
-	enum lru_list lru;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	lru_add_drain();
@@ -1233,6 +1268,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	}
 	reclaim_stat->recent_scanned[!!file] += pgmoved;
 
+	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	if (file)
 		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
 	else
@@ -1275,8 +1311,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	/*
 	 * Move pages back to the lru list.
 	 */
-	pagevec_init(&pvec, 1);
-
 	spin_lock_irq(&zone->lru_lock);
 	/*
 	 * Count referenced pages from currently used mappings as rotated,
@@ -1286,57 +1320,12 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 */
 	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
-	pgmoved = 0;  /* count pages moved to inactive list */
-	lru = LRU_BASE + file * LRU_FILE;
-	while (!list_empty(&l_inactive)) {
-		page = lru_to_page(&l_inactive);
-		prefetchw_prev_lru_page(page, &l_inactive, flags);
-		VM_BUG_ON(PageLRU(page));
-		SetPageLRU(page);
-		VM_BUG_ON(!PageActive(page));
-		ClearPageActive(page);
-
-		list_move(&page->lru, &zone->lru[lru].list);
-		mem_cgroup_add_lru_list(page, lru);
-		pgmoved++;
-		if (!pagevec_add(&pvec, page)) {
-			spin_unlock_irq(&zone->lru_lock);
-			if (buffer_heads_over_limit)
-				pagevec_strip(&pvec);
-			__pagevec_release(&pvec);
-			spin_lock_irq(&zone->lru_lock);
-		}
-	}
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-	__count_zone_vm_events(PGREFILL, zone, pgscanned);
-	__count_vm_events(PGDEACTIVATE, pgmoved);
-
-	pgmoved = 0;  /* count pages moved back to active list */
-	lru = LRU_ACTIVE + file * LRU_FILE;
-	while (!list_empty(&l_active)) {
-		page = lru_to_page(&l_active);
-		prefetchw_prev_lru_page(page, &l_active, flags);
-		VM_BUG_ON(PageLRU(page));
-		SetPageLRU(page);
-		VM_BUG_ON(!PageActive(page));
-
-		list_move(&page->lru, &zone->lru[lru].list);
-		mem_cgroup_add_lru_list(page, lru);
-		pgmoved++;
-		if (!pagevec_add(&pvec, page)) {
-			spin_unlock_irq(&zone->lru_lock);
-			if (buffer_heads_over_limit)
-				pagevec_strip(&pvec);
-			__pagevec_release(&pvec);
-			spin_lock_irq(&zone->lru_lock);
-		}
-	}
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
+	move_active_pages_to_lru(zone, &l_active,
+						LRU_ACTIVE + file * LRU_FILE);
+	move_active_pages_to_lru(zone, &l_inactive,
+						LRU_BASE + file * LRU_FILE);
 
 	spin_unlock_irq(&zone->lru_lock);
-	if (buffer_heads_over_limit)
-		pagevec_strip(&pvec);
-	pagevec_release(&pvec);
 }
 
 static int inactive_anon_is_low_global(struct zone *zone)