Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  52
1 file changed, 45 insertions, 7 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6be2068f61c8..1024979d6589 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1212,6 +1212,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	unsigned long pgscanned;
 	unsigned long vm_flags;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
+	LIST_HEAD(l_active);
 	LIST_HEAD(l_inactive);
 	struct page *page;
 	struct pagevec pvec;
@@ -1251,28 +1252,42 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 		/* page_referenced clears PageReferenced */
 		if (page_mapping_inuse(page) &&
-		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags))
+		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
 			pgmoved++;
+			/*
+			 * Identify referenced, file-backed active pages and
+			 * give them one more trip around the active list, so
+			 * that executable code gets a better chance to stay
+			 * in memory under moderate memory pressure.  Anon
+			 * pages are not likely to be evicted by use-once
+			 * streaming IO, plus JVM can create lots of anon
+			 * VM_EXEC pages, so we ignore them here.
+			 */
+			if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
+				list_add(&page->lru, &l_active);
+				continue;
+			}
+		}
 
 		list_add(&page->lru, &l_inactive);
 	}
 
 	/*
-	 * Move the pages to the [file or anon] inactive list.
+	 * Move pages back to the lru list.
 	 */
 	pagevec_init(&pvec, 1);
-	lru = LRU_BASE + file * LRU_FILE;
 
 	spin_lock_irq(&zone->lru_lock);
 	/*
-	 * Count referenced pages from currently used mappings as
-	 * rotated, even though they are moved to the inactive list.
-	 * This helps balance scan pressure between file and anonymous
-	 * pages in get_scan_ratio.
+	 * Count referenced pages from currently used mappings as rotated,
+	 * even though only some of them are actually re-activated.  This
+	 * helps balance scan pressure between file and anonymous pages in
+	 * get_scan_ratio.
 	 */
 	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
 	pgmoved = 0;  /* count pages moved to inactive list */
+	lru = LRU_BASE + file * LRU_FILE;
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1295,6 +1310,29 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	__count_vm_events(PGDEACTIVATE, pgmoved);
+
+	pgmoved = 0;  /* count pages moved back to active list */
+	lru = LRU_ACTIVE + file * LRU_FILE;
+	while (!list_empty(&l_active)) {
+		page = lru_to_page(&l_active);
+		prefetchw_prev_lru_page(page, &l_active, flags);
+		VM_BUG_ON(PageLRU(page));
+		SetPageLRU(page);
+		VM_BUG_ON(!PageActive(page));
+
+		list_move(&page->lru, &zone->lru[lru].list);
+		mem_cgroup_add_lru_list(page, lru);
+		pgmoved++;
+		if (!pagevec_add(&pvec, page)) {
+			spin_unlock_irq(&zone->lru_lock);
+			if (buffer_heads_over_limit)
+				pagevec_strip(&pvec);
+			__pagevec_release(&pvec);
+			spin_lock_irq(&zone->lru_lock);
+		}
+	}
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
+
 	spin_unlock_irq(&zone->lru_lock);
 	if (buffer_heads_over_limit)
 		pagevec_strip(&pvec);
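
For readers skimming the hunks above, here is a minimal standalone C sketch of the routing decision this patch adds to shrink_active_list(): a referenced, file-backed VM_EXEC page earns one more trip around the active list, while everything else is deactivated. The struct and helper below are simplified stand-ins invented for illustration; only the VM_EXEC flag, the PageAnon()/page_referenced() checks, and the l_active/l_inactive list names mirror the kernel code.

/*
 * Standalone sketch of the routing logic above, with simplified
 * stand-in types (the kernel uses struct page, vm_flags, etc.;
 * everything here is illustrative only).
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_EXEC 0x4	/* stand-in for the kernel's VM_EXEC vma flag */

struct fake_page {
	bool mapping_inuse;	/* models page_mapping_inuse() */
	bool referenced;	/* models page_referenced() result */
	bool anon;		/* models PageAnon() */
	unsigned long vm_flags;
};

/* Decide which list a scanned active page should move to. */
static const char *route_page(const struct fake_page *page)
{
	if (page->mapping_inuse && page->referenced) {
		/*
		 * Referenced, file-backed, executable pages get one
		 * more trip around the active list; everything else
		 * is deactivated.
		 */
		if ((page->vm_flags & VM_EXEC) && !page->anon)
			return "l_active";
	}
	return "l_inactive";
}

int main(void)
{
	struct fake_page exec_text = { true,  true,  false, VM_EXEC };
	struct fake_page anon_exec = { true,  true,  true,  VM_EXEC };
	struct fake_page cold_file = { true,  false, false, VM_EXEC };

	printf("referenced file-backed VM_EXEC -> %s\n", route_page(&exec_text));
	printf("referenced anon VM_EXEC        -> %s\n", route_page(&anon_exec));
	printf("unreferenced file-backed       -> %s\n", route_page(&cold_file));
	return 0;
}

Note how the anon VM_EXEC case lands on l_inactive: as the in-code comment explains, anon pages are not exposed to use-once streaming IO, and workloads like the JVM can map many anon VM_EXEC pages, so the protection is deliberately limited to file-backed pages.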