Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	197
1 files changed, 125 insertions, 72 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b07c48b09a93..9a27c44aa327 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -125,11 +125,30 @@ static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-#define scan_global_lru(sc) (!(sc)->mem_cgroup)
+#define scanning_global_lru(sc) (!(sc)->mem_cgroup)
 #else
-#define scan_global_lru(sc) (1)
+#define scanning_global_lru(sc) (1)
 #endif
 
+static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
+						  struct scan_control *sc)
+{
+	if (!scanning_global_lru(sc))
+		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
+
+	return &zone->reclaim_stat;
+}
+
+static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
+				   enum lru_list lru)
+{
+	if (!scanning_global_lru(sc))
+		return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
+
+	return zone_page_state(zone, NR_LRU_BASE + lru);
+}
+
+
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -512,7 +531,6 @@ redo:
 		lru = LRU_UNEVICTABLE;
 		add_page_to_unevictable_list(page);
 	}
-	mem_cgroup_move_lists(page, lru);
 
 	/*
 	 * page's status can change while we move it among lru. If an evictable
@@ -547,7 +565,6 @@ void putback_lru_page(struct page *page)
 
 	lru = !!TestClearPageActive(page) + page_is_file_cache(page);
 	lru_cache_add_lru(page, lru);
-	mem_cgroup_move_lists(page, lru);
 	put_page(page);
 }
 #endif /* CONFIG_UNEVICTABLE_LRU */
@@ -813,6 +830,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
 		return ret;
 
 	ret = -EBUSY;
+
 	if (likely(get_page_unless_zero(page))) {
 		/*
 		 * Be careful not to clear PageLRU until after we're
@@ -821,6 +839,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
 		 */
 		ClearPageLRU(page);
 		ret = 0;
+		mem_cgroup_del_lru(page);
 	}
 
 	return ret;
@@ -1029,6 +1048,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	struct pagevec pvec;
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	pagevec_init(&pvec, 1);
 
@@ -1070,13 +1090,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
 						-count[LRU_INACTIVE_ANON]);
 
-		if (scan_global_lru(sc)) {
+		if (scanning_global_lru(sc))
 			zone->pages_scanned += nr_scan;
-			zone->recent_scanned[0] += count[LRU_INACTIVE_ANON];
-			zone->recent_scanned[0] += count[LRU_ACTIVE_ANON];
-			zone->recent_scanned[1] += count[LRU_INACTIVE_FILE];
-			zone->recent_scanned[1] += count[LRU_ACTIVE_FILE];
-		}
+
+		reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
+		reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
+		reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
+		reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+
 		spin_unlock_irq(&zone->lru_lock);
 
 		nr_scanned += nr_scan;
@@ -1108,7 +1129,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		if (current_is_kswapd()) {
 			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
 			__count_vm_events(KSWAPD_STEAL, nr_freed);
-		} else if (scan_global_lru(sc))
+		} else if (scanning_global_lru(sc))
 			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
 
 		__count_zone_vm_events(PGSTEAL, zone, nr_freed);
@@ -1134,10 +1155,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			SetPageLRU(page);
 			lru = page_lru(page);
 			add_page_to_lru_list(zone, page, lru);
-			mem_cgroup_move_lists(page, lru);
-			if (PageActive(page) && scan_global_lru(sc)) {
+			if (PageActive(page)) {
 				int file = !!page_is_file_cache(page);
-				zone->recent_rotated[file]++;
+				reclaim_stat->recent_rotated[file]++;
 			}
 			if (!pagevec_add(&pvec, page)) {
 				spin_unlock_irq(&zone->lru_lock);
@@ -1197,6 +1217,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	struct page *page;
 	struct pagevec pvec;
 	enum lru_list lru;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
@@ -1207,10 +1228,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * zone->pages_scanned is used for detect zone's oom
 	 * mem_cgroup remembers nr_scan by itself.
 	 */
-	if (scan_global_lru(sc)) {
+	if (scanning_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
-		zone->recent_scanned[!!file] += pgmoved;
 	}
+	reclaim_stat->recent_scanned[!!file] += pgmoved;
 
 	if (file)
 		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
@@ -1251,8 +1272,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * This helps balance scan pressure between file and anonymous
 	 * pages in get_scan_ratio.
 	 */
-	if (scan_global_lru(sc))
-		zone->recent_rotated[!!file] += pgmoved;
+	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
@@ -1263,7 +1283,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		ClearPageActive(page);
 
 		list_move(&page->lru, &zone->lru[lru].list);
-		mem_cgroup_move_lists(page, lru);
+		mem_cgroup_add_lru_list(page, lru);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
@@ -1292,6 +1312,38 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	pagevec_release(&pvec);
 }
 
+static int inactive_anon_is_low_global(struct zone *zone)
+{
+	unsigned long active, inactive;
+
+	active = zone_page_state(zone, NR_ACTIVE_ANON);
+	inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+
+	if (inactive * zone->inactive_ratio < active)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * inactive_anon_is_low - check if anonymous pages need to be deactivated
+ * @zone: zone to check
+ * @sc:   scan control of this context
+ *
+ * Returns true if the zone does not have enough inactive anon pages,
+ * meaning some active anon pages need to be deactivated.
+ */
+static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
+{
+	int low;
+
+	if (scanning_global_lru(sc))
+		low = inactive_anon_is_low_global(zone);
+	else
+		low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
+	return low;
+}
+
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 	struct zone *zone, struct scan_control *sc, int priority)
 {
@@ -1302,8 +1354,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 		return 0;
 	}
 
-	if (lru == LRU_ACTIVE_ANON &&
-	    (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
+	if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
 		shrink_active_list(nr_to_scan, zone, sc, priority, file);
 		return 0;
 	}
@@ -1325,6 +1376,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	unsigned long anon, file, free;
 	unsigned long anon_prio, file_prio;
 	unsigned long ap, fp;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (nr_swap_pages <= 0) {
@@ -1333,17 +1385,20 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 		return;
 	}
 
-	anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
-		zone_page_state(zone, NR_INACTIVE_ANON);
-	file  = zone_page_state(zone, NR_ACTIVE_FILE) +
-		zone_page_state(zone, NR_INACTIVE_FILE);
-	free  = zone_page_state(zone, NR_FREE_PAGES);
-
-	/* If we have very few page cache pages, force-scan anon pages. */
-	if (unlikely(file + free <= zone->pages_high)) {
-		percent[0] = 100;
-		percent[1] = 0;
-		return;
+	anon  = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
+		zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
+	file  = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
+		zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
+
+	if (scanning_global_lru(sc)) {
+		free  = zone_page_state(zone, NR_FREE_PAGES);
+		/* If we have very few page cache pages,
+		   force-scan anon pages. */
+		if (unlikely(file + free <= zone->pages_high)) {
+			percent[0] = 100;
+			percent[1] = 0;
+			return;
+		}
 	}
 
 	/*
@@ -1357,17 +1412,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	 *
 	 * anon in [0], file in [1]
 	 */
-	if (unlikely(zone->recent_scanned[0] > anon / 4)) {
+	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
 		spin_lock_irq(&zone->lru_lock);
-		zone->recent_scanned[0] /= 2;
-		zone->recent_rotated[0] /= 2;
+		reclaim_stat->recent_scanned[0] /= 2;
+		reclaim_stat->recent_rotated[0] /= 2;
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
-	if (unlikely(zone->recent_scanned[1] > file / 4)) {
+	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
 		spin_lock_irq(&zone->lru_lock);
-		zone->recent_scanned[1] /= 2;
-		zone->recent_rotated[1] /= 2;
+		reclaim_stat->recent_scanned[1] /= 2;
+		reclaim_stat->recent_rotated[1] /= 2;
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
@@ -1383,11 +1438,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	 * proportional to the fraction of recently scanned pages on
 	 * each list that were recently referenced and in active use.
 	 */
-	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
-	ap /= zone->recent_rotated[0] + 1;
+	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+	ap /= reclaim_stat->recent_rotated[0] + 1;
 
-	fp = (file_prio + 1) * (zone->recent_scanned[1] + 1);
-	fp /= zone->recent_rotated[1] + 1;
+	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+	fp /= reclaim_stat->recent_rotated[1] + 1;
 
 	/* Normalize to percentages */
 	percent[0] = 100 * ap / (ap + fp + 1);
@@ -1411,30 +1466,23 @@ static void shrink_zone(int priority, struct zone *zone,
 	get_scan_ratio(zone, sc, percent);
 
 	for_each_evictable_lru(l) {
-		if (scan_global_lru(sc)) {
-			int file = is_file_lru(l);
-			int scan;
-
-			scan = zone_page_state(zone, NR_LRU_BASE + l);
-			if (priority) {
-				scan >>= priority;
-				scan = (scan * percent[file]) / 100;
-			}
+		int file = is_file_lru(l);
+		int scan;
+
+		scan = zone_page_state(zone, NR_LRU_BASE + l);
+		if (priority) {
+			scan >>= priority;
+			scan = (scan * percent[file]) / 100;
+		}
+		if (scanning_global_lru(sc)) {
 			zone->lru[l].nr_scan += scan;
 			nr[l] = zone->lru[l].nr_scan;
 			if (nr[l] >= swap_cluster_max)
 				zone->lru[l].nr_scan = 0;
 			else
 				nr[l] = 0;
-		} else {
-			/*
-			 * This reclaim occurs not because zone memory shortage
-			 * but because memory controller hits its limit.
-			 * Don't modify zone reclaim related data.
-			 */
-			nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
-							priority, l);
-		}
+		} else
+			nr[l] = scan;
 	}
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -1467,9 +1515,7 @@ static void shrink_zone(int priority, struct zone *zone,
 	 * Even if we did not try to evict anon pages at all, we want to
 	 * rebalance the anon lru active/inactive ratio.
 	 */
-	if (!scan_global_lru(sc) || inactive_anon_is_low(zone))
-		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
-	else if (!scan_global_lru(sc))
+	if (inactive_anon_is_low(zone, sc))
 		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
 	throttle_vm_writeout(sc->gfp_mask);
@@ -1504,7 +1550,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 		 * Take care memory controller reclaiming has small influence
 		 * to global LRU.
 		 */
-		if (scan_global_lru(sc)) {
+		if (scanning_global_lru(sc)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 			note_zone_scanning_priority(zone, priority);
@@ -1557,12 +1603,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
 	delayacct_freepages_start();
 
-	if (scan_global_lru(sc))
+	if (scanning_global_lru(sc))
 		count_vm_event(ALLOCSTALL);
 	/*
 	 * mem_cgroup will not do shrink_slab.
 	 */
-	if (scan_global_lru(sc)) {
+	if (scanning_global_lru(sc)) {
 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1581,7 +1627,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
 		 */
-		if (scan_global_lru(sc)) {
+		if (scanning_global_lru(sc)) {
 			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
 			if (reclaim_state) {
 				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -1612,7 +1658,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		congestion_wait(WRITE, HZ/10);
 	}
 	/* top priority shrink_zones still had more to do? don't OOM, then */
-	if (!sc->all_unreclaimable && scan_global_lru(sc))
+	if (!sc->all_unreclaimable && scanning_global_lru(sc))
 		ret = sc->nr_reclaimed;
 out:
 	/*
@@ -1625,7 +1671,7 @@ out:
 	if (priority < 0)
 		priority = 0;
 
-	if (scan_global_lru(sc)) {
+	if (scanning_global_lru(sc)) {
 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1661,19 +1707,24 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
-					   gfp_t gfp_mask)
+					   gfp_t gfp_mask,
+					   bool noswap,
+					   unsigned int swappiness)
 {
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
 		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
-		.swappiness = vm_swappiness,
+		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem_cont,
 		.isolate_pages = mem_cgroup_isolate_pages,
 	};
 	struct zonelist *zonelist;
 
+	if (noswap)
+		sc.may_swap = 0;
+
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1761,7 +1812,7 @@ loop_again:
 		 * Do some background aging of the anon list, to give
 		 * pages a chance to be referenced before reclaiming.
 		 */
-		if (inactive_anon_is_low(zone))
+		if (inactive_anon_is_low(zone, &sc))
 			shrink_active_list(SWAP_CLUSTER_MAX, zone,
 						&sc, priority, 0);
 
@@ -2404,6 +2455,7 @@ retry:
 
 		__dec_zone_state(zone, NR_UNEVICTABLE);
 		list_move(&page->lru, &zone->lru[l].list);
+		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
 		__count_vm_event(UNEVICTABLE_PGRESCUED);
 	} else {
@@ -2412,6 +2464,7 @@ retry:
 		 */
 		SetPageUnevictable(page);
 		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
 		if (page_evictable(page, NULL))
 			goto retry;
 	}