Diffstat (limited to 'mm/vmscan.c'):
 mm/vmscan.c | 59 +++++++++++++++++++++++++++++----------------------------------
 1 file changed, 25 insertions(+), 34 deletions(-)
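
The patch below drops the per-call scratch fields nr_to_scan and priority from struct scan_control and instead passes them to the reclaim functions as explicit arguments, leaving the shared structure to carry only configuration and running totals. The resulting signatures, collected from the hunks that follow:

    static void shrink_cache(int max_scan, struct zone *zone,
                             struct scan_control *sc);
    static void refill_inactive_zone(int nr_pages, struct zone *zone,
                                     struct scan_control *sc);
    static void shrink_zone(int priority, struct zone *zone,
                            struct scan_control *sc);
    static void shrink_caches(int priority, struct zone **zones,
                              struct scan_control *sc);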
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e21bab4deda6..f7c4f37c3b18 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -52,9 +52,6 @@ typedef enum {
 } pageout_t;
 
 struct scan_control {
-        /* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
-        unsigned long nr_to_scan;
-
         /* Incremented by the number of inactive pages that were scanned */
         unsigned long nr_scanned;
 
@@ -63,9 +60,6 @@ struct scan_control {
 
         unsigned long nr_mapped;        /* From page_state */
 
-        /* Ask shrink_caches, or shrink_zone to scan at this priority */
-        unsigned int priority;
-
         /* This context's GFP mask */
         gfp_t gfp_mask;
 
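
With those two fields gone, every scan_control member visible in this patch is either fixed for the whole reclaim pass (gfp_mask, swap_cluster_max, may_swap, nr_mapped) or a running total (nr_scanned, nr_reclaimed), so a single instance can be shared across the per-zone calls without per-call state leaking between them.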
@@ -1112,11 +1106,10 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
-static void shrink_cache(struct zone *zone, struct scan_control *sc)
+static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *sc)
 {
         LIST_HEAD(page_list);
         struct pagevec pvec;
-        int max_scan = sc->nr_to_scan;
 
         pagevec_init(&pvec, 1);
 
@@ -1192,12 +1185,11 @@ done:
  * But we had to alter page->flags anyway.
  */
 static void
-refill_inactive_zone(struct zone *zone, struct scan_control *sc)
+refill_inactive_zone(int nr_pages, struct zone *zone, struct scan_control *sc)
 {
         int pgmoved;
         int pgdeactivate = 0;
         int pgscanned;
-        int nr_pages = sc->nr_to_scan;
         LIST_HEAD(l_hold);      /* The pages which were snipped off */
         LIST_HEAD(l_inactive);  /* Pages to go onto the inactive_list */
         LIST_HEAD(l_active);    /* Pages to go onto the active_list */
@@ -1332,10 +1324,11 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
  * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
  */
 static void
-shrink_zone(struct zone *zone, struct scan_control *sc)
+shrink_zone(int priority, struct zone *zone, struct scan_control *sc)
 {
         unsigned long nr_active;
         unsigned long nr_inactive;
+        unsigned long nr_to_scan;
 
         atomic_inc(&zone->reclaim_in_progress);
 
@@ -1343,14 +1336,14 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
          * Add one to `nr_to_scan' just to make sure that the kernel will
          * slowly sift through the active list.
          */
-        zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
+        zone->nr_scan_active += (zone->nr_active >> priority) + 1;
         nr_active = zone->nr_scan_active;
         if (nr_active >= sc->swap_cluster_max)
                 zone->nr_scan_active = 0;
         else
                 nr_active = 0;
 
-        zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
+        zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
         nr_inactive = zone->nr_scan_inactive;
         if (nr_inactive >= sc->swap_cluster_max)
                 zone->nr_scan_inactive = 0;
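
The right shift by priority meters how much of each LRU list gets queued per pass: assuming DEF_PRIORITY is 12, as in kernels of this vintage, a zone with 1,000,000 active pages queues (1000000 >> 12) + 1 = 245 pages per call at the lowest urgency, and the +1 guarantees the active list is always sifted at least slowly. Note that nothing is scanned until the count accumulated in zone->nr_scan_active (or nr_scan_inactive) reaches sc->swap_cluster_max; smaller amounts are carried over to the next call.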
@@ -1359,17 +1352,17 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 
         while (nr_active || nr_inactive) {
                 if (nr_active) {
-                        sc->nr_to_scan = min(nr_active,
+                        nr_to_scan = min(nr_active,
                                 (unsigned long)sc->swap_cluster_max);
-                        nr_active -= sc->nr_to_scan;
-                        refill_inactive_zone(zone, sc);
+                        nr_active -= nr_to_scan;
+                        refill_inactive_zone(nr_to_scan, zone, sc);
                 }
 
                 if (nr_inactive) {
-                        sc->nr_to_scan = min(nr_inactive,
+                        nr_to_scan = min(nr_inactive,
                                 (unsigned long)sc->swap_cluster_max);
-                        nr_inactive -= sc->nr_to_scan;
-                        shrink_cache(zone, sc);
+                        nr_inactive -= nr_to_scan;
+                        shrink_cache(nr_to_scan, zone, sc);
                 }
         }
 
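
shrink_zone() then drains the two counts in batches of at most sc->swap_cluster_max pages, alternating between the active and inactive lists. A minimal standalone sketch of that batching pattern, with drain_in_batches, remaining, batch_max and process as illustrative names rather than kernel symbols:

    /* Drain `remaining' units of work in bounded batches, the way
     * shrink_zone() above drains nr_active and nr_inactive. */
    static void drain_in_batches(unsigned long remaining, unsigned long batch_max,
                                 void (*process)(unsigned long nr))
    {
            while (remaining) {
                    /* Clamp each batch, as min(..., sc->swap_cluster_max) does. */
                    unsigned long nr = remaining < batch_max ? remaining : batch_max;
                    remaining -= nr;
                    process(nr);    /* refill_inactive_zone()/shrink_cache() above */
            }
    }

Keeping each batch bounded caps the latency of any single call and lets the accounting in sc advance between batches.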
@@ -1395,7 +1388,7 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
  * scan then give up on it.
  */
 static void
-shrink_caches(struct zone **zones, struct scan_control *sc)
+shrink_caches(int priority, struct zone **zones, struct scan_control *sc)
 {
         int i;
 
@@ -1408,14 +1401,14 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
                 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
                         continue;
 
-                zone->temp_priority = sc->priority;
-                if (zone->prev_priority > sc->priority)
-                        zone->prev_priority = sc->priority;
+                zone->temp_priority = priority;
+                if (zone->prev_priority > priority)
+                        zone->prev_priority = priority;
 
-                if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
+                if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                         continue;       /* Let kswapd poll it */
 
-                shrink_zone(zone, sc);
+                shrink_zone(priority, zone, sc);
         }
 }
 
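Recording the caller's priority in zone->temp_priority and zone->prev_priority keeps the zone's reclaim-pressure history exactly as before; only the source of the value changes, from sc->priority to the new argument.
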
@@ -1462,11 +1455,10 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                 sc.nr_mapped = read_page_state(nr_mapped);
                 sc.nr_scanned = 0;
                 sc.nr_reclaimed = 0;
-                sc.priority = priority;
                 sc.swap_cluster_max = SWAP_CLUSTER_MAX;
                 if (!priority)
                         disable_swap_token();
-                shrink_caches(zones, &sc);
+                shrink_caches(priority, zones, &sc);
                 shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
                 if (reclaim_state) {
                         sc.nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -1629,9 +1621,8 @@ scan:
                         zone->prev_priority = priority;
                         sc.nr_scanned = 0;
                         sc.nr_reclaimed = 0;
-                        sc.priority = priority;
                         sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
-                        shrink_zone(zone, &sc);
+                        shrink_zone(priority, zone, &sc);
                         reclaim_state->reclaimed_slab = 0;
                         nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                                 lru_pages);
@@ -1886,6 +1877,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         struct scan_control sc;
         cpumask_t mask;
         int node_id;
+        int priority;
 
         if (time_before(jiffies,
                 zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
@@ -1906,7 +1898,6 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         sc.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP);
         sc.nr_scanned = 0;
         sc.nr_reclaimed = 0;
-        sc.priority = ZONE_RECLAIM_PRIORITY + 1;
         sc.nr_mapped = read_page_state(nr_mapped);
         sc.gfp_mask = gfp_mask;
 
@@ -1932,11 +1923,11 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
          * Free memory by calling shrink zone with increasing priorities
          * until we have enough memory freed.
          */
+        priority = ZONE_RECLAIM_PRIORITY;
         do {
-                sc.priority--;
-                shrink_zone(zone, &sc);
-
-        } while (sc.nr_reclaimed < nr_pages && sc.priority > 0);
+                shrink_zone(priority, zone, &sc);
+                priority--;
+        } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
 
         if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
                 /*
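
Behaviourally the rewritten zone_reclaim() loop matches the old one: the old code started from ZONE_RECLAIM_PRIORITY + 1 and decremented before each shrink_zone() call, so both versions scan at priorities ZONE_RECLAIM_PRIORITY down through 0 and both stop early once nr_pages have been reclaimed. The only difference is that the counter now lives in a local variable instead of in scan_control.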