Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	64
1 file changed, 30 insertions(+), 34 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4046434046e6..eceac9f9032f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1246,17 +1246,16 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
 					struct scan_control *sc)
 {
+	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 	unsigned long nr_reclaimed = 0;
-	int i;
-
+	struct zoneref *z;
+	struct zone *zone;
 
 	sc->all_unreclaimable = 1;
-	for (i = 0; zones[i] != NULL; i++) {
-		struct zone *zone = zones[i];
-
+	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 		if (!populated_zone(zone))
 			continue;
 		/*
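The hunk above is the heart of the conversion: instead of walking a NULL-terminated struct zone * array, reclaim now walks a struct zonelist via the for_each_zone_zonelist() helper, which skips zonelist entries above the allocation's highest usable zone index. As a point of reference, here is a minimal sketch of that iteration pattern with the 2.6.26-era API; walk_zones() is a hypothetical name for illustration, not part of this patch:

#include <linux/mmzone.h>

/* Hypothetical example: visit every populated zone in the zonelist
 * that is usable for an allocation described by gfp_mask. */
static void walk_zones(struct zonelist *zonelist, gfp_t gfp_mask)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		if (!populated_zone(zone))
			continue;
		/* ... per-zone reclaim work ... */
	}
}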
@@ -1301,8 +1300,8 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	int priority;
 	int ret = 0;
@@ -1310,7 +1309,9 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 	unsigned long nr_reclaimed = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long lru_pages = 0;
-	int i;
+	struct zoneref *z;
+	struct zone *zone;
+	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 
 	if (scan_global_lru(sc))
 		count_vm_event(ALLOCSTALL);
@@ -1318,8 +1319,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 	 * mem_cgroup will not do shrink_slab.
 	 */
 	if (scan_global_lru(sc)) {
-		for (i = 0; zones[i] != NULL; i++) {
-			struct zone *zone = zones[i];
+		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
@@ -1333,13 +1333,13 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
-		nr_reclaimed += shrink_zones(priority, zones, sc);
+		nr_reclaimed += shrink_zones(priority, zonelist, sc);
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
 		 */
 		if (scan_global_lru(sc)) {
-			shrink_slab(sc->nr_scanned, gfp_mask, lru_pages);
+			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
 			if (reclaim_state) {
 				nr_reclaimed += reclaim_state->reclaimed_slab;
 				reclaim_state->reclaimed_slab = 0;
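With the gfp_t argument gone from do_try_to_free_pages(), sc->gfp_mask becomes the single source of the allocation context, which is why shrink_slab() above now reads the mask out of the scan control rather than taking a separate parameter. A sketch of the resulting calling convention (other scan_control fields elided):

	struct scan_control sc = {
		.gfp_mask = gfp_mask,	/* set once by the caller */
		/* ... remaining reclaim parameters ... */
	};

	/* Callees derive everything from sc; there is no stray gfp_t
	 * argument that could drift out of sync with sc.gfp_mask. */
	do_try_to_free_pages(zonelist, &sc);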
@@ -1383,8 +1383,7 @@ out:
 	priority = 0;
 
 	if (scan_global_lru(sc)) {
-		for (i = 0; zones[i] != NULL; i++) {
-			struct zone *zone = zones[i];
+		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
@@ -1397,7 +1396,8 @@ out:
 	return ret;
 }
 
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+								gfp_t gfp_mask)
 {
 	struct scan_control sc = {
 		.gfp_mask = gfp_mask,
@@ -1410,7 +1410,7 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
 		.isolate_pages = isolate_pages_global,
 	};
 
-	return do_try_to_free_pages(zones, gfp_mask, &sc);
+	return do_try_to_free_pages(zonelist, &sc);
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
@@ -1419,7 +1419,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 						gfp_t gfp_mask)
 {
 	struct scan_control sc = {
-		.gfp_mask = gfp_mask,
 		.may_writepage = !laptop_mode,
 		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
@@ -1428,13 +1427,12 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.mem_cgroup = mem_cont,
 		.isolate_pages = mem_cgroup_isolate_pages,
 	};
-	struct zone **zones;
-	int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
+	struct zonelist *zonelist;
 
-	zones = NODE_DATA(numa_node_id())->node_zonelists[target_zone].zones;
-	if (do_try_to_free_pages(zones, sc.gfp_mask, &sc))
-		return 1;
-	return 0;
+	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+	return do_try_to_free_pages(zonelist, &sc);
 }
 #endif
 
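The memcg path above no longer indexes a per-type zones array: it rebuilds sc.gfp_mask so that the reclaim-behaviour bits (whether the caller may do I/O, filesystem writeback, or sleep) come from the caller's mask, while the zone-placement bits come from GFP_HIGHUSER_MOVABLE, letting the scan cover the whole zonelist. A sketch of that bit split; note that GFP_RECLAIM_MASK is an mm-internal definition, and the helper name and placement here are illustrative only:

/* Illustrative helper: keep the caller's reclaim-control bits,
 * substitute the placement bits of GFP_HIGHUSER_MOVABLE. */
static inline gfp_t memcg_reclaim_gfp(gfp_t caller_mask)
{
	return (caller_mask & GFP_RECLAIM_MASK) |
	       (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
}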
@@ -1647,11 +1645,10 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	cpumask_t cpumask;
+	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	cpumask = node_to_cpumask(pgdat->node_id);
-	if (!cpus_empty(cpumask))
-		set_cpus_allowed(tsk, cpumask);
+	if (!cpus_empty(*cpumask))
+		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
 	/*
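The kswapd hunk belongs to the keep-cpumask_t-off-the-stack work: node_to_cpumask() returned a full cpumask_t by value, which gets expensive as NR_CPUS grows, whereas node_to_cpumask_ptr() declares a pointer-style local and set_cpus_allowed_ptr() takes the mask by reference. Roughly, and as an assumption about the generic fallback of this era rather than a verbatim quote of the kernel macro:

/* Approximate shape of the generic fallback for
 * node_to_cpumask_ptr(v, node) around 2.6.26: */
#define node_to_cpumask_ptr(v, node)				\
	cpumask_t _##v = node_to_cpumask(node);			\
	const cpumask_t *v = &_##v

Architectures with a per-node cpumask map can instead make the pointer refer directly into that map, avoiding the stack copy entirely.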
@@ -1880,17 +1877,16 @@ out:
 static int __devinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
-	pg_data_t *pgdat;
-	cpumask_t mask;
 	int nid;
 
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
-			pgdat = NODE_DATA(nid);
-			mask = node_to_cpumask(pgdat->node_id);
-			if (any_online_cpu(mask) != NR_CPUS)
+			pg_data_t *pgdat = NODE_DATA(nid);
+			node_to_cpumask_ptr(mask, pgdat->node_id);
+
+			if (any_online_cpu(*mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
-				set_cpus_allowed(pgdat->kswapd, mask);
+				set_cpus_allowed_ptr(pgdat->kswapd, mask);
 		}
 	}
 	return NOTIFY_OK;
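The hotplug callback also swaps the not-found sentinel: any_online_cpu() returns a value past the end of the searched range when no CPU in the mask is online, and comparing against nr_cpu_ids (the number of CPU IDs actually usable on this boot) rather than the compile-time NR_CPUS is the convention the *_ptr cpumask API settled on. A sketch of the check as used above, assuming the same 2.6.26-era helpers:

	node_to_cpumask_ptr(mask, pgdat->node_id);

	/* any_online_cpu() yields the first online CPU in *mask,
	 * or a value >= nr_cpu_ids when none is online. */
	if (any_online_cpu(*mask) < nr_cpu_ids)
		set_cpus_allowed_ptr(pgdat->kswapd, mask);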