Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 093f5fe6dd77..40fea4918390 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -692,7 +692,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			__count_vm_events(KSWAPD_STEAL, nr_freed);
 		} else
 			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-		__count_vm_events(PGACTIVATE, nr_freed);
+		__count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
 		if (nr_taken == 0)
 			goto done;
@@ -984,7 +984,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
 		if (!populated_zone(zone))
 			continue;
 
-		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
 
 		note_zone_scanning_priority(zone, priority);
@@ -1034,7 +1034,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 	for (i = 0; zones[i] != NULL; i++) {
 		struct zone *zone = zones[i];
 
-		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
 
 		lru_pages += zone->nr_active + zone->nr_inactive;
@@ -1089,7 +1089,7 @@ out:
 	for (i = 0; zones[i] != 0; i++) {
 		struct zone *zone = zones[i];
 
-		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
 
 		zone->prev_priority = priority;
@@ -1354,7 +1354,7 @@ void wakeup_kswapd(struct zone *zone, int order)
 		return;
 	if (pgdat->kswapd_max_order < order)
 		pgdat->kswapd_max_order = order;
-	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 		return;
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
@@ -1369,8 +1369,8 @@ void wakeup_kswapd(struct zone *zone, int order)
  *
  * For pass > 3 we also try to shrink the LRU lists that contain a few pages
  */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
-				int prio, struct scan_control *sc)
+static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+				int pass, struct scan_control *sc)
 {
 	struct zone *zone;
 	unsigned long nr_to_scan, ret = 0;
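
The cpuset hunks above all converge on the same call-site pattern: skip zones the current task's cpuset does not permit via the explicit hardwall helper, rather than passing __GFP_HARDWALL into cpuset_zone_allowed(). The sketch below is a hypothetical, standalone illustration of that pattern only; the function name and loop are invented for the example, and only populated_zone(), cpuset_zone_allowed_hardwall() and GFP_KERNEL are taken from the patched call sites.

#include <linux/mmzone.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>

/*
 * Hypothetical zone walk (not part of the patch) mirroring the converted
 * reclaim call sites: skip unpopulated zones, then skip zones outside the
 * current task's cpuset using the *_hardwall helper.
 */
static void example_walk_zones(struct zone **zones)
{
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!populated_zone(zone))
			continue;

		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
			continue;

		/* per-zone reclaim work would go here */
	}
}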