Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	125
1 file changed, 20 insertions(+), 105 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b0cd81c32de6..be8235fb1939 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,9 +63,6 @@ struct scan_control {
 
 	unsigned long nr_mapped;	/* From page_state */
 
-	/* How many pages shrink_cache() should reclaim */
-	int nr_to_reclaim;
-
 	/* Ask shrink_caches, or shrink_zone to scan at this priority */
 	unsigned int priority;
 
@@ -74,9 +71,6 @@ struct scan_control {
 
 	int may_writepage;
 
-	/* Can pages be swapped as part of reclaim? */
-	int may_swap;
-
 	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
 	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
 	 * In this context, it doesn't matter that we scan the
@@ -367,7 +361,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
 		res = mapping->a_ops->writepage(page, &wbc);
 		if (res < 0)
 			handle_write_error(mapping, page, res);
-		if (res == WRITEPAGE_ACTIVATE) {
+		if (res == AOP_WRITEPAGE_ACTIVATE) {
 			ClearPageReclaim(page);
 			return PAGE_ACTIVATE;
 		}
@@ -430,8 +424,6 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
-			if (!sc->may_swap)
-				goto keep_locked;
 			if (!add_to_swap(page))
 				goto activate_locked;
 		}
@@ -653,17 +645,17 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
 			goto done;
 
 		max_scan -= nr_scan;
-		if (current_is_kswapd())
-			mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
-		else
-			mod_page_state_zone(zone, pgscan_direct, nr_scan);
 		nr_freed = shrink_list(&page_list, sc);
-		if (current_is_kswapd())
-			mod_page_state(kswapd_steal, nr_freed);
-		mod_page_state_zone(zone, pgsteal, nr_freed);
-		sc->nr_to_reclaim -= nr_freed;
 
-		spin_lock_irq(&zone->lru_lock);
+		local_irq_disable();
+		if (current_is_kswapd()) {
+			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
+			__mod_page_state(kswapd_steal, nr_freed);
+		} else
+			__mod_page_state_zone(zone, pgscan_direct, nr_scan);
+		__mod_page_state_zone(zone, pgsteal, nr_freed);
+
+		spin_lock(&zone->lru_lock);
 		/*
 		 * Put back any unfreeable pages.
 		 */
@@ -825,11 +817,13 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 		}
 	}
 	zone->nr_active += pgmoved;
-	spin_unlock_irq(&zone->lru_lock);
-	pagevec_release(&pvec);
+	spin_unlock(&zone->lru_lock);
+
+	__mod_page_state_zone(zone, pgrefill, pgscanned);
+	__mod_page_state(pgdeactivate, pgdeactivate);
+	local_irq_enable();
 
-	mod_page_state_zone(zone, pgrefill, pgscanned);
-	mod_page_state(pgdeactivate, pgdeactivate);
+	pagevec_release(&pvec);
 }
 
 /*
@@ -861,8 +855,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	else
 		nr_inactive = 0;
 
-	sc->nr_to_reclaim = sc->swap_cluster_max;
-
 	while (nr_active || nr_inactive) {
 		if (nr_active) {
 			sc->nr_to_scan = min(nr_active,
@@ -876,8 +868,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 				(unsigned long)sc->swap_cluster_max);
 			nr_inactive -= sc->nr_to_scan;
 			shrink_cache(zone, sc);
-			if (sc->nr_to_reclaim <= 0)
-				break;
 		}
 	}
 
@@ -910,7 +900,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
 	for (i = 0; zones[i] != NULL; i++) {
 		struct zone *zone = zones[i];
 
-		if (zone->present_pages == 0)
+		if (!populated_zone(zone))
 			continue;
 
 		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
@@ -952,7 +942,6 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 
 	sc.gfp_mask = gfp_mask;
 	sc.may_writepage = 0;
-	sc.may_swap = 1;
 
 	inc_page_state(allocstall);
 
@@ -1055,7 +1044,6 @@ loop_again:
 	total_reclaimed = 0;
 	sc.gfp_mask = GFP_KERNEL;
 	sc.may_writepage = 0;
-	sc.may_swap = 1;
 	sc.nr_mapped = read_page_state(nr_mapped);
 
 	inc_page_state(pageoutrun);
@@ -1084,7 +1072,7 @@ loop_again:
 		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
 			struct zone *zone = pgdat->node_zones + i;
 
-			if (zone->present_pages == 0)
+			if (!populated_zone(zone))
 				continue;
 
 			if (zone->all_unreclaimable &&
@@ -1121,7 +1109,7 @@ scan:
 			struct zone *zone = pgdat->node_zones + i;
 			int nr_slab;
 
-			if (zone->present_pages == 0)
+			if (!populated_zone(zone))
 				continue;
 
 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
@@ -1273,7 +1261,7 @@ void wakeup_kswapd(struct zone *zone, int order)
 {
 	pg_data_t *pgdat;
 
-	if (zone->present_pages == 0)
+	if (!populated_zone(zone))
 		return;
 
 	pgdat = zone->zone_pgdat;
@@ -1353,76 +1341,3 @@ static int __init kswapd_init(void)
 }
 
 module_init(kswapd_init)
-
-
-/*
- * Try to free up some pages from this zone through reclaim.
- */
-int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
-{
-	struct scan_control sc;
-	int nr_pages = 1 << order;
-	int total_reclaimed = 0;
-
-	/* The reclaim may sleep, so don't do it if sleep isn't allowed */
-	if (!(gfp_mask & __GFP_WAIT))
-		return 0;
-	if (zone->all_unreclaimable)
-		return 0;
-
-	sc.gfp_mask = gfp_mask;
-	sc.may_writepage = 0;
-	sc.may_swap = 0;
-	sc.nr_mapped = read_page_state(nr_mapped);
-	sc.nr_scanned = 0;
-	sc.nr_reclaimed = 0;
-	/* scan at the highest priority */
-	sc.priority = 0;
-	disable_swap_token();
-
-	if (nr_pages > SWAP_CLUSTER_MAX)
-		sc.swap_cluster_max = nr_pages;
-	else
-		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
-
-	/* Don't reclaim the zone if there are other reclaimers active */
-	if (atomic_read(&zone->reclaim_in_progress) > 0)
-		goto out;
-
-	shrink_zone(zone, &sc);
-	total_reclaimed = sc.nr_reclaimed;
-
- out:
-	return total_reclaimed;
-}
-
-asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
-				     unsigned int state)
-{
-	struct zone *z;
-	int i;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EACCES;
-
-	if (node >= MAX_NUMNODES || !node_online(node))
-		return -EINVAL;
-
-	/* This will break if we ever add more zones */
-	if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM)))
-		return -EINVAL;
-
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		if (!(zone & 1<<i))
-			continue;
-
-		z = &NODE_DATA(node)->node_zones[i];
-
-		if (state)
-			z->reclaim_pages = 1;
-		else
-			z->reclaim_pages = 0;
-	}
-
-	return 0;
-}