Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 51
1 file changed, 28 insertions(+), 23 deletions(-)
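
The patch is one mechanical conversion throughout mm/vmscan.c: the open-coded zone->nr_active and zone->nr_inactive fields become the NR_ACTIVE and NR_INACTIVE zone vm-stat items, read through zone_page_state() (or global_page_state() for the system-wide total) and updated through __mod_zone_page_state().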
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7430df68cb64..0655d5fe73e8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -679,7 +679,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
 					     &zone->inactive_list,
 					     &page_list, &nr_scan);
-		zone->nr_inactive -= nr_taken;
+		__mod_zone_page_state(zone, NR_INACTIVE, -nr_taken);
 		zone->pages_scanned += nr_scan;
 		spin_unlock_irq(&zone->lru_lock);
 
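
What the new call buys is already visible in this first hunk: NR_INACTIVE is now a ZVC item, so updates go through a per-CPU differential that is only periodically folded into the zone-wide counter, instead of dirtying a shared field on every batch. Below is a minimal userspace model of that batching idea; the struct, threshold value, and function names are illustrative assumptions, not the kernel's implementation.

/* Minimal userspace model of a batched (ZVC-style) counter. The
 * kernel keeps a small per-CPU delta and folds it into the zone-wide
 * count only when it crosses a threshold, so hot paths avoid
 * hammering a shared cacheline. All names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

#define STAT_THRESHOLD 32	/* assumed fold threshold */

struct zone_model {
	long global_count;	/* models the zone-wide counter */
	long cpu_delta;		/* models one CPU's differential */
};

static void mod_state(struct zone_model *z, long delta)
{
	z->cpu_delta += delta;
	if (labs(z->cpu_delta) > STAT_THRESHOLD) {
		z->global_count += z->cpu_delta;	/* fold into global */
		z->cpu_delta = 0;
	}
}

static long read_state(const struct zone_model *z)
{
	/* Readers see only folded values; small deltas may lag. */
	return z->global_count;
}

int main(void)
{
	struct zone_model z = { 0, 0 };

	for (int i = 0; i < 100; i++)
		mod_state(&z, 1);
	printf("approximate count: %ld (pending delta: %ld)\n",
	       read_state(&z), z.cpu_delta);
	return 0;
}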
@@ -740,7 +740,8 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority)
 
 static inline int zone_is_near_oom(struct zone *zone)
 {
-	return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
+	return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
+					+ zone_page_state(zone, NR_INACTIVE))*3;
 }
 
 /*
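
The 3x threshold in zone_is_near_oom() is unchanged; only the source of the LRU sizes moves to zone_page_state(). Restated as a self-contained C function with invented inputs, to make the cutoff concrete:

/* The 3x near-OOM heuristic from zone_is_near_oom(), restated in
 * userspace with hypothetical numbers.
 */
#include <stdio.h>

static int zone_is_near_oom(unsigned long scanned,
			    unsigned long active, unsigned long inactive)
{
	return scanned >= (active + inactive) * 3;
}

int main(void)
{
	/* 2000 LRU pages: near-OOM only once 6000+ pages were scanned. */
	printf("%d %d\n", zone_is_near_oom(5999, 1000, 1000),
			  zone_is_near_oom(6000, 1000, 1000));	/* 0 1 */
	return 0;
}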
@@ -825,7 +826,7 @@ force_reclaim_mapped:
 	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
 				    &l_hold, &pgscanned);
 	zone->pages_scanned += pgscanned;
-	zone->nr_active -= pgmoved;
+	__mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 
 	while (!list_empty(&l_hold)) {
@@ -857,7 +858,7 @@ force_reclaim_mapped:
 		list_move(&page->lru, &zone->inactive_list);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			zone->nr_inactive += pgmoved;
+			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
 			pgdeactivate += pgmoved;
 			pgmoved = 0;
@@ -867,7 +868,7 @@ force_reclaim_mapped:
 			spin_lock_irq(&zone->lru_lock);
 		}
 	}
-	zone->nr_inactive += pgmoved;
+	__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
 	pgdeactivate += pgmoved;
 	if (buffer_heads_over_limit) {
 		spin_unlock_irq(&zone->lru_lock);
@@ -885,14 +886,14 @@ force_reclaim_mapped:
 		list_move(&page->lru, &zone->active_list);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			zone->nr_active += pgmoved;
+			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 			pgmoved = 0;
 			spin_unlock_irq(&zone->lru_lock);
 			__pagevec_release(&pvec);
 			spin_lock_irq(&zone->lru_lock);
 		}
 	}
-	zone->nr_active += pgmoved;
+	__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	__count_vm_events(PGDEACTIVATE, pgdeactivate);
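
Note that the batching structure around these updates is untouched: pgmoved accumulates while pages are queued in the pagevec, and the counter is adjusted once per drained batch while zone->lru_lock is held with interrupts off (which is why the non-atomic __mod_zone_page_state() variant is safe here). A userspace sketch of that accumulate-then-flush pattern follows; the batch size of 14 and the lock stand-ins are assumptions in the spirit of a pagevec, not values taken from this diff.

/* Userspace sketch of "accumulate, then update the counter once per
 * batch". BATCH models a pagevec's capacity; lock()/unlock() stand in
 * for spin_lock_irq(&zone->lru_lock) / spin_unlock_irq(...).
 */
#include <stdio.h>

#define BATCH 14		/* assumed pagevec-style batch size */

static long nr_inactive;	/* models the ZVC counter */

static void lock(void)   { /* spin_lock_irq(&zone->lru_lock) */ }
static void unlock(void) { /* spin_unlock_irq(&zone->lru_lock) */ }

int main(void)
{
	int pgmoved = 0;

	lock();
	for (int page = 0; page < 100; page++) {
		pgmoved++;			/* page moved onto the list */
		if (pgmoved == BATCH) {		/* pagevec full: drain it */
			nr_inactive += pgmoved;	/* one update per batch */
			pgmoved = 0;
			unlock();		/* drop the lock to drain */
			lock();
		}
	}
	nr_inactive += pgmoved;			/* final partial batch */
	unlock();

	printf("nr_inactive = %ld\n", nr_inactive);	/* prints 100 */
	return 0;
}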
@@ -918,14 +919,16 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
 	 * slowly sift through the active list.
 	 */
-	zone->nr_scan_active += (zone->nr_active >> priority) + 1;
+	zone->nr_scan_active +=
+		(zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
 	nr_active = zone->nr_scan_active;
 	if (nr_active >= sc->swap_cluster_max)
 		zone->nr_scan_active = 0;
 	else
 		nr_active = 0;
 
-	zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
+	zone->nr_scan_inactive +=
+		(zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
 	nr_inactive = zone->nr_scan_inactive;
 	if (nr_inactive >= sc->swap_cluster_max)
 		zone->nr_scan_inactive = 0;
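
The arithmetic these two hunks rewrap deserves a worked example: each call adds (lru_size >> priority) + 1 to a deferred-scan counter, and real scanning happens only once that counter reaches sc->swap_cluster_max, so pressure too small to act on is carried forward rather than rounded to zero. A standalone model with assumed numbers (priority starting at 12 per DEF_PRIORITY, swap_cluster_max assumed to be 32, one million active pages):

/* Model of the scan-pressure arithmetic in shrink_zone(): small
 * increments accumulate until a batch worth of work exists. All
 * sizes below are assumptions for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long nr_active = 1UL << 20;	/* ~1M active pages */
	unsigned long swap_cluster_max = 32;
	unsigned long nr_scan = 0;

	for (int priority = 12; priority >= 0; priority--) {
		nr_scan += (nr_active >> priority) + 1;
		if (nr_scan >= swap_cluster_max) {
			printf("priority %2d: scan %lu pages\n",
			       priority, nr_scan);
			nr_scan = 0;	/* batch consumed */
		}
		/* otherwise the pressure is carried to the next pass */
	}
	return 0;
}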
@@ -1037,7 +1040,8 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
 
-		lru_pages += zone->nr_active + zone->nr_inactive;
+		lru_pages += zone_page_state(zone, NR_ACTIVE)
+				+ zone_page_state(zone, NR_INACTIVE);
 	}
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
@@ -1182,7 +1186,8 @@ loop_again:
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
-			lru_pages += zone->nr_active + zone->nr_inactive;
+			lru_pages += zone_page_state(zone, NR_ACTIVE)
+					+ zone_page_state(zone, NR_INACTIVE);
 		}
 
 		/*
@@ -1219,8 +1224,9 @@ loop_again:
 			if (zone->all_unreclaimable)
 				continue;
 			if (nr_slab == 0 && zone->pages_scanned >=
-				(zone->nr_active + zone->nr_inactive) * 6)
+				(zone_page_state(zone, NR_ACTIVE)
+					+ zone_page_state(zone, NR_INACTIVE)) * 6)
 				zone->all_unreclaimable = 1;
 			/*
 			 * If we've done a decent amount of scanning and
 			 * the reclaim ratio is low, start doing writepage
@@ -1385,18 +1391,22 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 
 		/* For pass = 0 we don't shrink the active list */
 		if (pass > 0) {
-			zone->nr_scan_active += (zone->nr_active >> prio) + 1;
+			zone->nr_scan_active +=
+				(zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
 			if (zone->nr_scan_active >= nr_pages || pass > 3) {
 				zone->nr_scan_active = 0;
-				nr_to_scan = min(nr_pages, zone->nr_active);
+				nr_to_scan = min(nr_pages,
+					zone_page_state(zone, NR_ACTIVE));
 				shrink_active_list(nr_to_scan, zone, sc, prio);
 			}
 		}
 
-		zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1;
+		zone->nr_scan_inactive +=
+			(zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
 		if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
 			zone->nr_scan_inactive = 0;
-			nr_to_scan = min(nr_pages, zone->nr_inactive);
+			nr_to_scan = min(nr_pages,
+				zone_page_state(zone, NR_INACTIVE));
 			ret += shrink_inactive_list(nr_to_scan, zone, sc);
 			if (ret >= nr_pages)
 				return ret;
@@ -1408,12 +1418,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 
 static unsigned long count_lru_pages(void)
 {
-	struct zone *zone;
-	unsigned long ret = 0;
-
-	for_each_zone(zone)
-		ret += zone->nr_active + zone->nr_inactive;
-	return ret;
+	return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
 }
 
 /*
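
The count_lru_pages() hunk is the payoff of the conversion: ZVC items are aggregated globally as they are updated, so the for_each_zone() walk collapses to two global_page_state() reads. A userspace model of why the loop becomes redundant; all names and numbers here are invented for illustration:

/* Model of why count_lru_pages() loses its loop: if every per-zone
 * update also maintains a global aggregate (what global_page_state()
 * reads), counting all LRU pages needs no walk over zones.
 */
#include <stdio.h>

#define NR_ZONES 4

static long zone_active[NR_ZONES], zone_inactive[NR_ZONES];
static long global_active, global_inactive;

static void mod_active(int zone, long delta)
{
	zone_active[zone] += delta;
	global_active += delta;		/* aggregate kept in step */
}

static void mod_inactive(int zone, long delta)
{
	zone_inactive[zone] += delta;
	global_inactive += delta;
}

static long count_lru_pages(void)
{
	/* Before: for_each_zone() summing two fields. After: two reads. */
	return global_active + global_inactive;
}

int main(void)
{
	mod_active(0, 100);
	mod_inactive(2, 50);
	printf("lru pages: %ld\n", count_lru_pages());	/* prints 150 */
	return 0;
}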