Diffstat (limited to 'mm/vmscan.c')
 -rw-r--r--  mm/vmscan.c | 68
 1 file changed, 25 insertions(+), 43 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eeacb0d695c3..ff2ebe9458a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -47,8 +47,6 @@ struct scan_control {
         /* Incremented by the number of inactive pages that were scanned */
         unsigned long nr_scanned;
 
-        unsigned long nr_mapped;        /* From page_state */
-
         /* This context's GFP mask */
         gfp_t gfp_mask;
 
@@ -217,7 +215,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                         break;
                 if (shrink_ret < nr_before)
                         ret += nr_before - shrink_ret;
-                mod_page_state(slabs_scanned, this_scan);
+                count_vm_events(SLABS_SCANNED, this_scan);
                 total_scan -= this_scan;
 
                 cond_resched();
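Note on the new interface used throughout this patch: count_vm_event()/count_vm_events() bump lightweight per-CPU event counters that are only summed up when the statistics are read (for example via /proc/vmstat), replacing the heavier mod_page_state()/inc_page_state() bookkeeping. The following is a minimal userspace sketch of that per-CPU-sum idea with invented NR_CPUS and event names; it is an illustration of the concept, not the kernel's actual implementation.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's per-CPU event machinery. */
#define NR_CPUS 4
enum vm_event_item { SLABS_SCANNED, PGACTIVATE, ALLOCSTALL, NR_VM_EVENT_ITEMS };

static unsigned long vm_events[NR_CPUS][NR_VM_EVENT_ITEMS];

/* Fast path: bump this CPU's slot only; no shared cacheline or global lock. */
static void count_vm_events(int cpu, enum vm_event_item item, unsigned long delta)
{
        vm_events[cpu][item] += delta;
}

/* Slow path: fold all CPUs together when somebody reads the statistics. */
static unsigned long all_vm_events(enum vm_event_item item)
{
        unsigned long sum = 0;
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                sum += vm_events[cpu][item];
        return sum;
}

int main(void)
{
        count_vm_events(0, SLABS_SCANNED, 128);
        count_vm_events(2, SLABS_SCANNED, 64);
        printf("slabs_scanned %lu\n", all_vm_events(SLABS_SCANNED)); /* prints 192 */
        return 0;
}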
@@ -571,7 +569,7 @@ keep:
         list_splice(&ret_pages, page_list);
         if (pagevec_count(&freed_pvec))
                 __pagevec_release_nonlru(&freed_pvec);
-        mod_page_state(pgactivate, pgactivate);
+        count_vm_events(PGACTIVATE, pgactivate);
         return nr_reclaimed;
 }
 
@@ -661,11 +659,11 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 nr_reclaimed += nr_freed;
                 local_irq_disable();
                 if (current_is_kswapd()) {
-                        __mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
-                        __mod_page_state(kswapd_steal, nr_freed);
+                        __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+                        __count_vm_events(KSWAPD_STEAL, nr_freed);
                 } else
-                        __mod_page_state_zone(zone, pgscan_direct, nr_scan);
-                __mod_page_state_zone(zone, pgsteal, nr_freed);
+                        __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
+                __count_vm_events(PGACTIVATE, nr_freed);
 
                 if (nr_taken == 0)
                         goto done;
@@ -744,7 +742,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                  * how much memory
                  * is mapped.
                  */
-                mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages;
+                mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
+                                global_page_state(NR_ANON_PAGES)) * 100) /
+                                        vm_total_pages;
 
                 /*
                  * Now decide how much we really want to unmap some pages. The
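With the cached sc->nr_mapped snapshot gone, the mapped ratio is now derived from the live NR_FILE_MAPPED and NR_ANON_PAGES counters at the moment shrink_active_list() runs. The arithmetic is a plain integer percentage; here is a small sketch with invented counter values (plain variables standing in for the kernel accessors, not the real global_page_state() calls).

#include <stdio.h>

int main(void)
{
        /* Invented sample values, all in pages. */
        unsigned long nr_file_mapped = 30000;   /* pagecache pages mapped into page tables */
        unsigned long nr_anon_pages  = 50000;   /* anonymous pages */
        unsigned long vm_total_pages = 262144;  /* 1 GiB worth of 4 KiB pages */

        /* Same integer math as the patched code; the division truncates. */
        unsigned long mapped_ratio =
                ((nr_file_mapped + nr_anon_pages) * 100) / vm_total_pages;

        printf("mapped_ratio = %lu%%\n", mapped_ratio); /* prints 30 */
        return 0;
}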
@@ -841,11 +841,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 }
         }
         zone->nr_active += pgmoved;
-        spin_unlock(&zone->lru_lock);
 
-        __mod_page_state_zone(zone, pgrefill, pgscanned);
-        __mod_page_state(pgdeactivate, pgdeactivate);
-        local_irq_enable();
+        __count_zone_vm_events(PGREFILL, zone, pgscanned);
+        __count_vm_events(PGDEACTIVATE, pgdeactivate);
+        spin_unlock_irq(&zone->lru_lock);
 
         pagevec_release(&pvec);
 }
@@ -977,7 +976,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                 .swappiness = vm_swappiness,
         };
 
-        inc_page_state(allocstall);
+        count_vm_event(ALLOCSTALL);
 
         for (i = 0; zones[i] != NULL; i++) {
                 struct zone *zone = zones[i];
@@ -990,7 +989,6 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
         }
 
         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-                sc.nr_mapped = read_page_state(nr_mapped);
                 sc.nr_scanned = 0;
                 if (!priority)
                         disable_swap_token();
@@ -1075,9 +1073,7 @@ loop_again:
         total_scanned = 0;
         nr_reclaimed = 0;
         sc.may_writepage = !laptop_mode;
-        sc.nr_mapped = read_page_state(nr_mapped);
-
-        inc_page_state(pageoutrun);
+        count_vm_event(PAGEOUTRUN);
 
         for (i = 0; i < pgdat->nr_zones; i++) {
                 struct zone *zone = pgdat->node_zones + i;
@@ -1365,7 +1361,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
         for_each_zone(zone)
                 lru_pages += zone->nr_active + zone->nr_inactive;
 
-        nr_slab = read_page_state(nr_slab);
+        nr_slab = global_page_state(NR_SLAB);
         /* If slab caches are huge, it's better to hit them first */
         while (nr_slab >= lru_pages) {
                 reclaim_state.reclaimed_slab = 0;
@@ -1407,9 +1403,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
         for (prio = DEF_PRIORITY; prio >= 0; prio--) {
                 unsigned long nr_to_scan = nr_pages - ret;
 
-                sc.nr_mapped = read_page_state(nr_mapped);
                 sc.nr_scanned = 0;
-
                 ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
                 if (ret >= nr_pages)
                         goto out;
@@ -1523,11 +1517,6 @@ int zone_reclaim_mode __read_mostly;
 #define RECLAIM_SLAB (1<<3)        /* Do a global slab shrink if the zone is out of memory */
 
 /*
- * Mininum time between zone reclaim scans
- */
-int zone_reclaim_interval __read_mostly = 30*HZ;
-
-/*
  * Priority for ZONE_RECLAIM. This determines the fraction of pages
  * of a node considered for each zone_reclaim. 4 scans 1/16th of
  * a zone.
@@ -1548,7 +1537,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         struct scan_control sc = {
                 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
-                .nr_mapped = read_page_state(nr_mapped),
                 .swap_cluster_max = max_t(unsigned long, nr_pages,
                                         SWAP_CLUSTER_MAX),
                 .gfp_mask = gfp_mask,
@@ -1593,16 +1581,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
         p->reclaim_state = NULL;
         current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
-
-        if (nr_reclaimed == 0) {
-                /*
-                 * We were unable to reclaim enough pages to stay on node. We
-                 * now allow off node accesses for a certain time period before
-                 * trying again to reclaim pages from the local zone.
-                 */
-                zone->last_unsuccessful_zone_reclaim = jiffies;
-        }
-
         return nr_reclaimed >= nr_pages;
 }
 
@@ -1612,13 +1590,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         int node_id;
 
         /*
-         * Do not reclaim if there was a recent unsuccessful attempt at zone
-         * reclaim.  In that case we let allocations go off node for the
-         * zone_reclaim_interval. Otherwise we would scan for each off-node
-         * page allocation.
+         * Do not reclaim if there are not enough reclaimable pages in this
+         * zone that would satify this allocations.
+         *
+         * All unmapped pagecache pages are reclaimable.
+         *
+         * Both counters may be temporarily off a bit so we use
+         * SWAP_CLUSTER_MAX as the boundary. It may also be good to
+         * leave a few frequently used unmapped pagecache pages around.
          */
-        if (time_before(jiffies,
-                zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
+        if (zone_page_state(zone, NR_FILE_PAGES) -
+            zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
                 return 0;
 
         /*
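The new bail-out above treats unmapped pagecache (NR_FILE_PAGES minus NR_FILE_MAPPED) as the pool zone reclaim can realistically free, and skips the scan entirely when that pool is smaller than SWAP_CLUSTER_MAX, replacing the old time-based back-off. Below is a hedged sketch of just that check with invented per-zone numbers; plain variables stand in for the kernel's zone_page_state() accessors, and the SWAP_CLUSTER_MAX value is used only for illustration.

#include <stdio.h>
#include <stdbool.h>

#define SWAP_CLUSTER_MAX 32UL   /* reclaim batch size; 32 in kernels of this era */

/* Returns true if a zone-local reclaim pass is worth attempting. */
static bool worth_reclaiming(unsigned long nr_file_pages,
                             unsigned long nr_file_mapped)
{
        /* Only unmapped pagecache counts as readily reclaimable here. */
        return nr_file_pages - nr_file_mapped >= SWAP_CLUSTER_MAX;
}

int main(void)
{
        /* Invented zone counters, in pages. */
        printf("%d\n", worth_reclaiming(5000, 4990)); /* 0: only 10 unmapped pages */
        printf("%d\n", worth_reclaiming(5000, 1000)); /* 1: 4000 unmapped pages */
        return 0;
}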
