about summary refs log tree commit diff stats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  42
1 files changed, 29 insertions, 13 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 87e4d6a6dc11..ae3bf0a09cdd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1642,18 +1642,6 @@ static void move_active_pages_to_lru(struct zone *zone,
1642 unsigned long pgmoved = 0; 1642 unsigned long pgmoved = 0;
1643 struct page *page; 1643 struct page *page;
1644 1644
1645 if (buffer_heads_over_limit) {
1646 spin_unlock_irq(&zone->lru_lock);
1647 list_for_each_entry(page, list, lru) {
1648 if (page_has_private(page) && trylock_page(page)) {
1649 if (page_has_private(page))
1650 try_to_release_page(page, 0);
1651 unlock_page(page);
1652 }
1653 }
1654 spin_lock_irq(&zone->lru_lock);
1655 }
1656
1657 while (!list_empty(list)) { 1645 while (!list_empty(list)) {
1658 struct lruvec *lruvec; 1646 struct lruvec *lruvec;
1659 1647
@@ -1735,6 +1723,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
1735 continue; 1723 continue;
1736 } 1724 }
1737 1725
1726 if (unlikely(buffer_heads_over_limit)) {
1727 if (page_has_private(page) && trylock_page(page)) {
1728 if (page_has_private(page))
1729 try_to_release_page(page, 0);
1730 unlock_page(page);
1731 }
1732 }
1733
1738 if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) { 1734 if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
1739 nr_rotated += hpage_nr_pages(page); 1735 nr_rotated += hpage_nr_pages(page);
1740 /* 1736 /*
@@ -2238,6 +2234,14 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
2238 unsigned long nr_soft_scanned; 2234 unsigned long nr_soft_scanned;
2239 bool aborted_reclaim = false; 2235 bool aborted_reclaim = false;
2240 2236
2237 /*
2238 * If the number of buffer_heads in the machine exceeds the maximum
2239 * allowed level, force direct reclaim to scan the highmem zone as
2240 * highmem pages could be pinning lowmem pages storing buffer_heads
2241 */
2242 if (buffer_heads_over_limit)
2243 sc->gfp_mask |= __GFP_HIGHMEM;
2244
2241 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2245 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2242 gfp_zone(sc->gfp_mask), sc->nodemask) { 2246 gfp_zone(sc->gfp_mask), sc->nodemask) {
2243 if (!populated_zone(zone)) 2247 if (!populated_zone(zone))
@@ -2727,6 +2731,17 @@ loop_again:
2727 */ 2731 */
2728 age_active_anon(zone, &sc, priority); 2732 age_active_anon(zone, &sc, priority);
2729 2733
2734 /*
2735 * If the number of buffer_heads in the machine
2736 * exceeds the maximum allowed level and this node
2737 * has a highmem zone, force kswapd to reclaim from
2738 * it to relieve lowmem pressure.
2739 */
2740 if (buffer_heads_over_limit && is_highmem_idx(i)) {
2741 end_zone = i;
2742 break;
2743 }
2744
2730 if (!zone_watermark_ok_safe(zone, order, 2745 if (!zone_watermark_ok_safe(zone, order,
2731 high_wmark_pages(zone), 0, 0)) { 2746 high_wmark_pages(zone), 0, 0)) {
2732 end_zone = i; 2747 end_zone = i;
@@ -2802,7 +2817,8 @@ loop_again:
2802 COMPACT_SKIPPED) 2817 COMPACT_SKIPPED)
2803 testorder = 0; 2818 testorder = 0;
2804 2819
2805 if (!zone_watermark_ok_safe(zone, testorder, 2820 if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
2821 !zone_watermark_ok_safe(zone, order,
2806 high_wmark_pages(zone) + balance_gap, 2822 high_wmark_pages(zone) + balance_gap,
2807 end_zone, 0)) { 2823 end_zone, 0)) {
2808 shrink_zone(priority, zone, &sc); 2824 shrink_zone(priority, zone, &sc);