author:    Mel Gorman <mel@csn.ul.ie>  2012-03-21 19:34:00 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2012-03-21 20:54:57 -0400
commit: cc715d99e529d470dde2f33a6614f255adea71f3 (patch)
tree:   f8e8aefb8416aed6747e9b5c071e9575484aa912 /mm/vmscan.c
parent: ce1744f4ed20ca873360e54502f8a71564ef7cc6 (diff)
mm: vmscan: forcibly scan highmem if there are too many buffer_heads pinning highmem
Stuart Foster reported on bugzilla that copying large amounts of data from NTFS caused an OOM kill on 32-bit x86 with 16G of memory. Andrew Morton correctly identified that the problem was that NTFS was using 512-byte blocks, meaning each page had 8 buffer_heads in low memory pinning it.

In the past, direct reclaim used to scan highmem even if the allocating process did not specify __GFP_HIGHMEM, but not any more. kswapd will no longer reclaim from zones that are above the high watermark. The intention in both cases was to minimise unnecessary reclaim. The downside is that, on machines with large amounts of highmem, lowmem can be fully consumed by buffer_heads with nothing trying to free them.

The following patch is based on a suggestion by Andrew Morton to extend the buffer_heads_over_limit case to force kswapd and direct reclaim to scan the highmem zone regardless of the allocation request or watermarks.

Addresses https://bugzilla.kernel.org/show_bug.cgi?id=42578

[hughd@google.com: move buffer_heads_over_limit check up]
[akpm@linux-foundation.org: buffer_heads_over_limit is unlikely]
Reported-by: Stuart Foster <smf.linux@ntlworld.com>
Tested-by: Stuart Foster <smf.linux@ntlworld.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
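To illustrate the arithmetic above: a minimal standalone C sketch, assuming 4 KiB pages, the 512-byte block size from the report, a struct buffer_head of roughly 100 bytes on 32-bit x86, and ~896 MiB of lowmem under the usual 3G/1G split. All four figures are approximations for illustration, not values taken from this commit.

#include <stdio.h>

int main(void)
{
	/* Assumed figures -- the real sizes depend on kernel config. */
	const unsigned long long page_size = 4096;   /* 4 KiB x86 pages */
	const unsigned long long block_size = 512;   /* block size from the report */
	const unsigned long long bh_size = 104;      /* approx sizeof(struct buffer_head), 32-bit */

	/* One buffer_head per block: 8 per page with 512-byte blocks. */
	unsigned long long bh_per_page = page_size / block_size;

	/* Suppose ~15 GiB of the 16 GiB machine becomes highmem page cache. */
	unsigned long long highmem_pages = (15ULL << 30) >> 12;

	/*
	 * buffer_heads are lowmem allocations that stay pinned for as long
	 * as the cached pages keep their buffers attached.
	 */
	unsigned long long pinned = highmem_pages * bh_per_page * bh_size;

	printf("%llu buffer_heads per page\n", bh_per_page);
	printf("~%llu MiB of lowmem pinned vs ~896 MiB available\n", pinned >> 20);
	return 0;
}

Under these assumptions the buffer_heads alone would need over 3 GiB of lowmem, several times what a 32-bit kernel has, which matches the reported OOM kills.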
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	42
1 file changed, 29 insertions(+), 13 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 87e4d6a6dc11..ae3bf0a09cdd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1642,18 +1642,6 @@ static void move_active_pages_to_lru(struct zone *zone,
 	unsigned long pgmoved = 0;
 	struct page *page;
 
-	if (buffer_heads_over_limit) {
-		spin_unlock_irq(&zone->lru_lock);
-		list_for_each_entry(page, list, lru) {
-			if (page_has_private(page) && trylock_page(page)) {
-				if (page_has_private(page))
-					try_to_release_page(page, 0);
-				unlock_page(page);
-			}
-		}
-		spin_lock_irq(&zone->lru_lock);
-	}
-
 	while (!list_empty(list)) {
 		struct lruvec *lruvec;
 
@@ -1735,6 +1723,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
 			continue;
 		}
 
+		if (unlikely(buffer_heads_over_limit)) {
+			if (page_has_private(page) && trylock_page(page)) {
+				if (page_has_private(page))
+					try_to_release_page(page, 0);
+				unlock_page(page);
+			}
+		}
+
 		if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
 			nr_rotated += hpage_nr_pages(page);
 			/*
@@ -2238,6 +2234,14 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
 	unsigned long nr_soft_scanned;
 	bool aborted_reclaim = false;
 
+	/*
+	 * If the number of buffer_heads in the machine exceeds the maximum
+	 * allowed level, force direct reclaim to scan the highmem zone as
+	 * highmem pages could be pinning lowmem pages storing buffer_heads
+	 */
+	if (buffer_heads_over_limit)
+		sc->gfp_mask |= __GFP_HIGHMEM;
+
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 					gfp_zone(sc->gfp_mask), sc->nodemask) {
 		if (!populated_zone(zone))
@@ -2727,6 +2731,17 @@ loop_again:
 			 */
 			age_active_anon(zone, &sc, priority);
 
+			/*
+			 * If the number of buffer_heads in the machine
+			 * exceeds the maximum allowed level and this node
+			 * has a highmem zone, force kswapd to reclaim from
+			 * it to relieve lowmem pressure.
+			 */
+			if (buffer_heads_over_limit && is_highmem_idx(i)) {
+				end_zone = i;
+				break;
+			}
+
 			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), 0, 0)) {
 				end_zone = i;
@@ -2802,7 +2817,8 @@ loop_again:
 				    COMPACT_SKIPPED)
 					testorder = 0;
 
-			if (!zone_watermark_ok_safe(zone, testorder,
+			if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
+			    !zone_watermark_ok_safe(zone, testorder,
 					high_wmark_pages(zone) + balance_gap,
 					end_zone, 0)) {
 				shrink_zone(priority, zone, &sc);
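For context on why the one-line gfp_mask change in shrink_zones() is sufficient: the zonelist walk is bounded by gfp_zone(sc->gfp_mask), so OR-ing in __GFP_HIGHMEM raises that bound to include highmem. Below is a toy model of that mapping; the real logic lives in include/linux/gfp.h and handles more flags, and the flag values and names here are illustrative, not the kernel's.

#include <stdio.h>

/* Simplified stand-ins for the kernel's zone indices and GFP zone bits. */
enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM };

#define MODEL_GFP_DMA      0x1u
#define MODEL_GFP_HIGHMEM  0x2u

/* Toy version of gfp_zone(): pick the highest zone the walk may visit. */
static enum zone_type gfp_zone_model(unsigned int gfp_mask)
{
	if (gfp_mask & MODEL_GFP_HIGHMEM)
		return ZONE_HIGHMEM;	/* walk now includes highmem zones */
	if (gfp_mask & MODEL_GFP_DMA)
		return ZONE_DMA;
	return ZONE_NORMAL;		/* e.g. GFP_KERNEL stops at lowmem */
}

int main(void)
{
	unsigned int mask = 0;		/* stands in for a GFP_KERNEL sc->gfp_mask */

	printf("before: highest zone = %d\n", gfp_zone_model(mask));
	mask |= MODEL_GFP_HIGHMEM;	/* what the patch does when over the limit */
	printf("after:  highest zone = %d\n", gfp_zone_model(mask));
	return 0;
}

With buffer_heads_over_limit set, a GFP_KERNEL direct reclaimer whose walk would otherwise stop at ZONE_NORMAL now reaches the highmem zones, where shrink_active_list() can strip buffer_heads via try_to_release_page(). kswapd iterates node_zones by index rather than through gfp_zone(), hence the explicit is_highmem_idx() checks in the balance_pgdat() hunks.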