author		Johannes Weiner <hannes@cmpxchg.org>	2014-10-02 19:21:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-02 19:28:44 -0400
commit		abe5f972912d086c080be4bde67750630b6fb38b (patch)
tree		a601d6b5a7cb3733492d7c1591df135b95c1f4bb /mm/page_alloc.c
parent		6c72e3501d0d62fc064d3680e5234f3463ec5a86 (diff)
mm: page_alloc: fix zone allocation fairness on UP
The zone allocation batches can easily underflow due to higher-order
allocations or spills to remote nodes.  On SMP that's fine, because
underflows are expected from concurrency and dealt with by returning 0.
But on UP, zone_page_state will just return a wrapped unsigned long,
which will get past the <= 0 check and then consider the zone eligible
until its watermarks are hit.

Commit 3a025760fc15 ("mm: page_alloc: spill to remote nodes before
waking kswapd") already made the counter-resetting use
atomic_long_read() to accommodate underflows from remote spills, but it
didn't go all the way with it.  Make it clear that these batches are
expected to go negative regardless of concurrency, and use
atomic_long_read() everywhere.

Fixes: 81c0a2bb515f ("mm: page_alloc: fair zone allocator policy")
Reported-by: Vlastimil Babka <vbabka@suse.cz>
Reported-by: Leon Romanovsky <leon@leon.nu>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: <stable@vger.kernel.org>	[3.12+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
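To see the failure mode concretely, the following is a minimal userspace sketch (an illustration only, not kernel code; the variable names are made up): a counter that has gone negative slips past a "<= 0" test when read back through an unsigned long, while a signed read catches the underflow.

	#include <stdio.h>

	int main(void)
	{
		long counter = 4;			/* stands in for the NR_ALLOC_BATCH counter */

		counter -= 1 << 3;			/* higher-order allocation: counter is now -4 */

		unsigned long unsigned_view = counter;	/* what an unsigned accessor returns on UP */
		long signed_view = counter;		/* what atomic_long_read() would return */

		if (unsigned_view <= 0)			/* never true: -4 wrapped to a huge value */
			puts("unsigned view: depleted");
		else
			printf("unsigned view: %lu pages left?\n", unsigned_view);

		if (signed_view <= 0)			/* true: the underflow is visible */
			puts("signed view: depleted");

		return 0;
	}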
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 18cee0d4c8a2..eee961958021 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1612,7 +1612,7 @@ again:
 	}
 
 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-	if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0 &&
+	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
 	    !zone_is_fair_depleted(zone))
 		zone_set_flag(zone, ZONE_FAIR_DEPLETED);
 
@@ -5701,9 +5701,8 @@ static void __setup_per_zone_wmarks(void)
 			zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
-				      high_wmark_pages(zone) -
-				      low_wmark_pages(zone) -
-				      zone_page_state(zone, NR_ALLOC_BATCH));
+			high_wmark_pages(zone) - low_wmark_pages(zone) -
+			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
 
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
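For context on why only UP was affected: zone_page_state() clamps negative values to zero only when CONFIG_SMP is set, roughly as in the sketch below (a simplified paraphrase of include/linux/vmstat.h from that era, not a verbatim copy), so on UP the raw negative counter is returned through an unsigned long and wraps.

	/* Simplified paraphrase of zone_page_state(), not verbatim kernel source. */
	static inline unsigned long zone_page_state(struct zone *zone,
						enum zone_stat_item item)
	{
		long x = atomic_long_read(&zone->vm_stat[item]);
	#ifdef CONFIG_SMP
		if (x < 0)		/* underflows are expected under concurrency */
			x = 0;
	#endif
		return x;		/* on UP, a negative x wraps in the unsigned return */
	}

Bypassing this accessor and reading the counter with atomic_long_read() directly, as the patch does, keeps the value signed and makes the <= 0 depletion check behave the same on UP and SMP.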