author		Tomasz Stanislawski <t.stanislaws@samsung.com>	2013-06-12 17:05:02 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-06-12 19:29:46 -0400
commit		026b08147923142e925a7d0aaa39038055ae0156 (patch)
tree		9b7529dcf8a06105dec4844c09065fb88a169985 /mm/page_alloc.c
parent		282c4c0ecce9b9ac1b69acae32a4239441601405 (diff)
mm/page_alloc.c: fix watermark check in __zone_watermark_ok()
The watermark check consists of two sub-checks.  The first one is:

	if (free_pages <= min + lowmem_reserve)
		return false;

The check assures that there is a minimal amount of RAM in the zone.  If
CMA is used then free_pages is reduced by the number of free pages in CMA
prior to the above check:

	if (!(alloc_flags & ALLOC_CMA))
		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);

This prevents the zone from being drained of pages available for
non-movable allocations.

The second check prevents the zone from getting too fragmented:

	for (o = 0; o < order; o++) {
		free_pages -= z->free_area[o].nr_free << o;
		min >>= 1;
		if (free_pages <= min)
			return false;
	}

The field z->free_area[o].nr_free is equal to the number of free pages
including free CMA pages.  Therefore the CMA pages are subtracted twice.
This may cause a false positive failure of __zone_watermark_ok() if the
CMA area gets strongly fragmented.  In such a case there are many 0-order
free pages located in CMA.  Those pages are subtracted twice, so they
quickly drain free_pages during the check against fragmentation.  The
test fails even though there are many free non-CMA pages in the zone.

This patch fixes the issue by subtracting CMA pages only for the purpose
of the (free_pages <= min + lowmem_reserve) check.

Laura said:

  We were observing allocation failures of higher order pages (order 5 =
  128K typically) under tight memory conditions resulting in driver
  failure.  The output from the page allocation failure showed plenty of
  free pages of the appropriate order/type/zone and mostly CMA pages in
  the lower orders.

  For full disclosure, we still observed some page allocation failures
  even after applying the patch, but the number was drastically reduced
  and those failures were attributed to fragmentation/other system issues.

Signed-off-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-by: Laura Abbott <lauraa@codeaurora.org>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: <stable@vger.kernel.org>	[3.7+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
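To make the double subtraction concrete, the following stand-alone sketch (not kernel code: the toy_zone structure, the watermark_ok() helper and all of the numbers are invented for illustration) models both the old and the patched check for a zone whose free memory is dominated by fragmented 0-order CMA pages:

	#include <stdbool.h>
	#include <stdio.h>

	#define TOY_MAX_ORDER 10

	/* Toy model of __zone_watermark_ok(); names and numbers are invented. */
	struct toy_zone {
		long nr_free[TOY_MAX_ORDER + 1]; /* free blocks per order, CMA included */
		long nr_free_cma;                /* free CMA pages (order-0 equivalent) */
	};

	static bool watermark_ok(const struct toy_zone *z, int order, long min,
				 long lowmem_reserve, bool fixed)
	{
		long free_pages = 0;
		long free_cma = z->nr_free_cma;
		int o;

		for (o = 0; o <= TOY_MAX_ORDER; o++)
			free_pages += z->nr_free[o] << o;

		if (fixed) {
			/* patched behaviour: discount CMA only in the first sub-check */
			if (free_pages - free_cma <= min + lowmem_reserve)
				return false;
		} else {
			/* old behaviour: CMA removed up front, so the loop below
			 * subtracts the same pages a second time via nr_free */
			free_pages -= free_cma;
			if (free_pages <= min + lowmem_reserve)
				return false;
		}

		/* fragmentation check: nr_free includes free CMA pages */
		for (o = 0; o < order; o++) {
			free_pages -= z->nr_free[o] << o;
			min >>= 1;
			if (free_pages <= min)
				return false;
		}
		return true;
	}

	int main(void)
	{
		/* A zone with 8000 free order-0 pages (6000 of them CMA) and
		 * 16 free order-5 non-CMA blocks that the request could use. */
		struct toy_zone z = {
			.nr_free = { [0] = 8000, [5] = 16 },
			.nr_free_cma = 6000,
		};

		printf("old check passes:   %d\n", watermark_ok(&z, 5, 1000, 0, false));
		printf("fixed check passes: %d\n", watermark_ok(&z, 5, 1000, 0, true));
		return 0;
	}

With these made-up numbers the old variant rejects an order-5 request even though 16 non-CMA order-5 blocks are free: the 6000 CMA pages are removed once up front and then again inside the fragmentation loop via nr_free.  The patched variant discounts them only in the first sub-check and passes.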
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 378a15bcd649..c3edb624fccf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1628,6 +1628,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 	long min = mark;
 	long lowmem_reserve = z->lowmem_reserve[classzone_idx];
 	int o;
+	long free_cma = 0;
 
 	free_pages -= (1 << order) - 1;
 	if (alloc_flags & ALLOC_HIGH)
@@ -1637,9 +1638,10 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 #ifdef CONFIG_CMA
 	/* If allocation can't use CMA areas don't use free CMA pages */
 	if (!(alloc_flags & ALLOC_CMA))
-		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
+		free_cma  = zone_page_state(z, NR_FREE_CMA_PAGES);
 #endif
-	if (free_pages <= min + lowmem_reserve)
+
+	if (free_pages - free_cma <= min + lowmem_reserve)
 		return false;
 	for (o = 0; o < order; o++) {
 		/* At the next order, this order's pages become unavailable */