about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorBartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>2012-10-08 19:32:05 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-09 03:22:45 -0400
commitd95ea5d18e699515468368415c93ed49b1a3221b (patch)
tree5e4828e1ad279462c64c08dd305905e610418d90 /mm
parentd1ce749a0db12202b711d1aba1d29e823034648d (diff)
cma: fix watermark checking
* Add ALLOC_CMA alloc flag and pass it to [__]zone_watermark_ok() (from Minchan Kim).
* During watermark check decrease available free pages number by free CMA pages number if necessary (unmovable allocations cannot use pages from CMA areas).

Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/compaction.c8
-rw-r--r--mm/internal.h14
-rw-r--r--mm/page_alloc.c31
3 files changed, 36 insertions, 17 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 0fbc6b73a522..1f61bcbd6262 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -934,6 +934,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
934 struct zoneref *z; 934 struct zoneref *z;
935 struct zone *zone; 935 struct zone *zone;
936 int rc = COMPACT_SKIPPED; 936 int rc = COMPACT_SKIPPED;
937 int alloc_flags = 0;
937 938
938 /* Check if the GFP flags allow compaction */ 939 /* Check if the GFP flags allow compaction */
939 if (!order || !may_enter_fs || !may_perform_io) 940 if (!order || !may_enter_fs || !may_perform_io)
@@ -941,6 +942,10 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
941 942
942 count_vm_event(COMPACTSTALL); 943 count_vm_event(COMPACTSTALL);
943 944
945#ifdef CONFIG_CMA
946 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
947 alloc_flags |= ALLOC_CMA;
948#endif
944 /* Compact each zone in the list */ 949 /* Compact each zone in the list */
945 for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, 950 for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
946 nodemask) { 951 nodemask) {
@@ -951,7 +956,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
951 rc = max(status, rc); 956 rc = max(status, rc);
952 957
953 /* If a normal allocation would succeed, stop compacting */ 958 /* If a normal allocation would succeed, stop compacting */
954 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) 959 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
960 alloc_flags))
955 break; 961 break;
956 } 962 }
957 963
diff --git a/mm/internal.h b/mm/internal.h
index 8312d4fadf59..96cda4c6ac56 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -358,4 +358,18 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
358extern void set_pageblock_order(void); 358extern void set_pageblock_order(void);
359unsigned long reclaim_clean_pages_from_list(struct zone *zone, 359unsigned long reclaim_clean_pages_from_list(struct zone *zone,
360 struct list_head *page_list); 360 struct list_head *page_list);
361/* The ALLOC_WMARK bits are used as an index to zone->watermark */
362#define ALLOC_WMARK_MIN WMARK_MIN
363#define ALLOC_WMARK_LOW WMARK_LOW
364#define ALLOC_WMARK_HIGH WMARK_HIGH
365#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
366
367/* Mask to get the watermark bits */
368#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
369
370#define ALLOC_HARDER 0x10 /* try to alloc harder */
371#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
372#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
373#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
374
361#endif /* __MM_INTERNAL_H */ 375#endif /* __MM_INTERNAL_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6969a8abdba2..f2c7cc6a3039 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1541,19 +1541,6 @@ failed:
1541 return NULL; 1541 return NULL;
1542} 1542}
1543 1543
1544/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1545#define ALLOC_WMARK_MIN WMARK_MIN
1546#define ALLOC_WMARK_LOW WMARK_LOW
1547#define ALLOC_WMARK_HIGH WMARK_HIGH
1548#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
1549
1550/* Mask to get the watermark bits */
1551#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
1552
1553#define ALLOC_HARDER 0x10 /* try to alloc harder */
1554#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
1555#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
1556
1557#ifdef CONFIG_FAIL_PAGE_ALLOC 1544#ifdef CONFIG_FAIL_PAGE_ALLOC
1558 1545
1559static struct { 1546static struct {
@@ -1648,7 +1635,11 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1648 min -= min / 2; 1635 min -= min / 2;
1649 if (alloc_flags & ALLOC_HARDER) 1636 if (alloc_flags & ALLOC_HARDER)
1650 min -= min / 4; 1637 min -= min / 4;
1651 1638#ifdef CONFIG_CMA
1639 /* If allocation can't use CMA areas don't use free CMA pages */
1640 if (!(alloc_flags & ALLOC_CMA))
1641 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
1642#endif
1652 if (free_pages <= min + lowmem_reserve) 1643 if (free_pages <= min + lowmem_reserve)
1653 return false; 1644 return false;
1654 for (o = 0; o < order; o++) { 1645 for (o = 0; o < order; o++) {
@@ -2362,7 +2353,10 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
2362 unlikely(test_thread_flag(TIF_MEMDIE)))) 2353 unlikely(test_thread_flag(TIF_MEMDIE))))
2363 alloc_flags |= ALLOC_NO_WATERMARKS; 2354 alloc_flags |= ALLOC_NO_WATERMARKS;
2364 } 2355 }
2365 2356#ifdef CONFIG_CMA
2357 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2358 alloc_flags |= ALLOC_CMA;
2359#endif
2366 return alloc_flags; 2360 return alloc_flags;
2367} 2361}
2368 2362
@@ -2587,6 +2581,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2587 struct page *page = NULL; 2581 struct page *page = NULL;
2588 int migratetype = allocflags_to_migratetype(gfp_mask); 2582 int migratetype = allocflags_to_migratetype(gfp_mask);
2589 unsigned int cpuset_mems_cookie; 2583 unsigned int cpuset_mems_cookie;
2584 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
2590 2585
2591 gfp_mask &= gfp_allowed_mask; 2586 gfp_mask &= gfp_allowed_mask;
2592 2587
@@ -2615,9 +2610,13 @@ retry_cpuset:
2615 if (!preferred_zone) 2610 if (!preferred_zone)
2616 goto out; 2611 goto out;
2617 2612
2613#ifdef CONFIG_CMA
2614 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2615 alloc_flags |= ALLOC_CMA;
2616#endif
2618 /* First allocation attempt */ 2617 /* First allocation attempt */
2619 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, 2618 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2620 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET, 2619 zonelist, high_zoneidx, alloc_flags,
2621 preferred_zone, migratetype); 2620 preferred_zone, migratetype);
2622 if (unlikely(!page)) 2621 if (unlikely(!page))
2623 page = __alloc_pages_slowpath(gfp_mask, order, 2622 page = __alloc_pages_slowpath(gfp_mask, order,