author    Marek Szyprowski <m.szyprowski@samsung.com>  2012-01-25 06:49:24 -0500
committer Marek Szyprowski <m.szyprowski@samsung.com>  2012-05-21 09:09:36 -0400
commit    49f223a9cd96c7293d7258ff88c2bdf83065f69c (patch)
tree      4a141cbe4132ab2a5edfbc44165d091bb2289c75 /mm/page_alloc.c
parent    bba9071087108d3de70bea274e35064cc480487b (diff)
mm: trigger page reclaim in alloc_contig_range() to stabilise watermarks
alloc_contig_range() performs memory allocation, so it should also keep the
memory watermarks at the correct level. This commit adds a call to
*_slowpath-style reclaim to grab enough pages to make sure that the final
collection of contiguous pages from the freelists will not starve the system.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
CC: Michal Nazarewicz <mina86@mina86.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
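A quick numeric sketch of the stabilisation (the figures below are invented
for illustration; real values derive from min_free_kbytes and the zone size):
if a zone's share of min_free_kbytes works out to 2048 pages and a pending
contiguous allocation registers 1024 pages in min_cma_pages, every watermark
is raised by those 1024 pages, so kswapd reclaims the needed headroom before
the range is pulled off the freelists.

#include <stdio.h>

int main(void)
{
	unsigned long tmp = 2048;           /* hypothetical: zone's share of min_free_kbytes, in pages */
	unsigned long min_cma_pages = 1024; /* hypothetical: pending contiguous allocation */

	unsigned long wmark_min  = tmp + min_cma_pages;
	unsigned long wmark_low  = tmp + (tmp >> 2) + min_cma_pages;
	unsigned long wmark_high = tmp + (tmp >> 1) + min_cma_pages;

	/* Prints: min=3072 low=3584 high=4096 -- kswapd now aims above the
	 * bumped low watermark, leaving 1024 pages of headroom for
	 * isolate_freepages_range() to take safely. */
	printf("min=%lu low=%lu high=%lu\n", wmark_min, wmark_low, wmark_high);
	return 0;
}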
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  60
1 file changed, 60 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4615531dcf66..22348ae1005d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5079,6 +5079,11 @@ static void __setup_per_zone_wmarks(void)
 
 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+		zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+		zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+		zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
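cma_wmark_pages() is not part of this hunk; elsewhere in this patch it
presumably reduces to the zone's min_cma_pages counter, and to zero when
CONFIG_CMA is disabled. A minimal sketch, assuming that layout:

#ifdef CONFIG_CMA
/* Pages a pending contiguous allocation wants on top of the normal
 * watermarks; updated by __update_cma_watermarks() further below. */
static unsigned long cma_wmark_pages(struct zone *zone)
{
	return zone->min_cma_pages;
}
#else
static inline unsigned long cma_wmark_pages(struct zone *zone)
{
	return 0;
}
#endif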
@@ -5684,6 +5689,54 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
 	return ret > 0 ? 0 : ret;
 }
 
+/*
+ * Update the zone's CMA page counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->min_cma_pages += count;
+	spin_unlock_irqrestore(&zone->lock, flags);
+	setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger a memory pressure bump to reclaim some pages in order to be
+ * able to allocate 'count' pages in single page units. Does work
+ * similar to the __alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+	int did_some_progress = 0;
+	int order = 1;
+
+	/*
+	 * Increase the watermark levels to force kswapd to do its job and
+	 * stabilise at the new watermark level.
+	 */
+	__update_cma_watermarks(zone, count);
+
+	/* Obey watermarks as if the page was being allocated */
+	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+						      NULL);
+		if (!did_some_progress) {
+			/* Exhausted what can be done so it's blamo time */
+			out_of_memory(zonelist, gfp_mask, order, NULL, false);
+		}
+	}
+
+	/* Restore original watermark levels. */
+	__update_cma_watermarks(zone, -count);
+
+	return count;
+}
+
 /**
  * alloc_contig_range() -- tries to allocate given range of pages
  * @start:	start PFN to allocate
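The reclaim loop in __reclaim_pages() spins until zone_watermark_ok() passes
for the already-bumped low watermark. Simplified, and ignoring lowmem
reserves and ALLOC_* flags, the test it is waiting on looks roughly like the
sketch below; the call above uses order 0, where the loop body never runs and
the check collapses to a plain free_pages > mark comparison.

#include <stdbool.h>

/* nr_free[o] counts free blocks of order o in the zone. For an order-N
 * request, pages sitting in blocks smaller than order N are discounted
 * and the mark is halved per order, so a fragmented zone can fail the
 * test even when its raw free count looks sufficient. */
static bool watermark_ok_sketch(const unsigned long nr_free[], int order,
				unsigned long free_pages, unsigned long mark)
{
	int o;

	if (free_pages <= mark)
		return false;
	for (o = 0; o < order; o++) {
		free_pages -= nr_free[o] << o;
		mark >>= 1;
		if (free_pages <= mark)
			return false;
	}
	return true;
}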
@@ -5782,6 +5835,13 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		goto done;
 	}
 
+	/*
+	 * Reclaim enough pages to make sure that contiguous allocation
+	 * will not starve the system.
+	 */
+	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+	/* Grab isolated pages from freelists. */
 	outer_end = isolate_freepages_range(outer_start, end);
 	if (!outer_end) {
 		ret = -EBUSY;
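For context, a CMA-style caller would drive this path roughly as follows.
This is a hedged sketch against the alloc_contig_range()/free_contig_range()
interface of this patch series; my_cma_alloc/my_cma_release are hypothetical
names and error handling is simplified.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: carve 'count' contiguous pages out of a region
 * previously marked MIGRATE_CMA. alloc_contig_range() migrates movable
 * pages out of [pfn, pfn + count) and, with this patch, calls
 * __reclaim_pages() first so that pulling the range off the freelists
 * does not drop the zone below its watermarks. */
static struct page *my_cma_alloc(unsigned long pfn, unsigned long count)
{
	if (alloc_contig_range(pfn, pfn + count, MIGRATE_CMA))
		return NULL;
	return pfn_to_page(pfn);
}

static void my_cma_release(struct page *page, unsigned long count)
{
	free_contig_range(page_to_pfn(page), count);
}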