author		Marek Szyprowski <m.szyprowski@samsung.com>	2012-12-11 19:02:59 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 20:22:27 -0500
commit		bc357f431c836c6631751e3ef7dfe7882394ad67 (patch)
tree		b67904e354a30c9ecc0a53b8288a3a74c37b9bc2 /mm/page_alloc.c
parent		2e30abd1730751d58463d88bc0844ab8fd7112a9 (diff)
mm: cma: remove watermark hacks
Commits 2139cbe627b8 ("cma: fix counting of isolated pages") and
d95ea5d18e69 ("cma: fix watermark checking") introduced a reliable
method of free page accounting when memory is being allocated from CMA
regions, so the workaround introduced earlier by commit 49f223a9cd96
("mm: trigger page reclaim in alloc_contig_range() to stabilise
watermarks") can finally be removed.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Mel Gorman <mel@csn.ul.ie>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	58
1 file changed, 0 insertions(+), 58 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 265fea4fbc81..5a8d339d282a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5218,10 +5218,6 @@ static void __setup_per_zone_wmarks(void)
 		zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
-		zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
-		zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
-		zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
-
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -5766,54 +5762,6 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 	return ret > 0 ? 0 : ret;
 }
 
-/*
- * Update zone's cma pages counter used for watermark level calculation.
- */
-static inline void __update_cma_watermarks(struct zone *zone, int count)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	zone->min_cma_pages += count;
-	spin_unlock_irqrestore(&zone->lock, flags);
-	setup_per_zone_wmarks();
-}
-
-/*
- * Trigger memory pressure bump to reclaim some pages in order to be able to
- * allocate 'count' pages in single page units. Does similar work as
- * __alloc_pages_slowpath() function.
- */
-static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
-{
-	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
-	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
-	int did_some_progress = 0;
-	int order = 1;
-
-	/*
-	 * Increase level of watermarks to force kswapd do his job
-	 * to stabilise at new watermark level.
-	 */
-	__update_cma_watermarks(zone, count);
-
-	/* Obey watermarks as if the page was being allocated */
-	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
-		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
-
-		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
-						      NULL);
-		if (!did_some_progress) {
-			/* Exhausted what can be done so it's blamo time */
-			out_of_memory(zonelist, gfp_mask, order, NULL, false);
-		}
-	}
-
-	/* Restore original watermark levels. */
-	__update_cma_watermarks(zone, -count);
-
-	return count;
-}
-
 /**
  * alloc_contig_range() -- tries to allocate given range of pages
  * @start:	start PFN to allocate
@@ -5837,7 +5785,6 @@ static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
 int alloc_contig_range(unsigned long start, unsigned long end,
 		       unsigned migratetype)
 {
-	struct zone *zone = page_zone(pfn_to_page(start));
 	unsigned long outer_start, outer_end;
 	int ret = 0, order;
 
@@ -5922,11 +5869,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		goto done;
 	}
 
-	/*
-	 * Reclaim enough pages to make sure that contiguous allocation
-	 * will not starve the system.
-	 */
-	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
 
 	/* Grab isolated pages from freelists. */
 	outer_end = isolate_freepages_range(&cc, outer_start, end);