summaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorLucas Stach <l.stach@pengutronix.de>2017-02-24 17:58:37 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-24 20:46:55 -0500
commitca96b625341027f611c3e61351a70311077ebcf5 (patch)
treeb037298eee0aabcf294f7d0327ad28590ea006c7 /mm
parent5a02026d390ea1bb0c16a0e214e45613a3e3d885 (diff)
mm: alloc_contig_range: allow to specify GFP mask
Currently alloc_contig_range assumes that the compaction should be done with the default GFP_KERNEL flags. This is probably right for all current uses of this interface, but may change as CMA is used in more use-cases (including being the default DMA memory allocator on some platforms). Change the function prototype, to allow for passing through the GFP mask set by upper layers. Also respect global restrictions by applying memalloc_noio_flags to the passed in flags. Link: http://lkml.kernel.org/r/20170127172328.18574-1-l.stach@pengutronix.de Signed-off-by: Lucas Stach <l.stach@pengutronix.de> Acked-by: Michal Hocko <mhocko@suse.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Radim Krcmar <rkrcmar@redhat.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Chris Zankel <chris@zankel.net> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Alexander Graf <agraf@suse.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/cma.c3
-rw-r--r--mm/hugetlb.c3
-rw-r--r--mm/page_alloc.c5
3 files changed, 7 insertions, 4 deletions
diff --git a/mm/cma.c b/mm/cma.c
index 94b3460cd608..c6aed23ca6df 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -402,7 +402,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 
 	pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 	mutex_lock(&cma_mutex);
-	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
+				 GFP_KERNEL);
 	mutex_unlock(&cma_mutex);
 	if (ret == 0) {
 		page = pfn_to_page(pfn);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 167fd0722c15..2e0e8159ce8e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1052,7 +1052,8 @@ static int __alloc_gigantic_page(unsigned long start_pfn,
 				  unsigned long nr_pages)
 {
 	unsigned long end_pfn = start_pfn + nr_pages;
-	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
+				  GFP_KERNEL);
 }
 
 static bool pfn_range_valid_gigantic(struct zone *z,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2d34cdb70f1d..8a0f33624335 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7399,6 +7399,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
  *			#MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
  *			in range must have the same migratetype and it must
  *			be either of the two.
+ * @gfp_mask:	GFP mask to use during compaction
  *
  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
  * aligned, however it's the caller's responsibility to guarantee that
@@ -7412,7 +7413,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
  * need to be freed with free_contig_range().
  */
 int alloc_contig_range(unsigned long start, unsigned long end,
-		       unsigned migratetype)
+		       unsigned migratetype, gfp_t gfp_mask)
 {
 	unsigned long outer_start, outer_end;
 	unsigned int order;
@@ -7424,7 +7425,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		.zone = page_zone(pfn_to_page(start)),
 		.mode = MIGRATE_SYNC,
 		.ignore_skip_hint = true,
-		.gfp_mask = GFP_KERNEL,
+		.gfp_mask = memalloc_noio_flags(gfp_mask),
 	};
 	INIT_LIST_HEAD(&cc.migratepages);
 