author    Joonsoo Kim <iamjoonsoo.kim@lge.com>          2014-08-06 19:05:21 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 21:01:16 -0400
commit    a15bc0b89e8812d0db297bc771a85812c4fa83c1
tree      b13e084512724a270bc4ae3164f87fc0c02558a8  /drivers/base/dma-contiguous.c
parent    3162bbd7e65b9cc57b660796dd3409807bfc9070
DMA, CMA: support alignment constraint on CMA region
PPC KVM's CMA area management needs an alignment constraint on its CMA region, so support one here to prepare for generalizing the CMA area management functionality. Additionally, add comments explaining why an alignment constraint is needed on a CMA region.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Gleb Natapov <gleb@kernel.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
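For readers unfamiliar with the CMA alignment rules, the sketch below approximates the sanitisation this patch adds: a caller-supplied alignment must be a power of two, and it is then raised to at least the larger of the MAX_ORDER-1 block size and the pageblock size, so that the buddy allocator cannot merge the region's edge pages into adjacent unmovable pageblocks. This is a standalone illustration only, not kernel code; the helper name sanitise_cma_alignment() and the constant values below are assumptions, not taken from the patch.

/*
 * Standalone sketch of the alignment sanitisation added by this patch.
 * sanitise_cma_alignment() is a hypothetical helper; PAGE_SIZE, MAX_ORDER
 * and pageblock_order are example values standing in for the kernel's own.
 */
#include <stdbool.h>

typedef unsigned long long phys_addr_t;

#define PAGE_SHIFT	12
#define PAGE_SIZE	((phys_addr_t)1 << PAGE_SHIFT)
#define MAX_ORDER	11
#define pageblock_order	9

static bool is_power_of_2(phys_addr_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Returns 0 on success, -1 if the requested alignment is not a power of two. */
static int sanitise_cma_alignment(phys_addr_t *alignment)
{
	int order = MAX_ORDER - 1 > pageblock_order ? MAX_ORDER - 1 : pageblock_order;
	phys_addr_t min_align = (phys_addr_t)PAGE_SIZE << order;

	if (*alignment && !is_power_of_2(*alignment))
		return -1;

	/*
	 * Pages at both ends of a CMA area could otherwise be merged into
	 * adjacent unmovable pageblocks by the buddy allocator, which would
	 * break contiguous allocation, so never align below this minimum.
	 */
	if (*alignment < min_align)
		*alignment = min_align;

	return 0;
}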
Diffstat (limited to 'drivers/base/dma-contiguous.c')
-rw-r--r--	drivers/base/dma-contiguous.c	26
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 9021762227a7..5f62c284072c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -32,6 +32,7 @@
 #include <linux/swap.h>
 #include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
+#include <linux/log2.h>
 
 struct cma {
 	unsigned long	base_pfn;
@@ -215,17 +216,16 @@ core_initcall(cma_init_reserved_areas);
 
 static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 				phys_addr_t base, phys_addr_t limit,
+				phys_addr_t alignment,
 				struct cma **res_cma, bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
-	phys_addr_t alignment;
 	int ret = 0;
 
-	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
-		 (unsigned long)size, (unsigned long)base,
-		 (unsigned long)limit);
+	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
+		 __func__, (unsigned long)size, (unsigned long)base,
+		 (unsigned long)limit, (unsigned long)alignment);
 
-	/* Sanity checks */
 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
 		pr_err("Not enough slots for CMA reserved regions!\n");
 		return -ENOSPC;
@@ -234,8 +234,17 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 	if (!size)
 		return -EINVAL;
 
-	/* Sanitise input arguments */
-	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+	if (alignment && !is_power_of_2(alignment))
+		return -EINVAL;
+
+	/*
+	 * Sanitise input arguments.
+	 * Pages both ends in CMA area could be merged into adjacent unmovable
+	 * migratetype page by page allocator's buddy algorithm. In the case,
+	 * you couldn't get a contiguous memory, which is not what we want.
+	 */
+	alignment = max(alignment,
+			(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
@@ -299,7 +308,8 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 {
 	int ret;
 
-	ret = __dma_contiguous_reserve_area(size, base, limit, res_cma, fixed);
+	ret = __dma_contiguous_reserve_area(size, base, limit, 0,
+						res_cma, fixed);
 	if (ret)
 		return ret;
 
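For completeness, the public wrapper dma_contiguous_reserve_area() keeps its existing signature and simply forwards an alignment of 0, so existing callers continue to get the default MAX_ORDER-1/pageblock alignment. A hedged usage sketch follows; the 64 MiB size, the error handling and the variable names are illustrative only, not taken from the patch.

/* Illustrative caller: reserve a 64 MiB CMA area anywhere below the default
 * limit, using the default alignment. Values and names are hypothetical. */
struct cma *res_cma;
int err;

err = dma_contiguous_reserve_area(64 * SZ_1M, 0, 0, &res_cma, false);
if (err)
	pr_err("CMA reservation failed: %d\n", err);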