author		Marek Szyprowski <m.szyprowski@samsung.com>	2014-10-13 18:51:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 20:18:12 -0400
commit		de9e14eebf33a60712a52a0bc6e08c043c0aba53 (patch)
tree		7b681bf9d5cb6a7909c8d9090aaa868d3e22ccf2 /mm/cma.c
parent		7bfa5ab6fa1b18f53fb94f922e107e6fbdc5e485 (diff)
drivers: dma-contiguous: add initialization from device tree
Add a function to create a CMA region from previously reserved memory, and add
support for handling 'shared-dma-pool' reserved-memory device tree nodes.

Based on previous code provided by Josh Cartwright <joshc@codeaurora.org>

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Josh Cartwright <joshc@codeaurora.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
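The mm/cma.c hunks below only export the new helper; the 'shared-dma-pool'
handling named in the subject lives in drivers/base/dma-contiguous.c, which is
outside this diffstat. What follows is a minimal sketch of such a
reserved-memory hook, assuming the of_reserved_mem framework (struct
reserved_mem, RESERVEDMEM_OF_DECLARE, of_get_flat_dt_prop); the hook name and
the device tree node shown in the comment are illustrative and may differ from
the commit's actual code:

	/*
	 * Sketch only -- the commit's real hook is in
	 * drivers/base/dma-contiguous.c and may differ in detail.
	 * A matching node under /reserved-memory might look like:
	 *
	 *	linux,cma {
	 *		compatible = "shared-dma-pool";
	 *		reusable;
	 *		size = <0x4000000>;
	 *	};
	 */
	#include <linux/cma.h>
	#include <linux/of_fdt.h>
	#include <linux/of_reserved_mem.h>
	#include <linux/printk.h>
	#include <linux/sizes.h>

	static int __init rmem_cma_setup(struct reserved_mem *rmem)
	{
		struct cma *cma;
		int err;

		/* a CMA pool must be reusable and must stay in the kernel mapping */
		if (!of_get_flat_dt_prop(rmem->fdt_node, "reusable", NULL) ||
		    of_get_flat_dt_prop(rmem->fdt_node, "no-map", NULL))
			return -EINVAL;

		/* hand the already-reserved range over to the new CMA helper */
		err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
		if (err) {
			pr_err("Reserved memory: unable to set up CMA region\n");
			return err;
		}
		rmem->priv = cma;

		pr_info("Reserved memory: created CMA pool at %pa, size %lu MiB\n",
			&rmem->base, (unsigned long)(rmem->size / SZ_1M));
		return 0;
	}
	RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);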
Diffstat (limited to 'mm/cma.c')
-rw-r--r--	mm/cma.c	62
1 file changed, 51 insertions(+), 11 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index a951a3b3ed36..963bc4add9af 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -143,6 +143,54 @@ static int __init cma_init_reserved_areas(void)
 core_initcall(cma_init_reserved_areas);
 
 /**
+ * cma_init_reserved_mem() - create custom contiguous area from reserved memory
+ * @base: Base address of the reserved area
+ * @size: Size of the reserved area (in bytes),
+ * @order_per_bit: Order of pages represented by one bit on bitmap.
+ * @res_cma: Pointer to store the created cma region.
+ *
+ * This function creates custom contiguous area from already reserved memory.
+ */
+int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+				 int order_per_bit, struct cma **res_cma)
+{
+	struct cma *cma;
+	phys_addr_t alignment;
+
+	/* Sanity checks */
+	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+		pr_err("Not enough slots for CMA reserved regions!\n");
+		return -ENOSPC;
+	}
+
+	if (!size || !memblock_is_region_reserved(base, size))
+		return -EINVAL;
+
+	/* ensure minimal alignment required by mm core */
+	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+
+	/* alignment should be aligned with order_per_bit */
+	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
+		return -EINVAL;
+
+	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
+		return -EINVAL;
+
+	/*
+	 * Each reserved area must be initialised later, when more kernel
+	 * subsystems (like slab allocator) are available.
+	 */
+	cma = &cma_areas[cma_area_count];
+	cma->base_pfn = PFN_DOWN(base);
+	cma->count = size >> PAGE_SHIFT;
+	cma->order_per_bit = order_per_bit;
+	*res_cma = cma;
+	cma_area_count++;
+
+	return 0;
+}
+
+/**
  * cma_declare_contiguous() - reserve custom contiguous area
  * @base: Base address of the reserved area optional, use 0 for any
  * @size: Size of the reserved area (in bytes),
@@ -165,7 +213,6 @@ int __init cma_declare_contiguous(phys_addr_t base,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, struct cma **res_cma)
 {
-	struct cma *cma;
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start = __pa(high_memory);
 	int ret = 0;
@@ -237,16 +284,9 @@ int __init cma_declare_contiguous(phys_addr_t base,
 		}
 	}
 
-	/*
-	 * Each reserved area must be initialised later, when more kernel
-	 * subsystems (like slab allocator) are available.
-	 */
-	cma = &cma_areas[cma_area_count];
-	cma->base_pfn = PFN_DOWN(base);
-	cma->count = size >> PAGE_SHIFT;
-	cma->order_per_bit = order_per_bit;
-	*res_cma = cma;
-	cma_area_count++;
+	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
+	if (ret)
+		goto err;
 
 	pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
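
A region reserved by hand rather than via the device tree can use the same
helper, but note its alignment demands: with 4 KiB pages and a typical
MAX_ORDER of 11, alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)
works out to 4 KiB << 10 = 4 MiB, so base and size must both be 4 MiB
multiples and order_per_bit can be at most 10. A hypothetical early-boot
caller (the addresses, sizes, and function name are invented; only
cma_init_reserved_mem() and the memblock API are real):

	#include <linux/cma.h>
	#include <linux/memblock.h>
	#include <linux/sizes.h>

	static struct cma *example_cma;	/* illustrative name, not in the commit */

	static int __init example_cma_reserve(void)
	{
		phys_addr_t base = 0x20000000;	/* made up; a 4 MiB multiple */
		phys_addr_t size = 16 * SZ_1M;	/* also a 4 MiB multiple */
		int err;

		/* the range must already be reserved, or the helper returns -EINVAL */
		err = memblock_reserve(base, size);
		if (err)
			return err;

		/* order_per_bit = 0: one bitmap bit tracks one page */
		return cma_init_reserved_mem(base, size, 0, &example_cma);
	}

If an allocator will only ever hand out, say, 64 KiB chunks, a larger
order_per_bit (4 with 4 KiB pages) shrinks the bitmap sixteenfold; that
granularity trade-off is exactly what the new parameter exposes.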