author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-08-06 19:05:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:16 -0400
commit		3162bbd7e65b9cc57b660796dd3409807bfc9070 (patch)
tree		2d279fca6a9c53432811f8b3bbdaed506a5af2cb
parent		bc7f84c0e67c0ca90b6d0e95cc293ed5d8ad30c4 (diff)
DMA, CMA: separate core CMA management codes from DMA APIs
To prepare for future generalization of the CMA area management code, we need to separate the core CMA management code from the DMA APIs. Following patches will extend these core functions to cover the requirements of PPC KVM's CMA area management. This separation lets us extend the core functions without touching the DMA APIs.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
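In outline, the patch turns each exported DMA API into a thin wrapper that keeps the device-facing details (per-device area lookup via dev_get_cma_area(), alignment clamping, the architecture fixup) and defers the real work to a new static, double-underscore-prefixed core helper that only knows about a struct cma. A condensed sketch of the resulting shape for the allocation path, taken from the diff below with the helper body elided:

/* Core helper: operates on a struct cma only; it has no notion of a
 * device.  Body elided in this sketch; see the full diff below. */
static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
						unsigned int align);

/* DMA API: resolve the per-device area, clamp the alignment, then
 * defer to the core helper. */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	struct cma *cma = dev_get_cma_area(dev);

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return __dma_alloc_from_contiguous(cma, count, align);
}

The same split is applied to dma_contiguous_reserve_area() and dma_release_from_contiguous(), so later patches can move the core helpers out of this file without changing the DMA-facing signatures.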
-rw-r--r--	drivers/base/dma-contiguous.c	125
1 file changed, 77 insertions, 48 deletions
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 6467c919c509..9021762227a7 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -213,26 +213,9 @@ static int __init cma_init_reserved_areas(void)
 }
 core_initcall(cma_init_reserved_areas);
 
-/**
- * dma_contiguous_reserve_area() - reserve custom contiguous area
- * @size: Size of the reserved area (in bytes),
- * @base: Base address of the reserved area optional, use 0 for any
- * @limit: End address of the reserved memory (optional, 0 for any).
- * @res_cma: Pointer to store the created cma region.
- * @fixed: hint about where to place the reserved area
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory. This function allows to create custom reserved areas for specific
- * devices.
- *
- * If @fixed is true, reserve contiguous area at exactly @base. If false,
- * reserve in range from @base to @limit.
- */
-int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-				       phys_addr_t limit, struct cma **res_cma,
-				       bool fixed)
+static int __init __dma_contiguous_reserve_area(phys_addr_t size,
+				phys_addr_t base, phys_addr_t limit,
+				struct cma **res_cma, bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
 	phys_addr_t alignment;
@@ -286,15 +269,47 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 
 	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
-
-	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(base, size);
 	return 0;
+
 err:
 	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 	return ret;
 }
 
+/**
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Base address of the reserved area optional, use 0 for any
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
+ * @fixed: hint about where to place the reserved area
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
+ *
+ * If @fixed is true, reserve contiguous area at exactly @base. If false,
+ * reserve in range from @base to @limit.
+ */
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+				       phys_addr_t limit, struct cma **res_cma,
+				       bool fixed)
+{
+	int ret;
+
+	ret = __dma_contiguous_reserve_area(size, base, limit, res_cma, fixed);
+	if (ret)
+		return ret;
+
+	/* Architecture specific contiguous memory fixup. */
+	dma_contiguous_early_fixup(PFN_PHYS((*res_cma)->base_pfn),
+				(*res_cma)->count << PAGE_SHIFT);
+
+	return 0;
+}
+
 static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 {
 	mutex_lock(&cma->lock);
@@ -302,31 +317,16 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 	mutex_unlock(&cma->lock);
 }
 
-/**
- * dma_alloc_from_contiguous() - allocate pages from contiguous area
- * @dev:   Pointer to device for which the allocation is performed.
- * @count: Requested number of pages.
- * @align: Requested alignment of pages (in PAGE_SIZE order).
- *
- * This function allocates memory buffer for specified device. It uses
- * device specific contiguous memory area if available or the default
- * global one. Requires architecture specific dev_get_cma_area() helper
- * function.
- */
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 				       unsigned int align)
 {
 	unsigned long mask, pfn, pageno, start = 0;
-	struct cma *cma = dev_get_cma_area(dev);
 	struct page *page = NULL;
 	int ret;
 
 	if (!cma || !cma->count)
 		return NULL;
 
-	if (align > CONFIG_CMA_ALIGNMENT)
-		align = CONFIG_CMA_ALIGNMENT;
-
 	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
 		 count, align);
 
@@ -375,19 +375,30 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 }
 
 /**
- * dma_release_from_contiguous() - release allocated pages
- * @dev:   Pointer to device for which the pages were allocated.
- * @pages: Allocated pages.
- * @count: Number of allocated pages.
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev:   Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
  *
- * This function releases memory allocated by dma_alloc_from_contiguous().
- * It returns false when provided pages do not belong to contiguous area and
- * true otherwise.
+ * This function allocates memory buffer for specified device. It uses
+ * device specific contiguous memory area if available or the default
+ * global one. Requires architecture specific dev_get_cma_area() helper
+ * function.
  */
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
-				 int count)
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+				       unsigned int align)
 {
 	struct cma *cma = dev_get_cma_area(dev);
+
+	if (align > CONFIG_CMA_ALIGNMENT)
+		align = CONFIG_CMA_ALIGNMENT;
+
+	return __dma_alloc_from_contiguous(cma, count, align);
+}
+
+static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
+				 int count)
+{
 	unsigned long pfn;
 
 	if (!cma || !pages)
@@ -407,3 +418,21 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 	return true;
 }
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev:   Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when provided pages do not belong to contiguous area and
+ * true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+				 int count)
+{
+	struct cma *cma = dev_get_cma_area(dev);
+
+	return __dma_release_from_contiguous(cma, pages, count);
+}
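For context, a sketch of how these entry points are typically used; the names my_cma_area, my_arch_reserve_cma() and my_driver_probe() below are hypothetical and not part of this patch or of any kernel API. Arch setup code reserves an area once the early allocator is running, and a driver later allocates and releases pages from its device's area:

#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static struct cma *my_cma_area;	/* hypothetical */

/* Early boot (arch code): reserve a 16 MiB area anywhere below 4 GiB.
 * A non-zero return means the reservation failed. */
void __init my_arch_reserve_cma(void)
{
	if (dma_contiguous_reserve_area(SZ_16M, 0, DMA_BIT_MASK(32),
					&my_cma_area, false))
		pr_warn("my_arch: CMA reservation failed\n");
}

/* Driver: allocate 8 pages, naturally aligned to order 3 (8 pages),
 * from the device's CMA area, use them, then release them. */
static int my_driver_probe(struct device *dev)
{
	struct page *pages = dma_alloc_from_contiguous(dev, 8, 3);

	if (!pages)
		return -ENOMEM;
	/* ... use the buffer ... */
	if (!dma_release_from_contiguous(dev, pages, 8))
		pr_warn("my_driver: pages were not from a CMA area\n");
	return 0;
}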