about summary refs log tree commit diff stats
path: root/mm/cma.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/cma.c')
-rw-r--r--  mm/cma.c  31
1 files changed, 29 insertions, 2 deletions
diff --git a/mm/cma.c b/mm/cma.c
index a6033e344430..978b4a1441ef 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -53,6 +53,11 @@ unsigned long cma_get_size(const struct cma *cma)
 	return cma->count << PAGE_SHIFT;
 }
 
+const char *cma_get_name(const struct cma *cma)
+{
+	return cma->name ? cma->name : "(undefined)";
+}
+
 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
 					     int align_order)
 {
@@ -168,6 +173,7 @@ core_initcall(cma_init_reserved_areas);
  */
 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 				 unsigned int order_per_bit,
+				 const char *name,
 				 struct cma **res_cma)
 {
 	struct cma *cma;
@@ -198,6 +204,13 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	 * subsystems (like slab allocator) are available.
 	 */
 	cma = &cma_areas[cma_area_count];
+	if (name) {
+		cma->name = name;
+	} else {
+		cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
+		if (!cma->name)
+			return -ENOMEM;
+	}
 	cma->base_pfn = PFN_DOWN(base);
 	cma->count = size >> PAGE_SHIFT;
 	cma->order_per_bit = order_per_bit;
@@ -229,7 +242,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 int __init cma_declare_contiguous(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
-			bool fixed, struct cma **res_cma)
+			bool fixed, const char *name, struct cma **res_cma)
 {
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start;
@@ -335,7 +348,7 @@ int __init cma_declare_contiguous(phys_addr_t base,
 		base = addr;
 	}
 
-	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
+	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
 	if (ret)
 		goto err;
 
@@ -491,3 +504,17 @@ bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
 
 	return true;
 }
+
+int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
+{
+	int i;
+
+	for (i = 0; i < cma_area_count; i++) {
+		int ret = it(&cma_areas[i], data);
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}