Diffstat (limited to 'mm/cma.c'):

 mm/cma.c | 49 ++++++++++++++++++++-------------------------
 1 file changed, 24 insertions(+), 25 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 68ecb7a42983..47203faaf65e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -35,29 +35,24 @@
 #include <linux/highmem.h>
 #include <linux/io.h>
 
-struct cma {
-	unsigned long	base_pfn;
-	unsigned long	count;
-	unsigned long	*bitmap;
-	unsigned int order_per_bit; /* Order of pages represented by one bit */
-	struct mutex	lock;
-};
-
-static struct cma cma_areas[MAX_CMA_AREAS];
-static unsigned cma_area_count;
+#include "cma.h"
+
+struct cma cma_areas[MAX_CMA_AREAS];
+unsigned cma_area_count;
 static DEFINE_MUTEX(cma_mutex);
 
-phys_addr_t cma_get_base(struct cma *cma)
+phys_addr_t cma_get_base(const struct cma *cma)
 {
 	return PFN_PHYS(cma->base_pfn);
 }
 
-unsigned long cma_get_size(struct cma *cma)
+unsigned long cma_get_size(const struct cma *cma)
 {
 	return cma->count << PAGE_SHIFT;
 }
 
-static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
+static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
+					     int align_order)
 {
 	if (align_order <= cma->order_per_bit)
 		return 0;
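
The new mm/cma.h is not shown here (the diff is limited to mm/cma.c), but its likely contents follow from what this hunk removes: the struct cma definition, extern declarations for the now non-static cma_areas[] and cma_area_count, and the cma_bitmap_maxno() helper that a later hunk drops from cma.c without replacement. A sketch, including the two debugfs fields implied by the CONFIG_CMA_DEBUGFS hunk further down:

#ifndef __MM_CMA_H__
#define __MM_CMA_H__

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	unsigned int order_per_bit; /* Order of pages represented by one bit */
	struct mutex	lock;
#ifdef CONFIG_CMA_DEBUGFS
	struct hlist_head mem_head;
	spinlock_t mem_head_lock;
#endif
};

extern struct cma cma_areas[MAX_CMA_AREAS];
extern unsigned cma_area_count;

static inline unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

#endif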
@@ -68,7 +63,8 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
  * Find a PFN aligned to the specified order and return an offset represented in
  * order_per_bits.
  */
-static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
+static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
+					       int align_order)
 {
 	if (align_order <= cma->order_per_bit)
 		return 0;
@@ -77,18 +73,14 @@ static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
 		- cma->base_pfn) >> cma->order_per_bit;
 }
 
-static unsigned long cma_bitmap_maxno(struct cma *cma)
-{
-	return cma->count >> cma->order_per_bit;
-}
-
-static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
-					      unsigned long pages)
+static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
+					      unsigned long pages)
 {
 	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
 }
 
-static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
+static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
+			     unsigned int count)
 {
 	unsigned long bitmap_no, bitmap_count;
 
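
cma_bitmap_pages_to_bits() rounds a page count up to the bitmap granularity: one bit covers 1UL << order_per_bit pages. A standalone illustration of the same arithmetic (userspace, with a local stand-in for the kernel's ALIGN() macro):

#include <stdio.h>

/* Same arithmetic as the kernel's ALIGN() for power-of-two alignments */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned int order_per_bit = 2;	/* one bitmap bit covers 4 pages */
	unsigned long pages[] = { 1, 4, 5, 9 };

	for (int i = 0; i < 4; i++) {
		unsigned long bits = ALIGN(pages[i], 1UL << order_per_bit)
				     >> order_per_bit;
		printf("%lu page(s) -> %lu bit(s)\n", pages[i], bits);
	}
	return 0;	/* prints 1, 1, 2 and 3 bits respectively */
}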
@@ -134,6 +126,12 @@ static int __init cma_activate_area(struct cma *cma)
 	} while (--i);
 
 	mutex_init(&cma->lock);
+
+#ifdef CONFIG_CMA_DEBUGFS
+	INIT_HLIST_HEAD(&cma->mem_head);
+	spin_lock_init(&cma->mem_head_lock);
+#endif
+
 	return 0;
 
 err:
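
The two fields initialized here exist only under CONFIG_CMA_DEBUGFS: mem_head anchors a list of allocation records kept for the debugfs interface, with mem_head_lock protecting it. The record type lives outside this file; a plausible shape, assuming each node remembers one allocation:

/* Hypothetical allocation record hung off cma->mem_head */
struct cma_mem {
	struct hlist_node node;
	struct page *p;		/* first page of the allocation */
	unsigned long n;	/* number of pages */
};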
@@ -167,7 +165,8 @@ core_initcall(cma_init_reserved_areas);
  * This function creates custom contiguous area from already reserved memory.
  */
 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
-				 int order_per_bit, struct cma **res_cma)
+				 unsigned int order_per_bit,
+				 struct cma **res_cma)
 {
 	struct cma *cma;
 	phys_addr_t alignment;
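
order_per_bit becomes unsigned int: a negative order was never meaningful, so the type now matches the semantics. A hypothetical caller handing an already-reserved physical range to CMA might look like this (example_cma_setup is an illustrative name, not a real kernel function):

static int __init example_cma_setup(phys_addr_t base, phys_addr_t size)
{
	struct cma *cma;
	int ret;

	/* order_per_bit == 0: track the area at single-page granularity */
	ret = cma_init_reserved_mem(base, size, 0, &cma);
	if (ret)
		pr_err("CMA: failed to register reserved region: %d\n", ret);
	return ret;
}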
@@ -358,7 +357,7 @@ err:
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
-struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
+struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
 {
 	unsigned long mask, offset, pfn, start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
@@ -429,7 +428,7 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
  * It returns false when provided pages do not belong to contiguous area and
  * true otherwise.
  */
-bool cma_release(struct cma *cma, struct page *pages, int count)
+bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
 {
 	unsigned long pfn;
 
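
With both hunks applied, count is unsigned int on the allocation and release sides, and cma_release() no longer needs a mutable page pointer. A hypothetical pairing of the two calls (grab_buffer/drop_buffer are illustrative names):

static struct page *grab_buffer(struct cma *cma, unsigned int nr_pages)
{
	/* align == 0: no alignment beyond a single page */
	return cma_alloc(cma, nr_pages, 0);
}

static void drop_buffer(struct cma *cma, const struct page *pages,
			unsigned int nr_pages)
{
	if (!cma_release(cma, pages, nr_pages))
		pr_warn("CMA: pages did not come from this area\n");
}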