Diffstat (limited to 'mm/cma.c')
 mm/cma.c | 333 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 333 insertions(+), 0 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
new file mode 100644
index 000000000000..656004216953
--- /dev/null
+++ b/mm/cma.c
@@ -0,0 +1,333 @@
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>

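/*
 * One contiguous memory area. Each bit in @bitmap tracks
 * (1 << @order_per_bit) pages of the area; @lock protects the bitmap.
 */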
struct cma {
        unsigned long base_pfn;
        unsigned long count;
        unsigned long *bitmap;
        unsigned int order_per_bit; /* Order of pages represented by one bit */
        struct mutex lock;
};

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);
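
/*
 * cma_mutex serialises alloc_contig_range() calls across all areas;
 * each area's cma->lock only guards that area's allocation bitmap.
 */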

phys_addr_t cma_get_base(struct cma *cma)
{
        return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
        return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
        if (align_order <= cma->order_per_bit)
                return 0;
        return (1UL << (align_order - cma->order_per_bit)) - 1;
}
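
/*
 * Example: align_order == 8 with order_per_bit == 4 yields a mask of
 * 0b1111, so the bitmap search position is rounded up to a multiple of
 * 16 bits, i.e. 256 pages.
 */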

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
        return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
                                              unsigned long pages)
{
        return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
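
/*
 * Example: with order_per_bit == 1 each bit covers two pages, so a
 * five-page request is rounded up to six pages and consumes three bits.
 */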

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
        unsigned long bitmap_no, bitmap_count;

        bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        mutex_lock(&cma->lock);
        bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
        mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
        int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
        unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
        unsigned i = cma->count >> pageblock_order;
        struct zone *zone;

        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!cma->bitmap)
                return -ENOMEM;

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));

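        /*
         * Walk the area one pageblock at a time: check that every pfn
         * is valid and lives in the same zone, then hand each pageblock
         * to the buddy allocator as MIGRATE_CMA.
         */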
        do {
                unsigned j;

                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        /*
                         * alloc_contig_range() requires the pfn range
                         * specified to be in the same zone. Make this
                         * simple by forcing the entire CMA reserved range
                         * to be in the same zone.
                         */
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                goto err;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);

        mutex_init(&cma->lock);
        return 0;

err:
        kfree(cma->bitmap);
        return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
        int i;

        for (i = 0; i < cma_area_count; i++) {
                int ret = cma_activate_area(&cma_areas[i]);

                if (ret)
                        return ret;
        }

        return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes)
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @limit: End address of the reserved memory (optional, 0 for any)
 * @alignment: Alignment for the CMA area, must be a power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit in the bitmap
 * @res_cma: Pointer to store the created cma region
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve the contiguous area at exactly @base. If false,
 * reserve anywhere in the range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t size,
                        phys_addr_t base, phys_addr_t limit,
                        phys_addr_t alignment, unsigned int order_per_bit,
                        struct cma **res_cma, bool fixed)
{
        struct cma *cma = &cma_areas[cma_area_count];
        int ret = 0;

        pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
                 __func__, (unsigned long)size, (unsigned long)base,
                 (unsigned long)limit, (unsigned long)alignment);

        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        if (alignment && !is_power_of_2(alignment))
                return -EINVAL;

        /*
         * Sanitise the input arguments. Pages at both ends of the CMA
         * area could be merged into adjacent unmovable-migratetype pages
         * by the buddy allocator, in which case a contiguous allocation
         * spanning the boundary would fail. Align everything to a
         * pageblock/MAX_ORDER boundary to prevent that.
         */
        alignment = max(alignment,
                (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);
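        /*
         * Example: on x86 with 4 KiB pages, a default configuration has
         * MAX_ORDER == 11 and pageblock_order <= 10, so the alignment
         * floor applied above is PAGE_SIZE << 10 == 4 MiB.
         */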

        /* size should be aligned to order_per_bit */
        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        /* Reserve memory */
        if (base && fixed) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
                phys_addr_t addr = memblock_alloc_range(size, alignment, base,
                                                        limit);
                if (!addr) {
                        ret = -ENOMEM;
                        goto err;
                }
                base = addr;
        }

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like the slab allocator) are available.
         */
        cma->base_pfn = PFN_DOWN(base);
        cma->count = size >> PAGE_SHIFT;
        cma->order_per_bit = order_per_bit;
        *res_cma = cma;
        cma_area_count++;

        pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
                (unsigned long)base);
        return 0;

err:
        pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return ret;
}
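
/*
 * Usage sketch (hypothetical arch setup code, not part of this file):
 * reserve 16 MiB anywhere below a platform-specific 'limit', with one
 * bitmap bit per page:
 *
 *	struct cma *cma;
 *	int ret = cma_declare_contiguous(SZ_16M, 0, limit, 0, 0,
 *					 &cma, false);
 *	if (ret)
 *		pr_warn("cma: reservation failed: %d\n", ret);
 */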

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
        unsigned long mask, pfn, start = 0;
        unsigned long bitmap_maxno, bitmap_no, bitmap_count;
        struct page *page = NULL;
        int ret;

        if (!cma || !cma->count)
                return NULL;

        pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                return NULL;

        mask = cma_bitmap_aligned_mask(cma, align);
        bitmap_maxno = cma_bitmap_maxno(cma);
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        for (;;) {
                mutex_lock(&cma->lock);
                bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
                                bitmap_maxno, start, bitmap_count, mask);
                if (bitmap_no >= bitmap_maxno) {
                        mutex_unlock(&cma->lock);
                        break;
                }
                bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
                /*
                 * It's safe to drop the lock here. We've marked this region for
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
                mutex_unlock(&cma->lock);

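                /*
                 * Convert the bitmap slot back into a pfn and try to
                 * migrate everything currently in [pfn, pfn + count)
                 * out of the range.
                 */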
                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                mutex_lock(&cma_mutex);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                mutex_unlock(&cma_mutex);
                if (ret == 0) {
                        page = pfn_to_page(pfn);
                        break;
                }

                cma_clear_bitmap(cma, pfn, count);
                if (ret != -EBUSY)
                        break;

                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = bitmap_no + mask + 1;
        }

        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
}
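
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * grab 1 MiB from a region declared at boot and release it again:
 *
 *	int nr = SZ_1M / PAGE_SIZE;
 *	struct page *page = cma_alloc(cma, nr, 0);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(cma, page, nr);
 */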

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p)\n", __func__, (void *)pages);

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        free_contig_range(pfn, count);
        cma_clear_bitmap(cma, pfn, count);

        return true;
}