Diffstat (limited to 'drivers/base/dma-contiguous.c')
-rw-r--r--	drivers/base/dma-contiguous.c	220
1 file changed, 12 insertions(+), 208 deletions(-)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 6467c919c509..6606abdf880c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -24,23 +24,9 @@
 
 #include <linux/memblock.h>
 #include <linux/err.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/page-isolation.h>
 #include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
-
-struct cma {
-	unsigned long base_pfn;
-	unsigned long count;
-	unsigned long *bitmap;
-	struct mutex lock;
-};
-
-struct cma *dma_contiguous_default_area;
+#include <linux/cma.h>
 
 #ifdef CONFIG_CMA_SIZE_MBYTES
 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
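
With <linux/cma.h> pulled in above, this file now builds against the generic
CMA interface instead of a file-private struct cma. As a minimal sketch, this
is the interface the patch appears to rely on, with signatures inferred only
from the call sites later in this diff (the real header may differ):

	/* Assumed shape of <linux/cma.h>; inferred from call sites, not authoritative. */
	struct cma;	/* opaque handle; the private definition left this file */

	int cma_declare_contiguous(phys_addr_t base, phys_addr_t size,
				   phys_addr_t limit, phys_addr_t alignment,
				   unsigned int order_per_bit, bool fixed,
				   struct cma **res_cma);
	struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
	bool cma_release(struct cma *cma, struct page *pages, int count);
	phys_addr_t cma_get_base(struct cma *cma);
	unsigned long cma_get_size(struct cma *cma);
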
@@ -48,6 +34,8 @@ struct cma *dma_contiguous_default_area;
 #define CMA_SIZE_MBYTES 0
 #endif
 
+struct cma *dma_contiguous_default_area;
+
 /*
  * Default global CMA area size can be defined in kernel's .config.
  * This is useful mainly for distro maintainers to create a kernel
@@ -154,65 +142,6 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 	}
 }
 
-static DEFINE_MUTEX(cma_mutex);
-
-static int __init cma_activate_area(struct cma *cma)
-{
-	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
-	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
-	unsigned i = cma->count >> pageblock_order;
-	struct zone *zone;
-
-	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
-	if (!cma->bitmap)
-		return -ENOMEM;
-
-	WARN_ON_ONCE(!pfn_valid(pfn));
-	zone = page_zone(pfn_to_page(pfn));
-
-	do {
-		unsigned j;
-		base_pfn = pfn;
-		for (j = pageblock_nr_pages; j; --j, pfn++) {
-			WARN_ON_ONCE(!pfn_valid(pfn));
-			/*
-			 * alloc_contig_range requires the pfn range
-			 * specified to be in the same zone. Make this
-			 * simple by forcing the entire CMA resv range
-			 * to be in the same zone.
-			 */
-			if (page_zone(pfn_to_page(pfn)) != zone)
-				goto err;
-		}
-		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
-	} while (--i);
-
-	mutex_init(&cma->lock);
-	return 0;
-
-err:
-	kfree(cma->bitmap);
-	return -EINVAL;
-}
-
-static struct cma cma_areas[MAX_CMA_AREAS];
-static unsigned cma_area_count;
-
-static int __init cma_init_reserved_areas(void)
-{
-	int i;
-
-	for (i = 0; i < cma_area_count; i++) {
-		int ret = cma_activate_area(&cma_areas[i]);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-core_initcall(cma_init_reserved_areas);
-
 /**
  * dma_contiguous_reserve_area() - reserve custom contiguous area
  * @size: Size of the reserved area (in bytes),
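
The cma_activate_area()/cma_init_reserved_areas() pair deleted here (bitmap
allocation, the per-pageblock same-zone check, the core_initcall hook) is not
dropped; the patch moves that activation logic behind the generic CMA
interface. For a sense of the bitmap cost the removed code was computing, a
standalone sketch of its sizing arithmetic with hypothetical numbers (16 MiB
area, 4 KiB pages, 64-bit longs); plain userspace C, not kernel code:

	#include <stdio.h>

	#define BITS_PER_LONG		64
	#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	int main(void)
	{
		unsigned long count = (16UL << 20) >> 12;	/* 16 MiB / 4 KiB = 4096 pages */
		unsigned long bytes = BITS_TO_LONGS(count) * sizeof(long);

		printf("pages=%lu, bitmap=%lu bytes\n", count, bytes);	/* 4096, 512 */
		return 0;
	}
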
@@ -234,72 +163,17 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 				       phys_addr_t limit, struct cma **res_cma,
 				       bool fixed)
 {
-	struct cma *cma = &cma_areas[cma_area_count];
-	phys_addr_t alignment;
-	int ret = 0;
-
-	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
-		 (unsigned long)size, (unsigned long)base,
-		 (unsigned long)limit);
-
-	/* Sanity checks */
-	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
-		pr_err("Not enough slots for CMA reserved regions!\n");
-		return -ENOSPC;
-	}
-
-	if (!size)
-		return -EINVAL;
-
-	/* Sanitise input arguments */
-	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
-	base = ALIGN(base, alignment);
-	size = ALIGN(size, alignment);
-	limit &= ~(alignment - 1);
-
-	/* Reserve memory */
-	if (base && fixed) {
-		if (memblock_is_region_reserved(base, size) ||
-		    memblock_reserve(base, size) < 0) {
-			ret = -EBUSY;
-			goto err;
-		}
-	} else {
-		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
-							limit);
-		if (!addr) {
-			ret = -ENOMEM;
-			goto err;
-		} else {
-			base = addr;
-		}
-	}
-
-	/*
-	 * Each reserved area must be initialised later, when more kernel
-	 * subsystems (like slab allocator) are available.
-	 */
-	cma->base_pfn = PFN_DOWN(base);
-	cma->count = size >> PAGE_SHIFT;
-	*res_cma = cma;
-	cma_area_count++;
+	int ret;
 
-	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
-		(unsigned long)base);
+	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
+	if (ret)
+		return ret;
 
 	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(base, size);
-	return 0;
-err:
-	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-	return ret;
-}
+	dma_contiguous_early_fixup(cma_get_base(*res_cma),
+				   cma_get_size(*res_cma));
 
-static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
-{
-	mutex_lock(&cma->lock);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
-	mutex_unlock(&cma->lock);
+	return 0;
 }
 
 /**
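
A hedged usage sketch for the slimmed-down reservation path: how early
platform code might carve out a fixed region through the rewritten
dma_contiguous_reserve_area(). The base address and the per-platform handle
are hypothetical; the error flow is what the new body provides, since every
failure now surfaces through cma_declare_contiguous():

	#include <linux/dma-contiguous.h>
	#include <linux/sizes.h>

	static struct cma *example_cma;	/* hypothetical platform handle */

	static void __init example_reserve(void)
	{
		/* 64 MiB at a fixed physical base, no upper limit. */
		int ret = dma_contiguous_reserve_area(SZ_64M, 0x80000000, 0,
						      &example_cma, true);
		if (ret)
			pr_warn("example: CMA reservation failed: %d\n", ret);
	}
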
@@ -316,62 +190,10 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 				       unsigned int align)
 {
-	unsigned long mask, pfn, pageno, start = 0;
-	struct cma *cma = dev_get_cma_area(dev);
-	struct page *page = NULL;
-	int ret;
-
-	if (!cma || !cma->count)
-		return NULL;
-
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
-		 count, align);
-
-	if (!count)
-		return NULL;
-
-	mask = (1 << align) - 1;
-
-
-	for (;;) {
-		mutex_lock(&cma->lock);
-		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-						    start, count, mask);
-		if (pageno >= cma->count) {
-			mutex_unlock(&cma->lock);
-			break;
-		}
-		bitmap_set(cma->bitmap, pageno, count);
-		/*
-		 * It's safe to drop the lock here. We've marked this region for
-		 * our exclusive use. If the migration fails we will take the
-		 * lock again and unmark it.
-		 */
-		mutex_unlock(&cma->lock);
-
-		pfn = cma->base_pfn + pageno;
-		mutex_lock(&cma_mutex);
-		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
-		mutex_unlock(&cma_mutex);
-		if (ret == 0) {
-			page = pfn_to_page(pfn);
-			break;
-		} else if (ret != -EBUSY) {
-			clear_cma_bitmap(cma, pfn, count);
-			break;
-		}
-		clear_cma_bitmap(cma, pfn, count);
-		pr_debug("%s(): memory range at %p is busy, retrying\n",
-			 __func__, pfn_to_page(pfn));
-		/* try again with a bit different memory target */
-		start = pageno + mask + 1;
-	}
-
-	pr_debug("%s(): returned %p\n", __func__, page);
-	return page;
+	return cma_alloc(dev_get_cma_area(dev), count, align);
 }
 
 /**
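
For context, a sketch of a typical caller, patterned on how DMA allocators
use this helper: try the device's CMA area first, then fall back to the buddy
allocator. The NULL-on-failure behaviour is assumed to carry over from the
removed open-coded loop to cma_alloc():

	#include <linux/dma-contiguous.h>
	#include <linux/gfp.h>

	static struct page *example_alloc(struct device *dev, size_t size)
	{
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		if (!page)	/* no CMA area for this device, or range busy */
			page = alloc_pages(GFP_KERNEL, get_order(size));
		return page;
	}
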
@@ -387,23 +209,5 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 				 int count)
 {
-	struct cma *cma = dev_get_cma_area(dev);
-	unsigned long pfn;
-
-	if (!cma || !pages)
-		return false;
-
-	pr_debug("%s(page %p)\n", __func__, (void *)pages);
-
-	pfn = page_to_pfn(pages);
-
-	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
-		return false;
-
-	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
-
-	free_contig_range(pfn, count);
-	clear_cma_bitmap(cma, pfn, count);
-
-	return true;
+	return cma_release(dev_get_cma_area(dev), pages, count);
 }
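
And the matching release side for the sketch above: cma_release(), like the
removed open-coded range check, is expected to return false for pages that
did not come from the device's CMA area, which lets the caller route
buddy-allocated pages back to __free_pages() instead:

	static void example_free(struct device *dev, struct page *page,
				 size_t size)
	{
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (!dma_release_from_contiguous(dev, page, count))
			__free_pages(page, get_order(size));
	}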