author     Joonsoo Kim <iamjoonsoo.kim@lge.com>              2014-08-06 19:05:25 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-08-06 21:01:16 -0400
commit     a254129e8686bff7a340b58f35241b04927e81c0 (patch)
tree       90828b720ab7221a0f13c2fe3b15f997e64ea0c3 /drivers/base
parent     e0bdb37d95dd44086159607e571fd70f6b62dc2d (diff)
CMA: generalize CMA reserved area management functionality
Currently there are two users of the CMA functionality: the DMA subsystem and KVM on powerpc. Each has its own code to manage its CMA reserved area, even though the two implementations look very similar. My guess is that the duplication comes from differing needs in bitmap management: the KVM side wants each bitmap bit to cover more than one page, and eventually uses a bitmap where one bit represents 64 pages. While implementing CMA-related patches I had to change both places, which proved painful. This patch changes that situation and reduces future code-maintenance overhead. It should also help developers who want to use CMA in new features, since they can use CMA directly without copying and pasting the reserved-area management code.

Previous patches prepared the features needed to generalize CMA reserved area management, and now it is time to do it. This patch moves the core functions to mm/cma.c and changes the DMA APIs to use them. There is no functional change in the DMA APIs.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Gleb Natapov <gleb@kernel.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
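[Editor's illustration] As a rough sketch of the generalized interface this patch switches the DMA layer over to, the snippet below reserves a private CMA area at early boot and later allocates and releases pages from it. Only cma_declare_contiguous(), cma_alloc() and cma_release(), with the argument order visible in the diff below, come from the new mm/cma.c code; the my_* names and the 16 MiB / 1 GiB figures are hypothetical and used purely for illustration.

/*
 * Hypothetical client of the generic CMA API introduced by this patch.
 * The my_* identifiers and sizes are made up; only the
 * cma_declare_contiguous()/cma_alloc()/cma_release() calls (and their
 * argument order) follow the interface used in the diff below.
 */
#include <linux/cma.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sizes.h>

static struct cma *my_cma;

/* Early boot: carve out 16 MiB below 1 GiB, one bitmap bit per page. */
static int __init my_cma_reserve(void)
{
	/* (size, base, limit, alignment, order_per_bit, res_cma, fixed) */
	return cma_declare_contiguous(SZ_16M, 0, SZ_1G, 0, 0, &my_cma, false);
}

/* Runtime: allocate 8 contiguous pages, aligned to order 3 (8 pages). */
static struct page *my_alloc_buffer(void)
{
	return cma_alloc(my_cma, 8, 3);
}

/* Runtime: hand the 8 pages back to the CMA area. */
static void my_free_buffer(struct page *pages)
{
	cma_release(my_cma, pages, 8);
}

With this interface available, the dma-contiguous.c changes below reduce dma_contiguous_reserve_area(), dma_alloc_from_contiguous() and dma_release_from_contiguous() to thin wrappers around these three calls (plus dev_get_cma_area() and the architecture fixup), which is what allows the private bitmap and alloc_contig_range() bookkeeping to be deleted from drivers/base.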
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/Kconfig	10
-rw-r--r--	drivers/base/dma-contiguous.c	280
2 files changed, 8 insertions, 282 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 88500fed3c7a..4e7f0ff83ae7 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -289,16 +289,6 @@ config CMA_ALIGNMENT
 
 	  If unsure, leave the default value "8".
 
-config CMA_AREAS
-	int "Maximum count of the CMA device-private areas"
-	default 7
-	help
-	  CMA allows to create CMA areas for particular devices. This parameter
-	  sets the maximum number of such device private CMA areas in the
-	  system.
-
-	  If unsure, leave the default value "7".
-
 endif
 
 endmenu
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index ad8a85bf852f..0411c1c57005 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -24,25 +24,9 @@
 
 #include <linux/memblock.h>
 #include <linux/err.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/page-isolation.h>
 #include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
-#include <linux/log2.h>
-
-struct cma {
-	unsigned long	base_pfn;
-	unsigned long	count;
-	unsigned long	*bitmap;
-	unsigned int order_per_bit; /* Order of pages represented by one bit */
-	struct mutex	lock;
-};
-
-struct cma *dma_contiguous_default_area;
+#include <linux/cma.h>
 
 #ifdef CONFIG_CMA_SIZE_MBYTES
 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -50,6 +34,8 @@ struct cma *dma_contiguous_default_area;
 #define CMA_SIZE_MBYTES 0
 #endif
 
+struct cma *dma_contiguous_default_area;
+
 /*
  * Default global CMA area size can be defined in kernel's .config.
  * This is useful mainly for distro maintainers to create a kernel
@@ -156,169 +142,6 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 	}
 }
 
-static DEFINE_MUTEX(cma_mutex);
-
-static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
-{
-	return (1UL << (align_order >> cma->order_per_bit)) - 1;
-}
-
-static unsigned long cma_bitmap_maxno(struct cma *cma)
-{
-	return cma->count >> cma->order_per_bit;
-}
-
-static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
-						unsigned long pages)
-{
-	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
-}
-
-static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
-{
-	unsigned long bitmap_no, bitmap_count;
-
-	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
-	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
-
-	mutex_lock(&cma->lock);
-	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
-	mutex_unlock(&cma->lock);
-}
-
-static int __init cma_activate_area(struct cma *cma)
-{
-	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
-	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
-	unsigned i = cma->count >> pageblock_order;
-	struct zone *zone;
-
-	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
-	if (!cma->bitmap)
-		return -ENOMEM;
-
-	WARN_ON_ONCE(!pfn_valid(pfn));
-	zone = page_zone(pfn_to_page(pfn));
-
-	do {
-		unsigned j;
-		base_pfn = pfn;
-		for (j = pageblock_nr_pages; j; --j, pfn++) {
-			WARN_ON_ONCE(!pfn_valid(pfn));
-			/*
-			 * alloc_contig_range requires the pfn range
-			 * specified to be in the same zone. Make this
-			 * simple by forcing the entire CMA resv range
-			 * to be in the same zone.
-			 */
-			if (page_zone(pfn_to_page(pfn)) != zone)
-				goto err;
-		}
-		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
-	} while (--i);
-
-	mutex_init(&cma->lock);
-	return 0;
-
-err:
-	kfree(cma->bitmap);
-	return -EINVAL;
-}
-
-static struct cma cma_areas[MAX_CMA_AREAS];
-static unsigned cma_area_count;
-
-static int __init cma_init_reserved_areas(void)
-{
-	int i;
-
-	for (i = 0; i < cma_area_count; i++) {
-		int ret = cma_activate_area(&cma_areas[i]);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-core_initcall(cma_init_reserved_areas);
-
-static int __init __dma_contiguous_reserve_area(phys_addr_t size,
-				phys_addr_t base, phys_addr_t limit,
-				phys_addr_t alignment, unsigned int order_per_bit,
-				struct cma **res_cma, bool fixed)
-{
-	struct cma *cma = &cma_areas[cma_area_count];
-	int ret = 0;
-
-	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
-		__func__, (unsigned long)size, (unsigned long)base,
-		(unsigned long)limit, (unsigned long)alignment);
-
-	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
-		pr_err("Not enough slots for CMA reserved regions!\n");
-		return -ENOSPC;
-	}
-
-	if (!size)
-		return -EINVAL;
-
-	if (alignment && !is_power_of_2(alignment))
-		return -EINVAL;
-
-	/*
-	 * Sanitise input arguments.
-	 * Pages both ends in CMA area could be merged into adjacent unmovable
-	 * migratetype page by page allocator's buddy algorithm. In the case,
-	 * you couldn't get a contiguous memory, which is not what we want.
-	 */
-	alignment = max(alignment,
-		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
-	base = ALIGN(base, alignment);
-	size = ALIGN(size, alignment);
-	limit &= ~(alignment - 1);
-
-	/* size should be aligned with order_per_bit */
-	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
-		return -EINVAL;
-
-	/* Reserve memory */
-	if (base && fixed) {
-		if (memblock_is_region_reserved(base, size) ||
-		    memblock_reserve(base, size) < 0) {
-			ret = -EBUSY;
-			goto err;
-		}
-	} else {
-		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
-							limit);
-		if (!addr) {
-			ret = -ENOMEM;
-			goto err;
-		} else {
-			base = addr;
-		}
-	}
-
-	/*
-	 * Each reserved area must be initialised later, when more kernel
-	 * subsystems (like slab allocator) are available.
-	 */
-	cma->base_pfn = PFN_DOWN(base);
-	cma->count = size >> PAGE_SHIFT;
-	cma->order_per_bit = order_per_bit;
-	*res_cma = cma;
-	cma_area_count++;
-
-	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
-		(unsigned long)base);
-	return 0;
-
-err:
-	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-	return ret;
-}
-
 /**
  * dma_contiguous_reserve_area() - reserve custom contiguous area
  * @size: Size of the reserved area (in bytes),
@@ -342,77 +165,17 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 {
 	int ret;
 
-	ret = __dma_contiguous_reserve_area(size, base, limit, 0, 0,
-						res_cma, fixed);
+	ret = cma_declare_contiguous(size, base, limit, 0, 0, res_cma, fixed);
 	if (ret)
 		return ret;
 
 	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(PFN_PHYS((*res_cma)->base_pfn),
-			(*res_cma)->count << PAGE_SHIFT);
+	dma_contiguous_early_fixup(cma_get_base(*res_cma),
+				   cma_get_size(*res_cma));
 
 	return 0;
 }
 
-static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
-				       unsigned int align)
-{
-	unsigned long mask, pfn, start = 0;
-	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
-	struct page *page = NULL;
-	int ret;
-
-	if (!cma || !cma->count)
-		return NULL;
-
-	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
-		 count, align);
-
-	if (!count)
-		return NULL;
-
-	mask = cma_bitmap_aligned_mask(cma, align);
-	bitmap_maxno = cma_bitmap_maxno(cma);
-	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
-
-	for (;;) {
-		mutex_lock(&cma->lock);
-		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
-				bitmap_maxno, start, bitmap_count, mask);
-		if (bitmap_no >= bitmap_maxno) {
-			mutex_unlock(&cma->lock);
-			break;
-		}
-		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
-		/*
-		 * It's safe to drop the lock here. We've marked this region for
-		 * our exclusive use. If the migration fails we will take the
-		 * lock again and unmark it.
-		 */
-		mutex_unlock(&cma->lock);
-
-		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
-		mutex_lock(&cma_mutex);
-		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
-		mutex_unlock(&cma_mutex);
-		if (ret == 0) {
-			page = pfn_to_page(pfn);
-			break;
-		} else if (ret != -EBUSY) {
-			cma_clear_bitmap(cma, pfn, count);
-			break;
-		}
-		cma_clear_bitmap(cma, pfn, count);
-		pr_debug("%s(): memory range at %p is busy, retrying\n",
-			 __func__, pfn_to_page(pfn));
-		/* try again with a bit different memory target */
-		start = bitmap_no + mask + 1;
-	}
-
-	pr_debug("%s(): returned %p\n", __func__, page);
-	return page;
-}
-
 /**
  * dma_alloc_from_contiguous() - allocate pages from contiguous area
  * @dev:   Pointer to device for which the allocation is performed.
@@ -427,35 +190,10 @@ static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 				       unsigned int align)
 {
-	struct cma *cma = dev_get_cma_area(dev);
-
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	return __dma_alloc_from_contiguous(cma, count, align);
-}
-
-static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
-				 int count)
-{
-	unsigned long pfn;
-
-	if (!cma || !pages)
-		return false;
-
-	pr_debug("%s(page %p)\n", __func__, (void *)pages);
-
-	pfn = page_to_pfn(pages);
-
-	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
-		return false;
-
-	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
-
-	free_contig_range(pfn, count);
-	cma_clear_bitmap(cma, pfn, count);
-
-	return true;
+	return cma_alloc(dev_get_cma_area(dev), count, align);
 }
 
 /**
@@ -471,7 +209,5 @@ static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 				 int count)
 {
-	struct cma *cma = dev_get_cma_area(dev);
-
-	return __dma_release_from_contiguous(cma, pages, count);
+	return cma_release(dev_get_cma_area(dev), pages, count);
 }