author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-08-06 19:05:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:16 -0400
commit	e0bdb37d95dd44086159607e571fd70f6b62dc2d (patch)
tree	a5d5fcae8c3050ba074aed430504d2787a1f0c86 /drivers/base
parent	a15bc0b89e8812d0db297bc771a85812c4fa83c1 (diff)
DMA, CMA: support arbitrary bitmap granularity
PPC KVM's CMA area management requires arbitrary bitmap granularity: it reserves a very large memory region and manages it with a bitmap in which one bit covers several pages, to reduce management overhead. So support arbitrary bitmap granularity for the following generalization.

[akpm@linux-foundation.org: s/1/1UL/]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Gleb Natapov <gleb@kernel.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
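In short, with a granularity of order_per_bit each bitmap bit covers 2^order_per_bit pages, so an area of count pages needs count >> order_per_bit bits, and a request of pages pages is rounded up to a whole number of bits. A minimal standalone sketch of that arithmetic, outside the kernel; struct cma_example, bitmap_maxno() and pages_to_bits() are illustrative names that merely mirror the helpers added by this patch:

	#include <stdio.h>

	/* Illustrative stand-in for the struct cma fields used here. */
	struct cma_example {
		unsigned long count;          /* size of the area in pages */
		unsigned int  order_per_bit;  /* one bit covers 2^order_per_bit pages */
	};

	/* Number of bitmap bits needed to cover the whole area. */
	static unsigned long bitmap_maxno(const struct cma_example *cma)
	{
		return cma->count >> cma->order_per_bit;
	}

	/* Bits needed for an allocation of 'pages' pages, rounded up. */
	static unsigned long pages_to_bits(const struct cma_example *cma,
					   unsigned long pages)
	{
		unsigned long per_bit = 1UL << cma->order_per_bit;

		return (pages + per_bit - 1) / per_bit;
	}

	int main(void)
	{
		/* e.g. a 1 GiB area of 4 KiB pages, one bit per 16 pages */
		struct cma_example cma = { .count = 262144, .order_per_bit = 4 };

		printf("bits for whole area: %lu\n", bitmap_maxno(&cma));        /* 16384 */
		printf("bits for 100 pages:  %lu\n", pages_to_bits(&cma, 100));  /* 7 */
		return 0;
	}

With order_per_bit = 0 (the value dma_contiguous_reserve_area() passes below) every bit covers exactly one page, so existing users keep the old behaviour.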
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/dma-contiguous.c	77
1 file changed, 53 insertions(+), 24 deletions(-)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 5f62c284072c..ad8a85bf852f 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -38,6 +38,7 @@ struct cma {
 	unsigned long	base_pfn;
 	unsigned long	count;
 	unsigned long	*bitmap;
+	unsigned int order_per_bit; /* Order of pages represented by one bit */
 	struct mutex	lock;
 };
 
@@ -157,9 +158,37 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 
 static DEFINE_MUTEX(cma_mutex);
 
+static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
+{
+	return (1UL << (align_order >> cma->order_per_bit)) - 1;
+}
+
+static unsigned long cma_bitmap_maxno(struct cma *cma)
+{
+	return cma->count >> cma->order_per_bit;
+}
+
+static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
+						unsigned long pages)
+{
+	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
+}
+
+static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+	unsigned long bitmap_no, bitmap_count;
+
+	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
+	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
+
+	mutex_lock(&cma->lock);
+	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
+	mutex_unlock(&cma->lock);
+}
+
 static int __init cma_activate_area(struct cma *cma)
 {
-	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
 	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
 	unsigned i = cma->count >> pageblock_order;
 	struct zone *zone;
@@ -215,9 +244,9 @@ static int __init cma_init_reserved_areas(void)
 core_initcall(cma_init_reserved_areas);
 
 static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 				phys_addr_t base, phys_addr_t limit,
-				phys_addr_t alignment,
+				phys_addr_t alignment, unsigned int order_per_bit,
 				struct cma **res_cma, bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
 	int ret = 0;
@@ -249,6 +278,10 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
 
+	/* size should be aligned with order_per_bit */
+	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
+		return -EINVAL;
+
 	/* Reserve memory */
 	if (base && fixed) {
 		if (memblock_is_region_reserved(base, size) ||
@@ -273,6 +306,7 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 	 */
 	cma->base_pfn = PFN_DOWN(base);
 	cma->count = size >> PAGE_SHIFT;
+	cma->order_per_bit = order_per_bit;
 	*res_cma = cma;
 	cma_area_count++;
 
@@ -308,7 +342,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 {
 	int ret;
 
-	ret = __dma_contiguous_reserve_area(size, base, limit, 0,
+	ret = __dma_contiguous_reserve_area(size, base, limit, 0, 0,
 					res_cma, fixed);
 	if (ret)
 		return ret;
@@ -320,17 +354,11 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 	return 0;
 }
 
-static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
-{
-	mutex_lock(&cma->lock);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
-	mutex_unlock(&cma->lock);
-}
-
 static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 				       unsigned int align)
 {
-	unsigned long mask, pfn, pageno, start = 0;
+	unsigned long mask, pfn, start = 0;
+	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
 	struct page *page = NULL;
 	int ret;
 
@@ -343,18 +371,19 @@ static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 	if (!count)
 		return NULL;
 
-	mask = (1 << align) - 1;
-
+	mask = cma_bitmap_aligned_mask(cma, align);
+	bitmap_maxno = cma_bitmap_maxno(cma);
+	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
 	for (;;) {
 		mutex_lock(&cma->lock);
-		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-						    start, count, mask);
-		if (pageno >= cma->count) {
+		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
+				bitmap_maxno, start, bitmap_count, mask);
+		if (bitmap_no >= bitmap_maxno) {
 			mutex_unlock(&cma->lock);
 			break;
 		}
-		bitmap_set(cma->bitmap, pageno, count);
+		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
 		/*
 		 * It's safe to drop the lock here. We've marked this region for
 		 * our exclusive use. If the migration fails we will take the
@@ -362,7 +391,7 @@ static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 	 */
 	mutex_unlock(&cma->lock);
 
-	pfn = cma->base_pfn + pageno;
+	pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 	mutex_lock(&cma_mutex);
 	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
 	mutex_unlock(&cma_mutex);
@@ -370,14 +399,14 @@ static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 			page = pfn_to_page(pfn);
 			break;
 		} else if (ret != -EBUSY) {
-			clear_cma_bitmap(cma, pfn, count);
+			cma_clear_bitmap(cma, pfn, count);
 			break;
 		}
-		clear_cma_bitmap(cma, pfn, count);
+		cma_clear_bitmap(cma, pfn, count);
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
 		/* try again with a bit different memory target */
-		start = pageno + mask + 1;
+		start = bitmap_no + mask + 1;
 	}
 
 	pr_debug("%s(): returned %p\n", __func__, page);
@@ -424,7 +453,7 @@ static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
 	free_contig_range(pfn, count);
-	clear_cma_bitmap(cma, pfn, count);
+	cma_clear_bitmap(cma, pfn, count);
 
 	return true;
 }
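For reference, the pfn-to-bitmap-index conversion the patch introduces is symmetric: cma_clear_bitmap() shifts (pfn - base_pfn) right by order_per_bit, and the allocation path shifts the found bitmap index left by the same amount before calling alloc_contig_range(). A tiny standalone sketch of that round trip, with hypothetical constants that are not kernel code:

	#include <assert.h>

	/* Hypothetical values standing in for the patch's cma fields. */
	#define BASE_PFN      0x80000UL  /* first pfn of the CMA area */
	#define ORDER_PER_BIT 4          /* one bitmap bit covers 16 pages */

	/* pfn -> bitmap index, as in cma_clear_bitmap() */
	static unsigned long pfn_to_bitmap_no(unsigned long pfn)
	{
		return (pfn - BASE_PFN) >> ORDER_PER_BIT;
	}

	/* bitmap index -> first pfn of that chunk, as in the allocation path */
	static unsigned long bitmap_no_to_pfn(unsigned long bitmap_no)
	{
		return BASE_PFN + (bitmap_no << ORDER_PER_BIT);
	}

	int main(void)
	{
		unsigned long pfn = BASE_PFN + 3 * (1UL << ORDER_PER_BIT);

		/* The round trip lands back on the same chunk-aligned pfn. */
		assert(bitmap_no_to_pfn(pfn_to_bitmap_no(pfn)) == pfn);
		return 0;
	}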