aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/base
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-06-03 14:51:51 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-03 14:51:51 -0400
commitff806d034ef8e9a95ff0b0532104dd65332e446b (patch)
tree6170ccd68e32ac47666879c35e9a2af922bbf446 /drivers/base
parent312c76f1a3989b8d0c0c13fee765bb2c41f2d114 (diff)
parentf70e3c4f8b6ab61f713e040822ec51f5de498146 (diff)
Merge branch 'for-v3.16' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping into next
Pull CMA and DMA-mapping fixes from Marek Szyprowski: "A few fixes for dma-mapping and CMA subsystems" * 'for-v3.16' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping: CMA: correct unlock target drivers/base/dma-contiguous.c: erratum of dev_get_cma_area arm: dma-mapping: add checking cma area initialized arm: dma-iommu: Clean up redundant variable cma: Remove potential deadlock situation
Diffstat (limited to 'drivers/base')
-rw-r--r--drivers/base/dma-contiguous.c34
1 file changed, 26 insertions, 8 deletions
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 165c2c299e57..c34ec3364243 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -37,6 +37,7 @@ struct cma {
37 unsigned long base_pfn; 37 unsigned long base_pfn;
38 unsigned long count; 38 unsigned long count;
39 unsigned long *bitmap; 39 unsigned long *bitmap;
40 struct mutex lock;
40}; 41};
41 42
42struct cma *dma_contiguous_default_area; 43struct cma *dma_contiguous_default_area;
@@ -161,6 +162,7 @@ static int __init cma_activate_area(struct cma *cma)
161 init_cma_reserved_pageblock(pfn_to_page(base_pfn)); 162 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
162 } while (--i); 163 } while (--i);
163 164
165 mutex_init(&cma->lock);
164 return 0; 166 return 0;
165} 167}
166 168
@@ -261,6 +263,13 @@ err:
261 return ret; 263 return ret;
262} 264}
263 265
266static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
267{
268 mutex_lock(&cma->lock);
269 bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
270 mutex_unlock(&cma->lock);
271}
272
264/** 273/**
265 * dma_alloc_from_contiguous() - allocate pages from contiguous area 274 * dma_alloc_from_contiguous() - allocate pages from contiguous area
266 * @dev: Pointer to device for which the allocation is performed. 275 * @dev: Pointer to device for which the allocation is performed.
@@ -269,7 +278,7 @@ err:
269 * 278 *
270 * This function allocates memory buffer for specified device. It uses 279 * This function allocates memory buffer for specified device. It uses
271 * device specific contiguous memory area if available or the default 280 * device specific contiguous memory area if available or the default
272 * global one. Requires architecture specific get_dev_cma_area() helper 281 * global one. Requires architecture specific dev_get_cma_area() helper
273 * function. 282 * function.
274 */ 283 */
275struct page *dma_alloc_from_contiguous(struct device *dev, int count, 284struct page *dma_alloc_from_contiguous(struct device *dev, int count,
@@ -294,30 +303,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
294 303
295 mask = (1 << align) - 1; 304 mask = (1 << align) - 1;
296 305
297 mutex_lock(&cma_mutex);
298 306
299 for (;;) { 307 for (;;) {
308 mutex_lock(&cma->lock);
300 pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count, 309 pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
301 start, count, mask); 310 start, count, mask);
302 if (pageno >= cma->count) 311 if (pageno >= cma->count) {
312 mutex_unlock(&cma->lock);
303 break; 313 break;
314 }
315 bitmap_set(cma->bitmap, pageno, count);
316 /*
317 * It's safe to drop the lock here. We've marked this region for
318 * our exclusive use. If the migration fails we will take the
319 * lock again and unmark it.
320 */
321 mutex_unlock(&cma->lock);
304 322
305 pfn = cma->base_pfn + pageno; 323 pfn = cma->base_pfn + pageno;
324 mutex_lock(&cma_mutex);
306 ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); 325 ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
326 mutex_unlock(&cma_mutex);
307 if (ret == 0) { 327 if (ret == 0) {
308 bitmap_set(cma->bitmap, pageno, count);
309 page = pfn_to_page(pfn); 328 page = pfn_to_page(pfn);
310 break; 329 break;
311 } else if (ret != -EBUSY) { 330 } else if (ret != -EBUSY) {
331 clear_cma_bitmap(cma, pfn, count);
312 break; 332 break;
313 } 333 }
334 clear_cma_bitmap(cma, pfn, count);
314 pr_debug("%s(): memory range at %p is busy, retrying\n", 335 pr_debug("%s(): memory range at %p is busy, retrying\n",
315 __func__, pfn_to_page(pfn)); 336 __func__, pfn_to_page(pfn));
316 /* try again with a bit different memory target */ 337 /* try again with a bit different memory target */
317 start = pageno + mask + 1; 338 start = pageno + mask + 1;
318 } 339 }
319 340
320 mutex_unlock(&cma_mutex);
321 pr_debug("%s(): returned %p\n", __func__, page); 341 pr_debug("%s(): returned %p\n", __func__, page);
322 return page; 342 return page;
323} 343}
@@ -350,10 +370,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
350 370
351 VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); 371 VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
352 372
353 mutex_lock(&cma_mutex);
354 bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
355 free_contig_range(pfn, count); 373 free_contig_range(pfn, count);
356 mutex_unlock(&cma_mutex); 374 clear_cma_bitmap(cma, pfn, count);
357 375
358 return true; 376 return true;
359} 377}