author    Laura Abbott <lauraa@codeaurora.org>    2014-02-25 14:01:19 -0500
committer Marek Szyprowski <m.szyprowski@samsung.com>    2014-05-15 06:14:21 -0400
commit    7ee793a62fa8c544f8b844e6e87b2d8e8836b219
tree      5885be219592f9cf5a1cb2f81efda7148ad3019a
parent    d6d211db37e75de2ddc3a4f979038c40df7cc79c
cma: Remove potential deadlock situation
CMA locking is currently very coarse. The cma_mutex protects both the
bitmap and avoids concurrency with alloc_contig_range. There are several
situations which may result in a deadlock on the CMA mutex currently,
mostly involving AB/BA situations with alloc and free. Fix this issue by
protecting the bitmap with a mutex per CMA region and using the existing
mutex for protecting against concurrency with alloc_contig_range.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
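To make the AB/BA hazard concrete, here is a standalone userspace sketch of
the shape of the problem. Everything in it is illustrative: dev_lock stands
in for whatever second lock the migration path under alloc_contig_range()
may need while cma_mutex is held, and none of the names are kernel API.
Built with cc -pthread, it deadlocks by design.

/*
 * Illustrative AB/BA deadlock: alloc takes A then B, free takes B then A.
 * cma_mutex (A) plays the old coarse CMA lock; dev_lock (B) is a
 * hypothetical driver lock. Not kernel code.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cma_mutex = PTHREAD_MUTEX_INITIALIZER; /* lock A */
static pthread_mutex_t dev_lock  = PTHREAD_MUTEX_INITIALIZER; /* lock B */

/* Allocation path: holds A across "migration", which then needs B. */
static void *alloc_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&cma_mutex);  /* take A                    */
	sleep(1);                        /* widen the race window     */
	pthread_mutex_lock(&dev_lock);   /* wait for B held by freer  */
	pthread_mutex_unlock(&dev_lock);
	pthread_mutex_unlock(&cma_mutex);
	return NULL;
}

/* Free path: a driver frees a CMA buffer while holding its own lock. */
static void *free_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&dev_lock);   /* take B                    */
	sleep(1);
	pthread_mutex_lock(&cma_mutex);  /* wait for A: AB/BA cycle   */
	pthread_mutex_unlock(&cma_mutex);
	pthread_mutex_unlock(&dev_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, alloc_path, NULL);
	pthread_create(&b, NULL, free_path, NULL);
	pthread_join(a, NULL);           /* never returns: deadlocked */
	pthread_join(b, NULL);
	puts("unreachable");
	return 0;
}

The patch below breaks this cycle by never holding the bitmap lock across
alloc_contig_range().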
-rw-r--r-- drivers/base/dma-contiguous.c | 32 +++++++++++++++++++++++++-------
1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 165c2c299e57..fe72bac96275 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -37,6 +37,7 @@ struct cma {
 	unsigned long	base_pfn;
 	unsigned long	count;
 	unsigned long	*bitmap;
+	struct mutex	lock;
 };
 
 struct cma *dma_contiguous_default_area;
@@ -161,6 +162,7 @@ static int __init cma_activate_area(struct cma *cma)
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);
 
+	mutex_init(&cma->lock);
 	return 0;
 }
 
@@ -261,6 +263,13 @@ err:
 	return ret;
 }
 
+static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+	mutex_lock(&cma->lock);
+	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+	mutex_unlock(&cma->lock);
+}
+
 /**
  * dma_alloc_from_contiguous() - allocate pages from contiguous area
  * @dev:   Pointer to device for which the allocation is performed.
@@ -294,30 +303,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 
 	mask = (1 << align) - 1;
 
-	mutex_lock(&cma_mutex);
 
 	for (;;) {
+		mutex_lock(&cma->lock);
 		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
 						    start, count, mask);
-		if (pageno >= cma->count)
+		if (pageno >= cma->count) {
+			mutex_unlock(&cma->lock);
 			break;
+		}
+		bitmap_set(cma->bitmap, pageno, count);
+		/*
+		 * It's safe to drop the lock here. We've marked this region for
+		 * our exclusive use. If the migration fails we will take the
+		 * lock again and unmark it.
+		 */
+		mutex_unlock(&cma->lock);
 
 		pfn = cma->base_pfn + pageno;
+		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, count);
 			page = pfn_to_page(pfn);
 			break;
 		} else if (ret != -EBUSY) {
+			clear_cma_bitmap(cma, pfn, count);
 			break;
 		}
+		clear_cma_bitmap(cma, pfn, count);
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
 		/* try again with a bit different memory target */
 		start = pageno + mask + 1;
 	}
 
-	mutex_unlock(&cma_mutex);
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
@@ -350,10 +370,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
-	mutex_lock(&cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
 	free_contig_range(pfn, count);
-	mutex_unlock(&cma_mutex);
+	clear_cma_bitmap(cma, pfn, count);
 
 	return true;
 }
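The resulting locking discipline, summarized as a reader's note (not part
of the diff):

/*
 * After this patch:
 *
 *   cma->lock  - one per CMA region; protects only cma->bitmap and is
 *                held just long enough to find/set or clear bits.
 *   cma_mutex  - global; now serializes only alloc_contig_range().
 *
 * The two locks are never held simultaneously: the bitmap is marked
 * (reserving the range) before cma_mutex is taken, and unmarked via
 * clear_cma_bitmap() only after cma_mutex is dropped, so bitmap updates
 * can no longer participate in an AB/BA cycle through the allocator.
 */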