Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/dma-mapping.c | 114 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 104 insertions(+), 10 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4e7d1182e8a3..051204fc4617 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 	vunmap(cpu_addr);
 }
 
+#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+
 struct dma_pool {
 	size_t size;
 	spinlock_t lock;
 	unsigned long *bitmap;
 	unsigned long nr_pages;
 	void *vaddr;
-	struct page *page;
+	struct page **pages;
 };
 
 static struct dma_pool atomic_pool = {
-	.size = SZ_256K,
+	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
 };
 
 static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+	/*
+	 * Catch any attempt to set the pool size too late.
+	 */
+	BUG_ON(atomic_pool.vaddr);
+
+	/*
+	 * Set architecture specific coherent pool size only if
+	 * it has not been changed by kernel command line parameter.
+	 */
+	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+		atomic_pool.size = size;
+}
+
 /*
  * Initialise the coherent pool for atomic allocations.
  */
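The new init_dma_coherent_pool_size() hook above lets platform code request a larger atomic pool before atomic_pool_init() runs; a coherent_pool= value on the kernel command line still wins, because the hook only applies while the size is at its default. A minimal sketch of how a board file might use it (the "foo" machine, its init_early hook and the header exporting the prototype are assumptions for illustration, not part of this patch):

/* Hypothetical board file: enlarge the atomic DMA pool early. */
#include <asm/mach/arch.h>
#include <asm/sizes.h>

static void __init foo_init_early(void)
{
	/* Request a 1 MiB pool; ignored if coherent_pool= already changed it. */
	init_dma_coherent_pool_size(SZ_1M);
}

MACHINE_START(FOO, "Hypothetical foo board")
	/* ... other machine_desc fields ... */
	.init_early	= foo_init_early,
MACHINE_END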
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
 	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
 	unsigned long *bitmap;
 	struct page *page;
+	struct page **pages;
 	void *ptr;
 	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
 	if (!bitmap)
 		goto no_bitmap;
 
+	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		goto no_pages;
+
 	if (IS_ENABLED(CONFIG_CMA))
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
 	else
 		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
					   &page, NULL);
 	if (ptr) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++)
+			pages[i] = page + i;
+
 		spin_lock_init(&pool->lock);
 		pool->vaddr = ptr;
-		pool->page = page;
+		pool->pages = pages;
 		pool->bitmap = bitmap;
 		pool->nr_pages = nr_pages;
 		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
 			(unsigned)pool->size / 1024);
 		return 0;
 	}
+no_pages:
 	kfree(bitmap);
 no_bitmap:
 	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 	if (pageno < pool->nr_pages) {
 		bitmap_set(pool->bitmap, pageno, count);
 		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->page + pageno;
+		*ret_page = pool->pages[pageno];
+	} else {
+		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+			    "Please increase it with coherent_pool= kernel parameter!\n",
+			    (unsigned)pool->size / 1024);
 	}
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return ptr;
 }
 
+static bool __in_atomic_pool(void *start, size_t size)
+{
+	struct dma_pool *pool = &atomic_pool;
+	void *end = start + size;
+	void *pool_start = pool->vaddr;
+	void *pool_end = pool->vaddr + pool->size;
+
+	if (start < pool_start || start > pool_end)
+		return false;
+
+	if (end <= pool_end)
+		return true;
+
+	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+	     start, end - 1, pool_start, pool_end - 1);
+
+	return false;
+}
+
 static int __free_from_pool(void *start, size_t size)
 {
 	struct dma_pool *pool = &atomic_pool;
 	unsigned long pageno, count;
 	unsigned long flags;
 
-	if (start < pool->vaddr || start > pool->vaddr + pool->size)
+	if (!__in_atomic_pool(start, size))
 		return 0;
 
-	if (start + size > pool->vaddr + pool->size) {
-		WARN(1, "freeing wrong coherent size from pool\n");
-		return 0;
-	}
-
 	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
 	count = size >> PAGE_SHIFT;
 
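The new __in_atomic_pool() helper centralises the bounds check that __free_from_pool() used to open-code, and it is reused by the IOMMU paths further down. Illustrative calls (not from this patch) for a pool of pool->size bytes mapped at pool->vaddr:

	__in_atomic_pool(pool->vaddr, SZ_4K);                      /* true: entirely inside the pool */
	__in_atomic_pool(pool->vaddr + pool->size + SZ_4K, SZ_4K); /* false: starts outside the pool */
	__in_atomic_pool(pool->vaddr + pool->size - SZ_4K, SZ_8K); /* WARNs, then false: runs past the end */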
@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
 	return 0;
 }
 
+static struct page **__atomic_get_pages(void *addr)
+{
+	struct dma_pool *pool = &atomic_pool;
+	struct page **pages = pool->pages;
+	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+	return pages + offs;
+}
+
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
 	struct vm_struct *area;
 
+	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+		return __atomic_get_pages(cpu_addr);
+
 	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
 		return cpu_addr;
 
@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 	return NULL;
 }
 
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+				  dma_addr_t *handle)
+{
+	struct page *page;
+	void *addr;
+
+	addr = __alloc_from_pool(size, &page);
+	if (!addr)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, &page, size);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__free_from_pool(addr, size);
+	return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+				dma_addr_t handle, size_t size)
+{
+	__iommu_remove_mapping(dev, handle, size);
+	__free_from_pool(page_address(pages[0]), size);
+}
+
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
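With __iommu_alloc_atomic() and __iommu_free_atomic() in place, GFP_ATOMIC requests that reach the IOMMU-backed dma_map_ops are served from the coherent pool instead of the sleeping __iommu_alloc_buffer() path. A hedged sketch of a hypothetical driver exercising this from atomic context (driver name, buffer size and wiring are assumptions):

/* Hypothetical driver snippet; assumes <linux/dma-mapping.h> and <linux/interrupt.h>. */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;
	dma_addr_t dma;
	void *buf;

	/* IRQ context cannot sleep, so GFP_ATOMIC routes the allocation to the pool. */
	buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_ATOMIC);
	if (!buf)
		return IRQ_NONE;

	/* ... fill buf and hand it to the hardware ... */

	dma_free_coherent(dev, SZ_4K, buf, dma);
	return IRQ_HANDLED;
}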
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
+	if (gfp & GFP_ATOMIC)
+		return __iommu_alloc_atomic(dev, size, handle);
+
 	pages = __iommu_alloc_buffer(dev, size, gfp);
 	if (!pages)
 		return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, pages, handle, size);
+		return;
+	}
+
 	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
 		unmap_kernel_range((unsigned long)cpu_addr, size);
 		vunmap(cpu_addr);
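When the pool runs dry, the pr_err_once() added in __alloc_from_pool() points at the existing coherent_pool= early parameter. For example, booting with:

	coherent_pool=1M

reserves a 1 MiB pool (early_coherent_pool() parses the argument with memparse(), so the usual K/M size suffixes apply). That command-line value takes precedence over a platform's init_dma_coherent_pool_size() call, which only applies while the size is still DEFAULT_DMA_COHERENT_POOL_SIZE.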