author     Laurent Pinchart <laurent.pinchart@ideasonboard.com>  2014-01-02 18:06:08 -0500
committer  Mauro Carvalho Chehab <m.chehab@samsung.com>  2014-05-25 10:10:50 -0400
commit     0e24e90f2ca72f7e68e41f3e99fc2838909c36e9
tree       fc999a3a38feadb02fa6ecbb4afed1ee78cbdfa7
parent     84ac0f09aee6c534a86ba8e2598f5e022772f0eb
[media] omap3isp: stat: Use the DMA API
Replace the OMAP-specific IOMMU API usage with the DMA API. All buffers
are now allocated using dma_alloc_coherent(), and the related sg table is
retrieved using dma_get_sgtable() for use in sync operations.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Sakari Ailus <sakari.ailus@iki.fi>
Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
-rw-r--r--  drivers/media/platform/omap3isp/ispstat.c | 123
-rw-r--r--  drivers/media/platform/omap3isp/ispstat.h |   2
2 files changed, 53 insertions(+), 72 deletions(-)
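As a rough, self-contained sketch of the allocation and sync pattern the patch adopts (the example_* names below are illustrative only and do not exist in the driver; error handling and GFP flags are simplified):

/*
 * Minimal sketch of the dma_alloc_coherent()/dma_get_sgtable() flow.
 * Assumes a struct like ispstat_buffer with an embedded sg_table.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

struct example_buf {
	struct sg_table sgt;	/* embedded, as in struct ispstat_buffer */
	void *virt_addr;
	dma_addr_t dma_addr;
};

static int example_alloc(struct device *dev, struct example_buf *buf,
			 size_t size)
{
	int ret;

	/* Coherent allocation replaces the OMAP IOMMU-specific vmalloc. */
	buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					    GFP_KERNEL);
	if (!buf->virt_addr)
		return -ENOMEM;

	/* Describe the buffer with an sg table for later partial syncs. */
	ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
			      size);
	if (ret < 0) {
		dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
		buf->virt_addr = NULL;
		return ret;
	}

	return 0;
}

static void example_sync(struct device *dev, struct example_buf *buf)
{
	/* Hand the buffer to the device, then back to the CPU, around use. */
	dma_sync_sg_for_device(dev, buf->sgt.sgl, buf->sgt.nents,
			       DMA_FROM_DEVICE);
	dma_sync_sg_for_cpu(dev, buf->sgt.sgl, buf->sgt.nents,
			    DMA_FROM_DEVICE);
}

static void example_free(struct device *dev, struct example_buf *buf,
			 size_t size)
{
	sg_free_table(&buf->sgt);
	dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
}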
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 4cf7eb1866cd..e6cbc1eaf4ca 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -26,7 +26,6 @@
  */
 
 #include <linux/dma-mapping.h>
-#include <linux/omap-iommu.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
@@ -77,21 +76,10 @@ static void __isp_stat_buf_sync_magic(struct ispstat *stat,
					dma_addr_t, unsigned long, size_t,
					enum dma_data_direction))
 {
-	struct device *dev = stat->isp->dev;
-	struct page *pg;
-	dma_addr_t dma_addr;
-	u32 offset;
-
-	/* Initial magic words */
-	pg = vmalloc_to_page(buf->virt_addr);
-	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
-	dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
-
-	/* Final magic words */
-	pg = vmalloc_to_page(buf->virt_addr + buf_size);
-	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
-	offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
-	dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
+	/* Sync the initial and final magic words. */
+	dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir);
+	dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK),
+		 buf_size & ~PAGE_MASK, MAGIC_SIZE, dir);
 }
 
 static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
@@ -183,8 +171,8 @@ static void isp_stat_buf_sync_for_device(struct ispstat *stat,
 	if (ISP_STAT_USES_DMAENGINE(stat))
 		return;
 
-	dma_sync_sg_for_device(stat->isp->dev, buf->sgt->sgl,
-			       buf->sgt->nents, DMA_FROM_DEVICE);
+	dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
+			       buf->sgt.nents, DMA_FROM_DEVICE);
 }
 
 static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
@@ -193,8 +181,8 @@ static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
 	if (ISP_STAT_USES_DMAENGINE(stat))
 		return;
 
-	dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt->sgl,
-			    buf->sgt->nents, DMA_FROM_DEVICE);
+	dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
+			    buf->sgt.nents, DMA_FROM_DEVICE);
 }
 
 static void isp_stat_buf_clear(struct ispstat *stat)
@@ -354,26 +342,21 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
 
 static void isp_stat_bufs_free(struct ispstat *stat)
 {
-	struct isp_device *isp = stat->isp;
-	int i;
+	struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
+			   ? NULL : stat->isp->dev;
+	unsigned int i;
 
 	for (i = 0; i < STAT_MAX_BUFS; i++) {
 		struct ispstat_buffer *buf = &stat->buf[i];
 
-		if (!ISP_STAT_USES_DMAENGINE(stat)) {
-			if (IS_ERR_OR_NULL((void *)buf->dma_addr))
-				continue;
-			if (buf->sgt)
-				dma_unmap_sg(isp->dev, buf->sgt->sgl,
-					     buf->sgt->nents, DMA_FROM_DEVICE);
-			omap_iommu_vfree(isp->domain, isp->dev, buf->dma_addr);
-		} else {
-			if (!buf->virt_addr)
-				continue;
-			dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
-					  buf->virt_addr, buf->dma_addr);
-		}
-		buf->sgt = NULL;
+		if (!buf->virt_addr)
+			continue;
+
+		sg_free_table(&buf->sgt);
+
+		dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr,
+				  buf->dma_addr);
+
 		buf->dma_addr = 0;
 		buf->virt_addr = NULL;
 		buf->empty = 1;
@@ -386,47 +369,49 @@ static void isp_stat_bufs_free(struct ispstat *stat)
 	stat->active_buf = NULL;
 }
 
-static int isp_stat_bufs_alloc_iommu(struct ispstat *stat,
-				     struct ispstat_buffer *buf,
-				     unsigned int size)
-{
-	struct isp_device *isp = stat->isp;
-	struct iovm_struct *iovm;
-
-	buf->dma_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
-					   size, IOMMU_FLAG);
-	if (IS_ERR_VALUE(buf->dma_addr))
-		return -ENOMEM;
-
-	iovm = omap_find_iovm_area(isp->dev, buf->dma_addr);
-	if (!iovm)
-		return -ENOMEM;
-
-	if (!dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
-			DMA_FROM_DEVICE))
-		return -ENOMEM;
-
-	buf->sgt = iovm->sgt;
-	buf->virt_addr = omap_da_to_va(stat->isp->dev, buf->dma_addr);
-
-	return 0;
-}
-
-static int isp_stat_bufs_alloc_dma(struct ispstat *stat,
+static int isp_stat_bufs_alloc_one(struct device *dev,
 				   struct ispstat_buffer *buf,
 				   unsigned int size)
 {
-	buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
-					    &buf->dma_addr, GFP_KERNEL | GFP_DMA);
+	int ret;
 
-	if (!buf->virt_addr || !buf->dma_addr)
+	buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
+					    GFP_KERNEL | GFP_DMA);
+	if (!buf->virt_addr)
 		return -ENOMEM;
 
+	ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
+			      size);
+	if (ret < 0) {
+		dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
+		buf->virt_addr = NULL;
+		buf->dma_addr = 0;
+		return ret;
+	}
+
 	return 0;
 }
 
+/*
+ * The device passed to the DMA API depends on whether the statistics block uses
+ * ISP DMA, external DMA or PIO to transfer data.
+ *
+ * The first case (for the AEWB and AF engines) passes the ISP device, resulting
+ * in the DMA buffers being mapped through the ISP IOMMU.
+ *
+ * The second case (for the histogram engine) should pass the DMA engine device.
+ * As that device isn't accessible through the OMAP DMA engine API the driver
+ * passes NULL instead, resulting in the buffers being mapped directly as
+ * physical pages.
+ *
+ * The third case (for the histogram engine) doesn't require any mapping. The
+ * buffers could be allocated with kmalloc/vmalloc, but we still use
+ * dma_alloc_coherent() for consistency purpose.
+ */
 static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
 {
+	struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
+			   ? NULL : stat->isp->dev;
 	unsigned long flags;
 	unsigned int i;
 
@@ -458,11 +443,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
 		struct ispstat_buffer *buf = &stat->buf[i];
 		int ret;
 
-		if (ISP_STAT_USES_DMAENGINE(stat))
-			ret = isp_stat_bufs_alloc_dma(stat, buf, size);
-		else
-			ret = isp_stat_bufs_alloc_iommu(stat, buf, size);
-
+		ret = isp_stat_bufs_alloc_one(dev, buf, size);
 		if (ret < 0) {
 			dev_err(stat->isp->dev,
 				"%s: Failed to allocate DMA buffer %u\n",
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
index 857f45edc755..58d6ac7cb664 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -46,7 +46,7 @@
 struct ispstat;
 
 struct ispstat_buffer {
-	const struct sg_table *sgt;
+	struct sg_table sgt;
 	void *virt_addr;
 	dma_addr_t dma_addr;
 	struct timespec ts;