 drivers/media/platform/Kconfig                  |    4
 drivers/media/platform/omap3isp/Makefile        |    2
 drivers/media/platform/omap3isp/isp.c           |  108
 drivers/media/platform/omap3isp/isp.h           |    8
 drivers/media/platform/omap3isp/ispccdc.c       |  107
 drivers/media/platform/omap3isp/ispccdc.h       |   16
 drivers/media/platform/omap3isp/ispccp2.c       |    4
 drivers/media/platform/omap3isp/ispcsi2.c       |    4
 drivers/media/platform/omap3isp/isph3a_aewb.c   |    2
 drivers/media/platform/omap3isp/isph3a_af.c     |    2
 drivers/media/platform/omap3isp/isppreview.c    |    8
 drivers/media/platform/omap3isp/ispqueue.c      | 1161
 drivers/media/platform/omap3isp/ispqueue.h      |  188
 drivers/media/platform/omap3isp/ispresizer.c    |    8
 drivers/media/platform/omap3isp/ispstat.c       |  197
 drivers/media/platform/omap3isp/ispstat.h       |    3
 drivers/media/platform/omap3isp/ispvideo.c      |  325
 drivers/media/platform/omap3isp/ispvideo.h      |   29
 drivers/media/v4l2-core/videobuf2-core.c        |   24
 drivers/staging/media/omap4iss/iss_video.c      |    2
 include/media/videobuf2-core.h                  |    1
 21 files changed, 458 insertions(+), 1745 deletions(-)
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 20f1655e6d75..8108c698b548 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -93,7 +93,9 @@ config VIDEO_M32R_AR_M64278
 
 config VIDEO_OMAP3
         tristate "OMAP 3 Camera support"
-        depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
+        depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
+        select ARM_DMA_USE_IOMMU
+        select OMAP_IOMMU
         ---help---
           Driver for an OMAP 3 camera controller.
 
diff --git a/drivers/media/platform/omap3isp/Makefile b/drivers/media/platform/omap3isp/Makefile
index e8847e79e31a..254975a9174e 100644
--- a/drivers/media/platform/omap3isp/Makefile
+++ b/drivers/media/platform/omap3isp/Makefile
@@ -3,7 +3,7 @@
 ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG
 
 omap3-isp-objs += \
-        isp.o ispqueue.o ispvideo.o \
+        isp.o ispvideo.o \
         ispcsiphy.o ispccp2.o ispcsi2.o \
         ispccdc.o isppreview.o ispresizer.o \
         ispstat.o isph3a_aewb.o isph3a_af.o isphist.o
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 06a0df434249..2c7aa6720569 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -69,6 +69,8 @@
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
 
+#include <asm/dma-iommu.h>
+
 #include <media/v4l2-common.h>
 #include <media/v4l2-device.h>
 
@@ -1397,14 +1399,14 @@ int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
         if (isp_pipeline_is_last(me)) {
                 struct isp_video *video = pipe->output;
                 unsigned long flags;
-                spin_lock_irqsave(&video->queue->irqlock, flags);
+                spin_lock_irqsave(&video->irqlock, flags);
                 if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
-                        spin_unlock_irqrestore(&video->queue->irqlock, flags);
+                        spin_unlock_irqrestore(&video->irqlock, flags);
                         atomic_set(stopping, 0);
                         smp_mb();
                         return 0;
                 }
-                spin_unlock_irqrestore(&video->queue->irqlock, flags);
+                spin_unlock_irqrestore(&video->irqlock, flags);
                 if (!wait_event_timeout(*wait, !atomic_read(stopping),
                                         msecs_to_jiffies(1000))) {
                         atomic_set(stopping, 0);
@@ -1625,7 +1627,7 @@ struct isp_device *omap3isp_get(struct isp_device *isp)
  * Decrement the reference count on the ISP. If the last reference is released,
  * power-down all submodules, disable clocks and free temporary buffers.
  */
-void omap3isp_put(struct isp_device *isp)
+static void __omap3isp_put(struct isp_device *isp, bool save_ctx)
 {
         if (isp == NULL)
                 return;
@@ -1634,7 +1636,7 @@ void omap3isp_put(struct isp_device *isp)
         BUG_ON(isp->ref_count == 0);
         if (--isp->ref_count == 0) {
                 isp_disable_interrupts(isp);
-                if (isp->domain) {
+                if (save_ctx) {
                         isp_save_ctx(isp);
                         isp->has_context = 1;
                 }
@@ -1648,6 +1650,11 @@ void omap3isp_put(struct isp_device *isp)
         mutex_unlock(&isp->isp_mutex);
 }
 
+void omap3isp_put(struct isp_device *isp)
+{
+        __omap3isp_put(isp, true);
+}
+
 /* --------------------------------------------------------------------------
  * Platform device driver
  */
@@ -2120,6 +2127,61 @@ error_csiphy:
         return ret;
 }
 
+static void isp_detach_iommu(struct isp_device *isp)
+{
+        arm_iommu_release_mapping(isp->mapping);
+        isp->mapping = NULL;
+        iommu_group_remove_device(isp->dev);
+}
+
+static int isp_attach_iommu(struct isp_device *isp)
+{
+        struct dma_iommu_mapping *mapping;
+        struct iommu_group *group;
+        int ret;
+
+        /* Create a device group and add the device to it. */
+        group = iommu_group_alloc();
+        if (IS_ERR(group)) {
+                dev_err(isp->dev, "failed to allocate IOMMU group\n");
+                return PTR_ERR(group);
+        }
+
+        ret = iommu_group_add_device(group, isp->dev);
+        iommu_group_put(group);
+
+        if (ret < 0) {
+                dev_err(isp->dev, "failed to add device to IPMMU group\n");
+                return ret;
+        }
+
+        /*
+         * Create the ARM mapping, used by the ARM DMA mapping core to allocate
+         * VAs. This will allocate a corresponding IOMMU domain.
+         */
+        mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
+        if (IS_ERR(mapping)) {
+                dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
+                ret = PTR_ERR(mapping);
+                goto error;
+        }
+
+        isp->mapping = mapping;
+
+        /* Attach the ARM VA mapping to the device. */
+        ret = arm_iommu_attach_device(isp->dev, mapping);
+        if (ret < 0) {
+                dev_err(isp->dev, "failed to attach device to VA mapping\n");
+                goto error;
+        }
+
+        return 0;
+
+error:
+        isp_detach_iommu(isp);
+        return ret;
+}
+
 /*
  * isp_remove - Remove ISP platform device
  * @pdev: Pointer to ISP platform device
@@ -2135,10 +2197,8 @@ static int isp_remove(struct platform_device *pdev)
         isp_xclk_cleanup(isp);
 
         __omap3isp_get(isp, false);
-        iommu_detach_device(isp->domain, &pdev->dev);
-        iommu_domain_free(isp->domain);
-        isp->domain = NULL;
-        omap3isp_put(isp);
+        isp_detach_iommu(isp);
+        __omap3isp_put(isp, false);
 
         return 0;
 }
@@ -2265,39 +2325,32 @@ static int isp_probe(struct platform_device *pdev)
                 }
         }
 
-        isp->domain = iommu_domain_alloc(pdev->dev.bus);
-        if (!isp->domain) {
-                dev_err(isp->dev, "can't alloc iommu domain\n");
-                ret = -ENOMEM;
+        /* IOMMU */
+        ret = isp_attach_iommu(isp);
+        if (ret < 0) {
+                dev_err(&pdev->dev, "unable to attach to IOMMU\n");
                 goto error_isp;
         }
 
-        ret = iommu_attach_device(isp->domain, &pdev->dev);
-        if (ret) {
-                dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
-                ret = -EPROBE_DEFER;
-                goto free_domain;
-        }
-
         /* Interrupt */
         isp->irq_num = platform_get_irq(pdev, 0);
         if (isp->irq_num <= 0) {
                 dev_err(isp->dev, "No IRQ resource\n");
                 ret = -ENODEV;
-                goto detach_dev;
+                goto error_iommu;
         }
 
         if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
                              "OMAP3 ISP", isp)) {
                 dev_err(isp->dev, "Unable to request IRQ\n");
                 ret = -EINVAL;
-                goto detach_dev;
+                goto error_iommu;
         }
 
         /* Entities */
         ret = isp_initialize_modules(isp);
         if (ret < 0)
-                goto detach_dev;
+                goto error_iommu;
 
         ret = isp_register_entities(isp);
         if (ret < 0)
@@ -2310,14 +2363,11 @@ static int isp_probe(struct platform_device *pdev)
 
 error_modules:
         isp_cleanup_modules(isp);
-detach_dev:
-        iommu_detach_device(isp->domain, &pdev->dev);
-free_domain:
-        iommu_domain_free(isp->domain);
-        isp->domain = NULL;
+error_iommu:
+        isp_detach_iommu(isp);
 error_isp:
         isp_xclk_cleanup(isp);
-        omap3isp_put(isp);
+        __omap3isp_put(isp, false);
 error:
         mutex_destroy(&isp->isp_mutex);
 
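
Note on the isp.c hunks above: the driver now relies on the ARM DMA IOMMU glue instead of managing an iommu_domain of its own. The snippet below is a standalone sketch, not code from the patch (the example_* names and the bare struct device pointer are illustrative assumptions); it shows the general pattern that once arm_iommu_attach_device() succeeds, plain DMA API calls on that device hand back bus addresses inside the IOMMU-backed window.

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/sizes.h>

    #include <asm/dma-iommu.h>

    /* Hypothetical helper: give @dev a 2 GiB IOMMU-backed DMA window at 1 GiB,
     * mirroring the SZ_1G/SZ_2G values used by isp_attach_iommu() above. */
    static int example_attach_iommu(struct device *dev,
                                    struct dma_iommu_mapping **out)
    {
            struct dma_iommu_mapping *mapping;
            int ret;

            mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
            if (IS_ERR(mapping))
                    return PTR_ERR(mapping);

            ret = arm_iommu_attach_device(dev, mapping);
            if (ret < 0) {
                    arm_iommu_release_mapping(mapping);
                    return ret;
            }

            *out = mapping;
            return 0;
    }

    /* With the mapping attached, the regular DMA API routes through the IOMMU:
     * the dma_addr_t written to ISP registers is an IOMMU virtual address,
     * which is why the driver no longer needs a struct iommu_domain. */
    static void *example_alloc_table(struct device *dev, size_t size,
                                     dma_addr_t *dma)
    {
            return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
    }

In the patch itself this is exactly the role isp_attach_iommu() plays before isp_probe() continues with IRQ and entity setup.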
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 6d5e69711907..2c314eea1252 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -45,8 +45,6 @@
 #include "ispcsi2.h"
 #include "ispccp2.h"
 
-#define IOMMU_FLAG        (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
-
 #define ISP_TOK_TERM        0xFFFFFFFF        /*
                                                * terminating token for ISP
                                                * modules reg list
@@ -152,6 +150,7 @@ struct isp_xclk {
  *             regions.
  * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
  *                  regions.
+ * @mapping: IOMMU mapping
  * @stat_lock: Spinlock for handling statistics
  * @isp_mutex: Mutex for serializing requests to ISP.
  * @stop_failure: Indicates that an entity failed to stop.
@@ -171,7 +170,6 @@ struct isp_xclk {
  * @isp_res: Pointer to current settings for ISP Resizer.
  * @isp_prev: Pointer to current settings for ISP Preview.
  * @isp_ccdc: Pointer to current settings for ISP CCDC.
- * @iommu: Pointer to requested IOMMU instance for ISP.
  * @platform_cb: ISP driver callback function pointers for platform code
  *
  * This structure is used to store the OMAP ISP Information.
@@ -189,6 +187,8 @@ struct isp_device {
         void __iomem *mmio_base[OMAP3_ISP_IOMEM_LAST];
         unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
 
+        struct dma_iommu_mapping *mapping;
+
         /* ISP Obj */
         spinlock_t stat_lock;        /* common lock for statistic drivers */
         struct mutex isp_mutex;        /* For handling ref_count field */
@@ -219,8 +219,6 @@ struct isp_device {
 
         unsigned int sbl_resources;
         unsigned int subclk_resources;
-
-        struct iommu_domain *domain;
 };
 
 #define v4l2_dev_to_isp_device(dev) \
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 4d920c800ff5..9f727d20f06d 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -30,7 +30,6 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
-#include <linux/omap-iommu.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <media/v4l2-event.h>
@@ -206,7 +205,8 @@ static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
  * ccdc_lsc_program_table - Program Lens Shading Compensation table address.
  * @ccdc: Pointer to ISP CCDC device.
  */
-static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc, u32 addr)
+static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc,
+                                   dma_addr_t addr)
 {
         isp_reg_writel(to_isp_device(ccdc), addr,
                        OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE);
@@ -333,7 +333,7 @@ static int __ccdc_lsc_configure(struct isp_ccdc_device *ccdc,
                 return -EBUSY;
 
         ccdc_lsc_setup_regs(ccdc, &req->config);
-        ccdc_lsc_program_table(ccdc, req->table);
+        ccdc_lsc_program_table(ccdc, req->table.dma);
         return 0;
 }
 
@@ -368,11 +368,12 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
         if (req == NULL)
                 return;
 
-        if (req->iovm)
-                dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
-                             req->iovm->sgt->nents, DMA_TO_DEVICE);
-        if (req->table)
-                omap_iommu_vfree(isp->domain, isp->dev, req->table);
+        if (req->table.addr) {
+                sg_free_table(&req->table.sgt);
+                dma_free_coherent(isp->dev, req->config.size, req->table.addr,
+                                  req->table.dma);
+        }
+
         kfree(req);
 }
 
@@ -416,7 +417,6 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
         struct isp_device *isp = to_isp_device(ccdc);
         struct ispccdc_lsc_config_req *req;
         unsigned long flags;
-        void *table;
         u16 update;
         int ret;
 
@@ -444,38 +444,31 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
 
                 req->enable = 1;
 
-                req->table = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
-                                        req->config.size, IOMMU_FLAG);
-                if (IS_ERR_VALUE(req->table)) {
-                        req->table = 0;
-                        ret = -ENOMEM;
-                        goto done;
-                }
-
-                req->iovm = omap_find_iovm_area(isp->dev, req->table);
-                if (req->iovm == NULL) {
+                req->table.addr = dma_alloc_coherent(isp->dev, req->config.size,
+                                                     &req->table.dma,
+                                                     GFP_KERNEL);
+                if (req->table.addr == NULL) {
                         ret = -ENOMEM;
                         goto done;
                 }
 
-                if (!dma_map_sg(isp->dev, req->iovm->sgt->sgl,
-                                req->iovm->sgt->nents, DMA_TO_DEVICE)) {
-                        ret = -ENOMEM;
-                        req->iovm = NULL;
-                        goto done;
-                }
+                ret = dma_get_sgtable(isp->dev, &req->table.sgt,
+                                      req->table.addr, req->table.dma,
+                                      req->config.size);
+                if (ret < 0)
+                        goto done;
 
-                dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl,
-                                    req->iovm->sgt->nents, DMA_TO_DEVICE);
+                dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
+                                    req->table.sgt.nents, DMA_TO_DEVICE);
 
-                table = omap_da_to_va(isp->dev, req->table);
-                if (copy_from_user(table, config->lsc, req->config.size)) {
+                if (copy_from_user(req->table.addr, config->lsc,
+                                   req->config.size)) {
                         ret = -EFAULT;
                         goto done;
                 }
 
-                dma_sync_sg_for_device(isp->dev, req->iovm->sgt->sgl,
-                                       req->iovm->sgt->nents, DMA_TO_DEVICE);
+                dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
+                                       req->table.sgt.nents, DMA_TO_DEVICE);
         }
 
         spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
@@ -584,7 +577,7 @@ static void ccdc_configure_fpc(struct isp_ccdc_device *ccdc)
         if (!ccdc->fpc_en)
                 return;
 
-        isp_reg_writel(isp, ccdc->fpc.fpcaddr, OMAP3_ISP_IOMEM_CCDC,
+        isp_reg_writel(isp, ccdc->fpc.dma, OMAP3_ISP_IOMEM_CCDC,
                        ISPCCDC_FPC_ADDR);
         /* The FPNUM field must be set before enabling FPC. */
         isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT),
@@ -724,8 +717,9 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
         ccdc->shadow_update = 0;
 
         if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) {
-                u32 table_old = 0;
-                u32 table_new;
+                struct omap3isp_ccdc_fpc fpc;
+                struct ispccdc_fpc fpc_old = { .addr = NULL, };
+                struct ispccdc_fpc fpc_new;
                 u32 size;
 
                 if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
@@ -734,35 +728,39 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
                 ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag);
 
                 if (ccdc->fpc_en) {
-                        if (copy_from_user(&ccdc->fpc, ccdc_struct->fpc,
-                                           sizeof(ccdc->fpc)))
+                        if (copy_from_user(&fpc, ccdc_struct->fpc, sizeof(fpc)))
                                 return -EFAULT;
 
+                        size = fpc.fpnum * 4;
+
                         /*
-                         * table_new must be 64-bytes aligned, but it's
-                         * already done by omap_iommu_vmalloc().
+                         * The table address must be 64-bytes aligned, which is
+                         * guaranteed by dma_alloc_coherent().
                          */
-                        size = ccdc->fpc.fpnum * 4;
-                        table_new = omap_iommu_vmalloc(isp->domain, isp->dev,
-                                                       0, size, IOMMU_FLAG);
-                        if (IS_ERR_VALUE(table_new))
+                        fpc_new.fpnum = fpc.fpnum;
+                        fpc_new.addr = dma_alloc_coherent(isp->dev, size,
+                                                          &fpc_new.dma,
+                                                          GFP_KERNEL);
+                        if (fpc_new.addr == NULL)
                                 return -ENOMEM;
 
-                        if (copy_from_user(omap_da_to_va(isp->dev, table_new),
-                                           (__force void __user *)
-                                           ccdc->fpc.fpcaddr, size)) {
-                                omap_iommu_vfree(isp->domain, isp->dev,
-                                                 table_new);
+                        if (copy_from_user(fpc_new.addr,
+                                           (__force void __user *)fpc.fpcaddr,
+                                           size)) {
+                                dma_free_coherent(isp->dev, size, fpc_new.addr,
+                                                  fpc_new.dma);
                                 return -EFAULT;
                         }
 
-                        table_old = ccdc->fpc.fpcaddr;
-                        ccdc->fpc.fpcaddr = table_new;
+                        fpc_old = ccdc->fpc;
+                        ccdc->fpc = fpc_new;
                 }
 
                 ccdc_configure_fpc(ccdc);
-                if (table_old != 0)
-                        omap_iommu_vfree(isp->domain, isp->dev, table_old);
+
+                if (fpc_old.addr != NULL)
+                        dma_free_coherent(isp->dev, fpc_old.fpnum * 4,
+                                          fpc_old.addr, fpc_old.dma);
         }
 
         return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -1523,7 +1521,7 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
 
         buffer = omap3isp_video_buffer_next(&ccdc->video_out);
         if (buffer != NULL) {
-                ccdc_set_outaddr(ccdc, buffer->isp_addr);
+                ccdc_set_outaddr(ccdc, buffer->dma);
                 restart = 1;
         }
 
@@ -1662,7 +1660,7 @@ static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
         if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
                 return -ENODEV;
 
-        ccdc_set_outaddr(ccdc, buffer->isp_addr);
+        ccdc_set_outaddr(ccdc, buffer->dma);
 
         /* We now have a buffer queued on the output, restart the pipeline
          * on the next CCDC interrupt if running in continuous mode (or when
@@ -2580,8 +2578,9 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
         cancel_work_sync(&ccdc->lsc.table_work);
         ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
 
-        if (ccdc->fpc.fpcaddr != 0)
-                omap_iommu_vfree(isp->domain, isp->dev, ccdc->fpc.fpcaddr);
+        if (ccdc->fpc.addr != NULL)
+                dma_free_coherent(isp->dev, ccdc->fpc.fpnum * 4, ccdc->fpc.addr,
+                                  ccdc->fpc.dma);
 
         mutex_destroy(&ccdc->ioctl_lock);
 }
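
The LSC and FPC hunks above swap omap_iommu_vmalloc()/omap_da_to_va() tables for plain coherent DMA allocations plus an sg_table obtained with dma_get_sgtable(). The following is a condensed, hypothetical sketch of that lifecycle (example_table and example_table_load() are illustrative names, not code from the patch):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/uaccess.h>

    struct example_table {
            void *addr;             /* CPU address of the coherent buffer */
            dma_addr_t dma;         /* device (IOMMU) address */
            struct sg_table sgt;    /* scatterlist view, kept for cache syncs */
    };

    static int example_table_load(struct device *dev, struct example_table *tbl,
                                  const void __user *data, size_t size)
    {
            int ret;

            tbl->addr = dma_alloc_coherent(dev, size, &tbl->dma, GFP_KERNEL);
            if (tbl->addr == NULL)
                    return -ENOMEM;

            ret = dma_get_sgtable(dev, &tbl->sgt, tbl->addr, tbl->dma, size);
            if (ret < 0)
                    goto err_free;

            /* Mirror the patch: sync, copy the user-supplied table, sync back. */
            dma_sync_sg_for_cpu(dev, tbl->sgt.sgl, tbl->sgt.nents, DMA_TO_DEVICE);

            if (copy_from_user(tbl->addr, data, size)) {
                    ret = -EFAULT;
                    goto err_sgt;
            }

            dma_sync_sg_for_device(dev, tbl->sgt.sgl, tbl->sgt.nents,
                                   DMA_TO_DEVICE);
            return 0;

    err_sgt:
            sg_free_table(&tbl->sgt);
    err_free:
            dma_free_coherent(dev, size, tbl->addr, tbl->dma);
            tbl->addr = NULL;
            return ret;
    }

Teardown in the patch (ccdc_lsc_free_request() and omap3isp_ccdc_cleanup()) is the mirror image: sg_free_table() followed by dma_free_coherent().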
diff --git a/drivers/media/platform/omap3isp/ispccdc.h b/drivers/media/platform/omap3isp/ispccdc.h
index 9d24e4107864..f65061602c71 100644
--- a/drivers/media/platform/omap3isp/ispccdc.h
+++ b/drivers/media/platform/omap3isp/ispccdc.h
@@ -46,6 +46,12 @@ enum ccdc_input_entity {
 
 #define OMAP3ISP_CCDC_NEVENTS        16
 
+struct ispccdc_fpc {
+        void *addr;
+        dma_addr_t dma;
+        unsigned int fpnum;
+};
+
 enum ispccdc_lsc_state {
         LSC_STATE_STOPPED = 0,
         LSC_STATE_STOPPING = 1,
@@ -57,8 +63,12 @@ struct ispccdc_lsc_config_req {
         struct list_head list;
         struct omap3isp_ccdc_lsc_config config;
         unsigned char enable;
-        u32 table;
-        struct iovm_struct *iovm;
+
+        struct {
+                void *addr;
+                dma_addr_t dma;
+                struct sg_table sgt;
+        } table;
 };
 
 /*
@@ -136,7 +146,7 @@ struct isp_ccdc_device {
                      fpc_en:1;
         struct omap3isp_ccdc_blcomp blcomp;
         struct omap3isp_ccdc_bclamp clamp;
-        struct omap3isp_ccdc_fpc fpc;
+        struct ispccdc_fpc fpc;
         struct ispccdc_lsc lsc;
         unsigned int update;
         unsigned int shadow_update;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index b30b67d22a58..f3801db9095c 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -549,7 +549,7 @@ static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
 
         buffer = omap3isp_video_buffer_next(&ccp2->video_in);
         if (buffer != NULL)
-                ccp2_set_inaddr(ccp2, buffer->isp_addr);
+                ccp2_set_inaddr(ccp2, buffer->dma);
 
         pipe->state |= ISP_PIPELINE_IDLE_INPUT;
 
@@ -940,7 +940,7 @@ static int ccp2_video_queue(struct isp_video *video, struct isp_buffer *buffer)
 {
         struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2;
 
-        ccp2_set_inaddr(ccp2, buffer->isp_addr);
+        ccp2_set_inaddr(ccp2, buffer->dma);
         return 0;
 }
 
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 620560828a48..5a2e47e58b84 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -695,7 +695,7 @@ static void csi2_isr_buffer(struct isp_csi2_device *csi2)
         if (buffer == NULL)
                 return;
 
-        csi2_set_outaddr(csi2, buffer->isp_addr);
+        csi2_set_outaddr(csi2, buffer->dma);
         csi2_ctx_enable(isp, csi2, 0, 1);
 }
 
@@ -812,7 +812,7 @@ static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer)
         struct isp_device *isp = video->isp;
         struct isp_csi2_device *csi2 = &isp->isp_csi2a;
 
-        csi2_set_outaddr(csi2, buffer->isp_addr);
+        csi2_set_outaddr(csi2, buffer->dma);
 
         /*
          * If streaming was enabled before there was a buffer queued
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index 75fd82b152ba..d6811ce263eb 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -47,7 +47,7 @@ static void h3a_aewb_setup_regs(struct ispstat *aewb, void *priv)
         if (aewb->state == ISPSTAT_DISABLED)
                 return;
 
-        isp_reg_writel(aewb->isp, aewb->active_buf->iommu_addr,
+        isp_reg_writel(aewb->isp, aewb->active_buf->dma_addr,
                        OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST);
 
         if (!aewb->update)
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index a0bf5af32438..6fc960cd30f5 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -51,7 +51,7 @@ static void h3a_af_setup_regs(struct ispstat *af, void *priv)
         if (af->state == ISPSTAT_DISABLED)
                 return;
 
-        isp_reg_writel(af->isp, af->active_buf->iommu_addr, OMAP3_ISP_IOMEM_H3A,
+        isp_reg_writel(af->isp, af->active_buf->dma_addr, OMAP3_ISP_IOMEM_H3A,
                        ISPH3A_AFBUFST);
 
         if (!af->update)
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 395b2b068c75..720809b07e75 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -1499,14 +1499,14 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
         if (prev->input == PREVIEW_INPUT_MEMORY) {
                 buffer = omap3isp_video_buffer_next(&prev->video_in);
                 if (buffer != NULL)
-                        preview_set_inaddr(prev, buffer->isp_addr);
+                        preview_set_inaddr(prev, buffer->dma);
                 pipe->state |= ISP_PIPELINE_IDLE_INPUT;
         }
 
         if (prev->output & PREVIEW_OUTPUT_MEMORY) {
                 buffer = omap3isp_video_buffer_next(&prev->video_out);
                 if (buffer != NULL) {
-                        preview_set_outaddr(prev, buffer->isp_addr);
+                        preview_set_outaddr(prev, buffer->dma);
                         restart = 1;
                 }
                 pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
@@ -1577,10 +1577,10 @@ static int preview_video_queue(struct isp_video *video,
         struct isp_prev_device *prev = &video->isp->isp_prev;
 
         if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
-                preview_set_inaddr(prev, buffer->isp_addr);
+                preview_set_inaddr(prev, buffer->dma);
 
         if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
-                preview_set_outaddr(prev, buffer->isp_addr);
+                preview_set_outaddr(prev, buffer->dma);
 
         return 0;
 }
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
deleted file mode 100644
index a5e65858e799..000000000000
--- a/drivers/media/platform/omap3isp/ispqueue.c
+++ /dev/null
@@ -1,1161 +0,0 @@
1/*
2 * ispqueue.c
3 *
4 * TI OMAP3 ISP - Video buffers queue handling
5 *
6 * Copyright (C) 2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <asm/cacheflush.h>
27#include <linux/dma-mapping.h>
28#include <linux/mm.h>
29#include <linux/pagemap.h>
30#include <linux/poll.h>
31#include <linux/scatterlist.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/vmalloc.h>
35
36#include "ispqueue.h"
37
38/* -----------------------------------------------------------------------------
39 * Video buffers management
40 */
41
42/*
43 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
44 *
45 * The typical operation required here is Cache Invalidation across
46 * the (user space) buffer address range. And this _must_ be done
47 * at QBUF stage (and *only* at QBUF).
48 *
49 * We try to use optimal cache invalidation function:
50 * - dmac_map_area:
51 * - used when the number of pages are _low_.
52 * - it becomes quite slow as the number of pages increase.
53 * - for 648x492 viewfinder (150 pages) it takes 1.3 ms.
54 * - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
55 *
56 * - flush_cache_all:
57 * - used when the number of pages are _high_.
58 * - time taken in the range of 500-900 us.
59 * - has a higher penalty but, as whole dcache + icache is invalidated
60 */
61/*
62 * FIXME: dmac_inv_range crashes randomly on the user space buffer
63 * address. Fall back to flush_cache_all for now.
64 */
65#define ISP_CACHE_FLUSH_PAGES_MAX 0
66
67static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
68{
69 if (buf->skip_cache)
70 return;
71
72 if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
73 buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
74 flush_cache_all();
75 else {
76 dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
77 DMA_FROM_DEVICE);
78 outer_inv_range(buf->vbuf.m.userptr,
79 buf->vbuf.m.userptr + buf->vbuf.length);
80 }
81}
82
83/*
84 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
85 *
86 * Lock the VMAs underlying the given buffer into memory. This avoids the
87 * userspace buffer mapping from being swapped out, making VIPT cache handling
88 * easier.
89 *
90 * Note that the pages will not be freed as the buffers have been locked to
91 * memory using by a call to get_user_pages(), but the userspace mapping could
92 * still disappear if the VMAs are not locked. This is caused by the memory
93 * management code trying to be as lock-less as possible, which results in the
94 * userspace mapping manager not finding out that the pages are locked under
95 * some conditions.
96 */
97static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
98{
99 struct vm_area_struct *vma;
100 unsigned long start;
101 unsigned long end;
102 int ret = 0;
103
104 if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
105 return 0;
106
107 /* We can be called from workqueue context if the current task dies to
108 * unlock the VMAs. In that case there's no current memory management
109 * context so unlocking can't be performed, but the VMAs have been or
110 * are getting destroyed anyway so it doesn't really matter.
111 */
112 if (!current || !current->mm)
113 return lock ? -EINVAL : 0;
114
115 start = buf->vbuf.m.userptr;
116 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
117
118 down_write(&current->mm->mmap_sem);
119 spin_lock(&current->mm->page_table_lock);
120
121 do {
122 vma = find_vma(current->mm, start);
123 if (vma == NULL) {
124 ret = -EFAULT;
125 goto out;
126 }
127
128 if (lock)
129 vma->vm_flags |= VM_LOCKED;
130 else
131 vma->vm_flags &= ~VM_LOCKED;
132
133 start = vma->vm_end + 1;
134 } while (vma->vm_end < end);
135
136 if (lock)
137 buf->vm_flags |= VM_LOCKED;
138 else
139 buf->vm_flags &= ~VM_LOCKED;
140
141out:
142 spin_unlock(&current->mm->page_table_lock);
143 up_write(&current->mm->mmap_sem);
144 return ret;
145}
146
147/*
148 * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
149 *
150 * Iterate over the vmalloc'ed area and create a scatter list entry for every
151 * page.
152 */
153static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
154{
155 struct scatterlist *sglist;
156 unsigned int npages;
157 unsigned int i;
158 void *addr;
159
160 addr = buf->vaddr;
161 npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
162
163 sglist = vmalloc(npages * sizeof(*sglist));
164 if (sglist == NULL)
165 return -ENOMEM;
166
167 sg_init_table(sglist, npages);
168
169 for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
170 struct page *page = vmalloc_to_page(addr);
171
172 if (page == NULL || PageHighMem(page)) {
173 vfree(sglist);
174 return -EINVAL;
175 }
176
177 sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
178 }
179
180 buf->sglen = npages;
181 buf->sglist = sglist;
182
183 return 0;
184}
185
186/*
187 * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
188 *
189 * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
190 */
191static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
192{
193 struct scatterlist *sglist;
194 unsigned int offset = buf->offset;
195 unsigned int i;
196
197 sglist = vmalloc(buf->npages * sizeof(*sglist));
198 if (sglist == NULL)
199 return -ENOMEM;
200
201 sg_init_table(sglist, buf->npages);
202
203 for (i = 0; i < buf->npages; ++i) {
204 if (PageHighMem(buf->pages[i])) {
205 vfree(sglist);
206 return -EINVAL;
207 }
208
209 sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
210 offset);
211 offset = 0;
212 }
213
214 buf->sglen = buf->npages;
215 buf->sglist = sglist;
216
217 return 0;
218}
219
220/*
221 * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
222 *
223 * Create a scatter list of physically contiguous pages starting at the buffer
224 * memory physical address.
225 */
226static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
227{
228 struct scatterlist *sglist;
229 unsigned int offset = buf->offset;
230 unsigned long pfn = buf->paddr >> PAGE_SHIFT;
231 unsigned int i;
232
233 sglist = vmalloc(buf->npages * sizeof(*sglist));
234 if (sglist == NULL)
235 return -ENOMEM;
236
237 sg_init_table(sglist, buf->npages);
238
239 for (i = 0; i < buf->npages; ++i, ++pfn) {
240 sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
241 offset);
242 /* PFNMAP buffers will not get DMA-mapped, set the DMA address
243 * manually.
244 */
245 sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
246 offset = 0;
247 }
248
249 buf->sglen = buf->npages;
250 buf->sglist = sglist;
251
252 return 0;
253}
254
255/*
256 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
257 *
258 * Release pages locked by a call isp_video_buffer_prepare_user and free the
259 * pages table.
260 */
261static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
262{
263 enum dma_data_direction direction;
264 unsigned int i;
265
266 if (buf->queue->ops->buffer_cleanup)
267 buf->queue->ops->buffer_cleanup(buf);
268
269 if (!(buf->vm_flags & VM_PFNMAP)) {
270 direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
271 ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
272 dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
273 direction);
274 }
275
276 vfree(buf->sglist);
277 buf->sglist = NULL;
278 buf->sglen = 0;
279
280 if (buf->pages != NULL) {
281 isp_video_buffer_lock_vma(buf, 0);
282
283 for (i = 0; i < buf->npages; ++i)
284 page_cache_release(buf->pages[i]);
285
286 vfree(buf->pages);
287 buf->pages = NULL;
288 }
289
290 buf->npages = 0;
291 buf->skip_cache = false;
292}
293
294/*
295 * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
296 *
297 * This function creates a list of pages for a userspace VMA. The number of
298 * pages is first computed based on the buffer size, and pages are then
299 * retrieved by a call to get_user_pages.
300 *
301 * Pages are pinned to memory by get_user_pages, making them available for DMA
302 * transfers. However, due to memory management optimization, it seems the
303 * get_user_pages doesn't guarantee that the pinned pages will not be written
304 * to swap and removed from the userspace mapping(s). When this happens, a page
305 * fault can be generated when accessing those unmapped pages.
306 *
307 * If the fault is triggered by a page table walk caused by VIPT cache
308 * management operations, the page fault handler might oops if the MM semaphore
309 * is held, as it can't handle kernel page faults in that case. To fix that, a
310 * fixup entry needs to be added to the cache management code, or the userspace
311 * VMA must be locked to avoid removing pages from the userspace mapping in the
312 * first place.
313 *
314 * If the number of pages retrieved is smaller than the number required by the
315 * buffer size, the function returns -EFAULT.
316 */
317static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
318{
319 unsigned long data;
320 unsigned int first;
321 unsigned int last;
322 int ret;
323
324 data = buf->vbuf.m.userptr;
325 first = (data & PAGE_MASK) >> PAGE_SHIFT;
326 last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
327
328 buf->offset = data & ~PAGE_MASK;
329 buf->npages = last - first + 1;
330 buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
331 if (buf->pages == NULL)
332 return -ENOMEM;
333
334 down_read(&current->mm->mmap_sem);
335 ret = get_user_pages(current, current->mm, data & PAGE_MASK,
336 buf->npages,
337 buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
338 buf->pages, NULL);
339 up_read(&current->mm->mmap_sem);
340
341 if (ret != buf->npages) {
342 buf->npages = ret < 0 ? 0 : ret;
343 isp_video_buffer_cleanup(buf);
344 return -EFAULT;
345 }
346
347 ret = isp_video_buffer_lock_vma(buf, 1);
348 if (ret < 0)
349 isp_video_buffer_cleanup(buf);
350
351 return ret;
352}
353
354/*
355 * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
356 *
357 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
358 * memory and if they span a single VMA.
359 *
360 * Return 0 if the buffer is valid, or -EFAULT otherwise.
361 */
362static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
363{
364 struct vm_area_struct *vma;
365 unsigned long prev_pfn;
366 unsigned long this_pfn;
367 unsigned long start;
368 unsigned long end;
369 dma_addr_t pa = 0;
370 int ret = -EFAULT;
371
372 start = buf->vbuf.m.userptr;
373 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
374
375 buf->offset = start & ~PAGE_MASK;
376 buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
377 buf->pages = NULL;
378
379 down_read(&current->mm->mmap_sem);
380 vma = find_vma(current->mm, start);
381 if (vma == NULL || vma->vm_end < end)
382 goto done;
383
384 for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
385 ret = follow_pfn(vma, start, &this_pfn);
386 if (ret)
387 goto done;
388
389 if (prev_pfn == 0)
390 pa = this_pfn << PAGE_SHIFT;
391 else if (this_pfn != prev_pfn + 1) {
392 ret = -EFAULT;
393 goto done;
394 }
395
396 prev_pfn = this_pfn;
397 }
398
399 buf->paddr = pa + buf->offset;
400 ret = 0;
401
402done:
403 up_read(&current->mm->mmap_sem);
404 return ret;
405}
406
407/*
408 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
409 *
410 * This function locates the VMAs for the buffer's userspace address and checks
411 * that their flags match. The only flag that we need to care for at the moment
412 * is VM_PFNMAP.
413 *
414 * The buffer vm_flags field is set to the first VMA flags.
415 *
416 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
417 * have incompatible flags.
418 */
419static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
420{
421 struct vm_area_struct *vma;
422 pgprot_t uninitialized_var(vm_page_prot);
423 unsigned long start;
424 unsigned long end;
425 int ret = -EFAULT;
426
427 start = buf->vbuf.m.userptr;
428 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
429
430 down_read(&current->mm->mmap_sem);
431
432 do {
433 vma = find_vma(current->mm, start);
434 if (vma == NULL)
435 goto done;
436
437 if (start == buf->vbuf.m.userptr) {
438 buf->vm_flags = vma->vm_flags;
439 vm_page_prot = vma->vm_page_prot;
440 }
441
442 if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
443 goto done;
444
445 if (vm_page_prot != vma->vm_page_prot)
446 goto done;
447
448 start = vma->vm_end + 1;
449 } while (vma->vm_end < end);
450
451 /* Skip cache management to enhance performances for non-cached or
452 * write-combining buffers.
453 */
454 if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
455 vm_page_prot == pgprot_writecombine(vm_page_prot))
456 buf->skip_cache = true;
457
458 ret = 0;
459
460done:
461 up_read(&current->mm->mmap_sem);
462 return ret;
463}
464
465/*
466 * isp_video_buffer_prepare - Make a buffer ready for operation
467 *
468 * Preparing a buffer involves:
469 *
470 * - validating VMAs (userspace buffers only)
471 * - locking pages and VMAs into memory (userspace buffers only)
472 * - building page and scatter-gather lists
473 * - mapping buffers for DMA operation
474 * - performing driver-specific preparation
475 *
476 * The function must be called in userspace context with a valid mm context
477 * (this excludes cleanup paths such as sys_close when the userspace process
478 * segfaults).
479 */
480static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
481{
482 enum dma_data_direction direction;
483 int ret;
484
485 switch (buf->vbuf.memory) {
486 case V4L2_MEMORY_MMAP:
487 ret = isp_video_buffer_sglist_kernel(buf);
488 break;
489
490 case V4L2_MEMORY_USERPTR:
491 ret = isp_video_buffer_prepare_vm_flags(buf);
492 if (ret < 0)
493 return ret;
494
495 if (buf->vm_flags & VM_PFNMAP) {
496 ret = isp_video_buffer_prepare_pfnmap(buf);
497 if (ret < 0)
498 return ret;
499
500 ret = isp_video_buffer_sglist_pfnmap(buf);
501 } else {
502 ret = isp_video_buffer_prepare_user(buf);
503 if (ret < 0)
504 return ret;
505
506 ret = isp_video_buffer_sglist_user(buf);
507 }
508 break;
509
510 default:
511 return -EINVAL;
512 }
513
514 if (ret < 0)
515 goto done;
516
517 if (!(buf->vm_flags & VM_PFNMAP)) {
518 direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
519 ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
520 ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
521 direction);
522 if (ret != buf->sglen) {
523 ret = -EFAULT;
524 goto done;
525 }
526 }
527
528 if (buf->queue->ops->buffer_prepare)
529 ret = buf->queue->ops->buffer_prepare(buf);
530
531done:
532 if (ret < 0) {
533 isp_video_buffer_cleanup(buf);
534 return ret;
535 }
536
537 return ret;
538}
539
540/*
541 * isp_video_queue_query - Query the status of a given buffer
542 *
543 * Locking: must be called with the queue lock held.
544 */
545static void isp_video_buffer_query(struct isp_video_buffer *buf,
546 struct v4l2_buffer *vbuf)
547{
548 memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));
549
550 if (buf->vma_use_count)
551 vbuf->flags |= V4L2_BUF_FLAG_MAPPED;
552
553 switch (buf->state) {
554 case ISP_BUF_STATE_ERROR:
555 vbuf->flags |= V4L2_BUF_FLAG_ERROR;
556 /* Fallthrough */
557 case ISP_BUF_STATE_DONE:
558 vbuf->flags |= V4L2_BUF_FLAG_DONE;
559 break;
560 case ISP_BUF_STATE_QUEUED:
561 case ISP_BUF_STATE_ACTIVE:
562 vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
563 break;
564 case ISP_BUF_STATE_IDLE:
565 default:
566 break;
567 }
568}
569
570/*
571 * isp_video_buffer_wait - Wait for a buffer to be ready
572 *
573 * In non-blocking mode, return immediately with 0 if the buffer is ready or
574 * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
575 *
576 * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
577 * queue using the same condition.
578 */
579static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
580{
581 if (nonblocking) {
582 return (buf->state != ISP_BUF_STATE_QUEUED &&
583 buf->state != ISP_BUF_STATE_ACTIVE)
584 ? 0 : -EAGAIN;
585 }
586
587 return wait_event_interruptible(buf->wait,
588 buf->state != ISP_BUF_STATE_QUEUED &&
589 buf->state != ISP_BUF_STATE_ACTIVE);
590}
591
592/* -----------------------------------------------------------------------------
593 * Queue management
594 */
595
596/*
597 * isp_video_queue_free - Free video buffers memory
598 *
599 * Buffers can only be freed if the queue isn't streaming and if no buffer is
600 * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
601 *
602 * This function must be called with the queue lock held.
603 */
604static int isp_video_queue_free(struct isp_video_queue *queue)
605{
606 unsigned int i;
607
608 if (queue->streaming)
609 return -EBUSY;
610
611 for (i = 0; i < queue->count; ++i) {
612 if (queue->buffers[i]->vma_use_count != 0)
613 return -EBUSY;
614 }
615
616 for (i = 0; i < queue->count; ++i) {
617 struct isp_video_buffer *buf = queue->buffers[i];
618
619 isp_video_buffer_cleanup(buf);
620
621 vfree(buf->vaddr);
622 buf->vaddr = NULL;
623
624 kfree(buf);
625 queue->buffers[i] = NULL;
626 }
627
628 INIT_LIST_HEAD(&queue->queue);
629 queue->count = 0;
630 return 0;
631}
632
633/*
634 * isp_video_queue_alloc - Allocate video buffers memory
635 *
636 * This function must be called with the queue lock held.
637 */
638static int isp_video_queue_alloc(struct isp_video_queue *queue,
639 unsigned int nbuffers,
640 unsigned int size, enum v4l2_memory memory)
641{
642 struct isp_video_buffer *buf;
643 unsigned int i;
644 void *mem;
645 int ret;
646
647 /* Start by freeing the buffers. */
648 ret = isp_video_queue_free(queue);
649 if (ret < 0)
650 return ret;
651
652 /* Bail out if no buffers should be allocated. */
653 if (nbuffers == 0)
654 return 0;
655
656 /* Initialize the allocated buffers. */
657 for (i = 0; i < nbuffers; ++i) {
658 buf = kzalloc(queue->bufsize, GFP_KERNEL);
659 if (buf == NULL)
660 break;
661
662 if (memory == V4L2_MEMORY_MMAP) {
663 /* Allocate video buffers memory for mmap mode. Align
664 * the size to the page size.
665 */
666 mem = vmalloc_32_user(PAGE_ALIGN(size));
667 if (mem == NULL) {
668 kfree(buf);
669 break;
670 }
671
672 buf->vbuf.m.offset = i * PAGE_ALIGN(size);
673 buf->vaddr = mem;
674 }
675
676 buf->vbuf.index = i;
677 buf->vbuf.length = size;
678 buf->vbuf.type = queue->type;
679 buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
680 buf->vbuf.field = V4L2_FIELD_NONE;
681 buf->vbuf.memory = memory;
682
683 buf->queue = queue;
684 init_waitqueue_head(&buf->wait);
685
686 queue->buffers[i] = buf;
687 }
688
689 if (i == 0)
690 return -ENOMEM;
691
692 queue->count = i;
693 return nbuffers;
694}
695
696/**
697 * omap3isp_video_queue_cleanup - Clean up the video buffers queue
698 * @queue: Video buffers queue
699 *
700 * Free all allocated resources and clean up the video buffers queue. The queue
701 * must not be busy (no ongoing video stream) and buffers must have been
702 * unmapped.
703 *
704 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
705 * unmapped.
706 */
707int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
708{
709 return isp_video_queue_free(queue);
710}
711
712/**
713 * omap3isp_video_queue_init - Initialize the video buffers queue
714 * @queue: Video buffers queue
715 * @type: V4L2 buffer type (capture or output)
716 * @ops: Driver-specific queue operations
717 * @dev: Device used for DMA operations
718 * @bufsize: Size of the driver-specific buffer structure
719 *
720 * Initialize the video buffers queue with the supplied parameters.
721 *
722 * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
723 * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
724 *
725 * Buffer objects will be allocated using the given buffer size to allow room
726 * for driver-specific fields. Driver-specific buffer structures must start
727 * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
728 * structure must pass the size of the isp_video_buffer structure in the bufsize
729 * parameter.
730 *
731 * Return 0 on success.
732 */
733int omap3isp_video_queue_init(struct isp_video_queue *queue,
734 enum v4l2_buf_type type,
735 const struct isp_video_queue_operations *ops,
736 struct device *dev, unsigned int bufsize)
737{
738 INIT_LIST_HEAD(&queue->queue);
739 mutex_init(&queue->lock);
740 spin_lock_init(&queue->irqlock);
741
742 queue->type = type;
743 queue->ops = ops;
744 queue->dev = dev;
745 queue->bufsize = bufsize;
746
747 return 0;
748}
749
750/* -----------------------------------------------------------------------------
751 * V4L2 operations
752 */
753
754/**
755 * omap3isp_video_queue_reqbufs - Allocate video buffers memory
756 *
757 * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
758 * allocated video buffer objects and, for MMAP buffers, buffer memory.
759 *
760 * If the number of buffers is 0, all buffers are freed and the function returns
761 * without performing any allocation.
762 *
763 * If the number of buffers is not 0, currently allocated buffers (if any) are
764 * freed and the requested number of buffers are allocated. Depending on
765 * driver-specific requirements and on memory availability, a number of buffer
766 * smaller or bigger than requested can be allocated. This isn't considered as
767 * an error.
768 *
769 * Return 0 on success or one of the following error codes:
770 *
771 * -EINVAL if the buffer type or index are invalid
772 * -EBUSY if the queue is busy (streaming or buffers mapped)
773 * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
774 */
775int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
776 struct v4l2_requestbuffers *rb)
777{
778 unsigned int nbuffers = rb->count;
779 unsigned int size;
780 int ret;
781
782 if (rb->type != queue->type)
783 return -EINVAL;
784
785 queue->ops->queue_prepare(queue, &nbuffers, &size);
786 if (size == 0)
787 return -EINVAL;
788
789 nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);
790
791 mutex_lock(&queue->lock);
792
793 ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
794 if (ret < 0)
795 goto done;
796
797 rb->count = ret;
798 ret = 0;
799
800done:
801 mutex_unlock(&queue->lock);
802 return ret;
803}
804
805/**
806 * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
807 *
808 * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
809 * returns the status of a given video buffer.
810 *
811 * Return 0 on success or -EINVAL if the buffer type or index are invalid.
812 */
813int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
814 struct v4l2_buffer *vbuf)
815{
816 struct isp_video_buffer *buf;
817 int ret = 0;
818
819 if (vbuf->type != queue->type)
820 return -EINVAL;
821
822 mutex_lock(&queue->lock);
823
824 if (vbuf->index >= queue->count) {
825 ret = -EINVAL;
826 goto done;
827 }
828
829 buf = queue->buffers[vbuf->index];
830 isp_video_buffer_query(buf, vbuf);
831
832done:
833 mutex_unlock(&queue->lock);
834 return ret;
835}
836
837/**
838 * omap3isp_video_queue_qbuf - Queue a buffer
839 *
840 * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
841 *
842 * The v4l2_buffer structure passed from userspace is first sanity tested. If
843 * sane, the buffer is then processed and added to the main queue and, if the
844 * queue is streaming, to the IRQ queue.
845 *
846 * Before being enqueued, USERPTR buffers are checked for address changes. If
847 * the buffer has a different userspace address, the old memory area is unlocked
848 * and the new memory area is locked.
849 */
850int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
851 struct v4l2_buffer *vbuf)
852{
853 struct isp_video_buffer *buf;
854 unsigned long flags;
855 int ret = -EINVAL;
856
857 if (vbuf->type != queue->type)
858 goto done;
859
860 mutex_lock(&queue->lock);
861
862 if (vbuf->index >= queue->count)
863 goto done;
864
865 buf = queue->buffers[vbuf->index];
866
867 if (vbuf->memory != buf->vbuf.memory)
868 goto done;
869
870 if (buf->state != ISP_BUF_STATE_IDLE)
871 goto done;
872
873 if (vbuf->memory == V4L2_MEMORY_USERPTR &&
874 vbuf->length < buf->vbuf.length)
875 goto done;
876
877 if (vbuf->memory == V4L2_MEMORY_USERPTR &&
878 vbuf->m.userptr != buf->vbuf.m.userptr) {
879 isp_video_buffer_cleanup(buf);
880 buf->vbuf.m.userptr = vbuf->m.userptr;
881 buf->prepared = 0;
882 }
883
884 if (!buf->prepared) {
885 ret = isp_video_buffer_prepare(buf);
886 if (ret < 0)
887 goto done;
888 buf->prepared = 1;
889 }
890
891 isp_video_buffer_cache_sync(buf);
892
893 buf->state = ISP_BUF_STATE_QUEUED;
894 list_add_tail(&buf->stream, &queue->queue);
895
896 if (queue->streaming) {
897 spin_lock_irqsave(&queue->irqlock, flags);
898 queue->ops->buffer_queue(buf);
899 spin_unlock_irqrestore(&queue->irqlock, flags);
900 }
901
902 ret = 0;
903
904done:
905 mutex_unlock(&queue->lock);
906 return ret;
907}
908
909/**
910 * omap3isp_video_queue_dqbuf - Dequeue a buffer
911 *
912 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
913 *
914 * Wait until a buffer is ready to be dequeued, remove it from the queue and
915 * copy its information to the v4l2_buffer structure.
916 *
917 * If the nonblocking argument is not zero and no buffer is ready, return
918 * -EAGAIN immediately instead of waiting.
919 *
920 * If no buffer has been enqueued, or if the requested buffer type doesn't match
921 * the queue type, return -EINVAL.
922 */
923int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
924 struct v4l2_buffer *vbuf, int nonblocking)
925{
926 struct isp_video_buffer *buf;
927 int ret;
928
929 if (vbuf->type != queue->type)
930 return -EINVAL;
931
932 mutex_lock(&queue->lock);
933
934 if (list_empty(&queue->queue)) {
935 ret = -EINVAL;
936 goto done;
937 }
938
939 buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
940 ret = isp_video_buffer_wait(buf, nonblocking);
941 if (ret < 0)
942 goto done;
943
944 list_del(&buf->stream);
945
946 isp_video_buffer_query(buf, vbuf);
947 buf->state = ISP_BUF_STATE_IDLE;
948 vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;
949
950done:
951 mutex_unlock(&queue->lock);
952 return ret;
953}
954
955/**
956 * omap3isp_video_queue_streamon - Start streaming
957 *
958 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
959 * starts streaming on the queue and calls the buffer_queue operation for all
960 * queued buffers.
961 *
962 * Return 0 on success.
963 */
964int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
965{
966 struct isp_video_buffer *buf;
967 unsigned long flags;
968
969 mutex_lock(&queue->lock);
970
971 if (queue->streaming)
972 goto done;
973
974 queue->streaming = 1;
975
976 spin_lock_irqsave(&queue->irqlock, flags);
977 list_for_each_entry(buf, &queue->queue, stream)
978 queue->ops->buffer_queue(buf);
979 spin_unlock_irqrestore(&queue->irqlock, flags);
980
981done:
982 mutex_unlock(&queue->lock);
983 return 0;
984}
985
986/**
987 * omap3isp_video_queue_streamoff - Stop streaming
988 *
989 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
990 * stops streaming on the queue and wakes up all the buffers.
991 *
992 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
993 * delayed works before calling this function to make sure no buffer will be
994 * touched by the driver and/or hardware.
995 */
996void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
997{
998 struct isp_video_buffer *buf;
999 unsigned long flags;
1000 unsigned int i;
1001
1002 mutex_lock(&queue->lock);
1003
1004 if (!queue->streaming)
1005 goto done;
1006
1007 queue->streaming = 0;
1008
1009 spin_lock_irqsave(&queue->irqlock, flags);
1010 for (i = 0; i < queue->count; ++i) {
1011 buf = queue->buffers[i];
1012
1013 if (buf->state == ISP_BUF_STATE_ACTIVE)
1014 wake_up(&buf->wait);
1015
1016 buf->state = ISP_BUF_STATE_IDLE;
1017 }
1018 spin_unlock_irqrestore(&queue->irqlock, flags);
1019
1020 INIT_LIST_HEAD(&queue->queue);
1021
1022done:
1023 mutex_unlock(&queue->lock);
1024}
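
A hedged sketch of the ordering that last paragraph requires from callers. struct example_dev, example_hw_disable() and the dma_work field are hypothetical; synchronize_irq() and cancel_work_sync() are the usual kernel primitives. The same ordering applies to the vb2-based replacement introduced later in this patch (omap3isp_video_cancel_stream() followed by vb2_streamoff()).

/* Sketch only: quiesce the hardware before releasing the buffers. */
static void example_stop_capture(struct example_dev *dev)
{
	example_hw_disable(dev);		/* no more DMA into the buffers */
	synchronize_irq(dev->irq);		/* IRQ handler has finished */
	cancel_work_sync(&dev->dma_work);	/* so have delayed works */

	omap3isp_video_queue_streamoff(&dev->queue);
}
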
1025
1026/**
1027 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
1028 *
1029 * This function is intended to be used with suspend/resume operations. It
1030 * discards all 'done' buffers as they would be too old to be requested after
1031 * resume.
1032 *
1033 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
1034 * delayed works before calling this function to make sure no buffer will be
1035 * touched by the driver and/or hardware.
1036 */
1037void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
1038{
1039 struct isp_video_buffer *buf;
1040 unsigned int i;
1041
1042 mutex_lock(&queue->lock);
1043
1044 if (!queue->streaming)
1045 goto done;
1046
1047 for (i = 0; i < queue->count; ++i) {
1048 buf = queue->buffers[i];
1049
1050 if (buf->state == ISP_BUF_STATE_DONE)
1051 buf->state = ISP_BUF_STATE_ERROR;
1052 }
1053
1054done:
1055 mutex_unlock(&queue->lock);
1056}
1057
1058static void isp_video_queue_vm_open(struct vm_area_struct *vma)
1059{
1060 struct isp_video_buffer *buf = vma->vm_private_data;
1061
1062 buf->vma_use_count++;
1063}
1064
1065static void isp_video_queue_vm_close(struct vm_area_struct *vma)
1066{
1067 struct isp_video_buffer *buf = vma->vm_private_data;
1068
1069 buf->vma_use_count--;
1070}
1071
1072static const struct vm_operations_struct isp_video_queue_vm_ops = {
1073 .open = isp_video_queue_vm_open,
1074 .close = isp_video_queue_vm_close,
1075};
1076
1077/**
1078 * omap3isp_video_queue_mmap - Map buffers to userspace
1079 *
1080 * This function is intended to be used as an mmap() file operation handler. It
1081 * maps a buffer to userspace based on the VMA offset.
1082 *
1083 * Only buffers of memory type MMAP are supported.
1084 */
1085int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
1086 struct vm_area_struct *vma)
1087{
1088 struct isp_video_buffer *uninitialized_var(buf);
1089 unsigned long size;
1090 unsigned int i;
1091 int ret = 0;
1092
1093 mutex_lock(&queue->lock);
1094
1095 for (i = 0; i < queue->count; ++i) {
1096 buf = queue->buffers[i];
1097 if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
1098 break;
1099 }
1100
1101 if (i == queue->count) {
1102 ret = -EINVAL;
1103 goto done;
1104 }
1105
1106 size = vma->vm_end - vma->vm_start;
1107
1108 if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
1109 size != PAGE_ALIGN(buf->vbuf.length)) {
1110 ret = -EINVAL;
1111 goto done;
1112 }
1113
1114 ret = remap_vmalloc_range(vma, buf->vaddr, 0);
1115 if (ret < 0)
1116 goto done;
1117
1118 vma->vm_ops = &isp_video_queue_vm_ops;
1119 vma->vm_private_data = buf;
1120 isp_video_queue_vm_open(vma);
1121
1122done:
1123 mutex_unlock(&queue->lock);
1124 return ret;
1125}
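
The VMA offset mentioned above is the m.offset value reported by VIDIOC_QUERYBUF; a minimal userspace sketch (map_buffer is an illustrative name, capture node and MMAP memory assumed):

/* Hypothetical userspace sketch: map MMAP buffer 'index'. */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static void *map_buffer(int fd, unsigned int index, size_t *length)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;

	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
		return MAP_FAILED;

	*length = buf.length;
	/* The offset selects the buffer in the handler above. */
	return mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, buf.m.offset);
}
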
1126
1127/**
1128 * omap3isp_video_queue_poll - Poll video queue state
1129 *
1130 * This function is intended to be used as a poll() file operation handler. It
1131 * polls the state of the video buffer at the front of the queue and returns an
1132 * events mask.
1133 *
1134 * If no buffer is present at the front of the queue, POLLERR is returned.
1135 */
1136unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
1137 struct file *file, poll_table *wait)
1138{
1139 struct isp_video_buffer *buf;
1140 unsigned int mask = 0;
1141
1142 mutex_lock(&queue->lock);
1143 if (list_empty(&queue->queue)) {
1144 mask |= POLLERR;
1145 goto done;
1146 }
1147 buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
1148
1149 poll_wait(file, &buf->wait, wait);
1150 if (buf->state == ISP_BUF_STATE_DONE ||
1151 buf->state == ISP_BUF_STATE_ERROR) {
1152 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1153 mask |= POLLIN | POLLRDNORM;
1154 else
1155 mask |= POLLOUT | POLLWRNORM;
1156 }
1157
1158done:
1159 mutex_unlock(&queue->lock);
1160 return mask;
1161}
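
From userspace this surfaces through poll()/select() on the video node; a short hedged sketch (wait_for_buffer is an illustrative name, a capture node is assumed, so a completed buffer is signalled with POLLIN):

/* Hypothetical userspace sketch: wait for a completed capture buffer. */
#include <poll.h>

static int wait_for_buffer(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret <= 0)
		return ret;		/* 0: timeout, <0: error */
	if (pfd.revents & POLLERR)
		return -1;		/* nothing queued, as documented above */
	return 1;			/* a buffer is ready for VIDIOC_DQBUF */
}
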
diff --git a/drivers/media/platform/omap3isp/ispqueue.h b/drivers/media/platform/omap3isp/ispqueue.h
deleted file mode 100644
index 3e048ad65647..000000000000
--- a/drivers/media/platform/omap3isp/ispqueue.h
+++ /dev/null
@@ -1,188 +0,0 @@
1/*
2 * ispqueue.h
3 *
4 * TI OMAP3 ISP - Video buffers queue handling
5 *
6 * Copyright (C) 2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#ifndef OMAP3_ISP_QUEUE_H
27#define OMAP3_ISP_QUEUE_H
28
29#include <linux/kernel.h>
30#include <linux/list.h>
31#include <linux/mm_types.h>
32#include <linux/mutex.h>
33#include <linux/videodev2.h>
34#include <linux/wait.h>
35
36struct isp_video_queue;
37struct page;
38struct scatterlist;
39
40#define ISP_VIDEO_MAX_BUFFERS 16
41
42/**
43 * enum isp_video_buffer_state - ISP video buffer state
44 * @ISP_BUF_STATE_IDLE: The buffer is under userspace control (dequeued
45 * or not queued yet).
46 * @ISP_BUF_STATE_QUEUED: The buffer has been queued but isn't used by the
47 * device yet.
48 * @ISP_BUF_STATE_ACTIVE: The buffer is in use for an active video transfer.
49 * @ISP_BUF_STATE_ERROR: The device is done with the buffer and an error
 50 * occurred. For capture devices the buffer likely contains corrupted data or
51 * no data at all.
52 * @ISP_BUF_STATE_DONE: The device is done with the buffer and no error occurred.
53 * For capture devices the buffer contains valid data.
54 */
55enum isp_video_buffer_state {
56 ISP_BUF_STATE_IDLE,
57 ISP_BUF_STATE_QUEUED,
58 ISP_BUF_STATE_ACTIVE,
59 ISP_BUF_STATE_ERROR,
60 ISP_BUF_STATE_DONE,
61};
62
63/**
64 * struct isp_video_buffer - ISP video buffer
65 * @vma_use_count: Number of times the buffer is mmap'ed to userspace
66 * @stream: List head for insertion into main queue
67 * @queue: ISP buffers queue this buffer belongs to
68 * @prepared: Whether the buffer has been prepared
69 * @skip_cache: Whether to skip cache management operations for this buffer
70 * @vaddr: Memory virtual address (for kernel buffers)
71 * @vm_flags: Buffer VMA flags (for userspace buffers)
72 * @offset: Offset inside the first page (for userspace buffers)
73 * @npages: Number of pages (for userspace buffers)
74 * @pages: Pages table (for userspace non-VM_PFNMAP buffers)
75 * @paddr: Memory physical address (for userspace VM_PFNMAP buffers)
76 * @sglen: Number of elements in the scatter list (for non-VM_PFNMAP buffers)
77 * @sglist: Scatter list (for non-VM_PFNMAP buffers)
78 * @vbuf: V4L2 buffer
79 * @irqlist: List head for insertion into IRQ queue
80 * @state: Current buffer state
81 * @wait: Wait queue to signal buffer completion
82 */
83struct isp_video_buffer {
84 unsigned long vma_use_count;
85 struct list_head stream;
86 struct isp_video_queue *queue;
87 unsigned int prepared:1;
88 bool skip_cache;
89
90 /* For kernel buffers. */
91 void *vaddr;
92
93 /* For userspace buffers. */
94 vm_flags_t vm_flags;
95 unsigned long offset;
96 unsigned int npages;
97 struct page **pages;
98 dma_addr_t paddr;
99
100 /* For all buffers except VM_PFNMAP. */
101 unsigned int sglen;
102 struct scatterlist *sglist;
103
104 /* Touched by the interrupt handler. */
105 struct v4l2_buffer vbuf;
106 struct list_head irqlist;
107 enum isp_video_buffer_state state;
108 wait_queue_head_t wait;
109};
110
111#define to_isp_video_buffer(vb) container_of(vb, struct isp_video_buffer, vb)
112
113/**
114 * struct isp_video_queue_operations - Driver-specific operations
115 * @queue_prepare: Called before allocating buffers. Drivers should clamp the
116 * number of buffers according to their requirements, and must return the
117 * buffer size in bytes.
118 * @buffer_prepare: Called the first time a buffer is queued, or after changing
119 * the userspace memory address for a USERPTR buffer, with the queue lock
120 * held. Drivers should perform device-specific buffer preparation (such as
121 * mapping the buffer memory in an IOMMU). This operation is optional.
122 * @buffer_queue: Called when a buffer is being added to the queue with the
123 * queue irqlock spinlock held.
124 * @buffer_cleanup: Called before freeing buffers, or before changing the
125 * userspace memory address for a USERPTR buffer, with the queue lock held.
126 * Drivers must perform cleanup operations required to undo the
127 * buffer_prepare call. This operation is optional.
128 */
129struct isp_video_queue_operations {
130 void (*queue_prepare)(struct isp_video_queue *queue,
131 unsigned int *nbuffers, unsigned int *size);
132 int (*buffer_prepare)(struct isp_video_buffer *buf);
133 void (*buffer_queue)(struct isp_video_buffer *buf);
134 void (*buffer_cleanup)(struct isp_video_buffer *buf);
135};
136
137/**
138 * struct isp_video_queue - ISP video buffers queue
139 * @type: Type of video buffers handled by this queue
140 * @ops: Queue operations
141 * @dev: Device used for DMA operations
142 * @bufsize: Size of a driver-specific buffer object
143 * @count: Number of currently allocated buffers
144 * @buffers: ISP video buffers
145 * @lock: Mutex to protect access to the buffers, main queue and state
146 * @irqlock: Spinlock to protect access to the IRQ queue
147 * @streaming: Queue state, indicates whether the queue is streaming
148 * @queue: List of all queued buffers
149 */
150struct isp_video_queue {
151 enum v4l2_buf_type type;
152 const struct isp_video_queue_operations *ops;
153 struct device *dev;
154 unsigned int bufsize;
155
156 unsigned int count;
157 struct isp_video_buffer *buffers[ISP_VIDEO_MAX_BUFFERS];
158 struct mutex lock;
159 spinlock_t irqlock;
160
161 unsigned int streaming:1;
162
163 struct list_head queue;
164};
165
166int omap3isp_video_queue_cleanup(struct isp_video_queue *queue);
167int omap3isp_video_queue_init(struct isp_video_queue *queue,
168 enum v4l2_buf_type type,
169 const struct isp_video_queue_operations *ops,
170 struct device *dev, unsigned int bufsize);
171
172int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
173 struct v4l2_requestbuffers *rb);
174int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
175 struct v4l2_buffer *vbuf);
176int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
177 struct v4l2_buffer *vbuf);
178int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
179 struct v4l2_buffer *vbuf, int nonblocking);
180int omap3isp_video_queue_streamon(struct isp_video_queue *queue);
181void omap3isp_video_queue_streamoff(struct isp_video_queue *queue);
182void omap3isp_video_queue_discard_done(struct isp_video_queue *queue);
183int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
184 struct vm_area_struct *vma);
185unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
186 struct file *file, poll_table *wait);
187
188#endif /* OMAP3_ISP_QUEUE_H */
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 86369df81d74..6f077c2377db 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -1040,7 +1040,7 @@ static void resizer_isr_buffer(struct isp_res_device *res)
1040 */ 1040 */
1041 buffer = omap3isp_video_buffer_next(&res->video_out); 1041 buffer = omap3isp_video_buffer_next(&res->video_out);
1042 if (buffer != NULL) { 1042 if (buffer != NULL) {
1043 resizer_set_outaddr(res, buffer->isp_addr); 1043 resizer_set_outaddr(res, buffer->dma);
1044 restart = 1; 1044 restart = 1;
1045 } 1045 }
1046 1046
@@ -1049,7 +1049,7 @@ static void resizer_isr_buffer(struct isp_res_device *res)
1049 if (res->input == RESIZER_INPUT_MEMORY) { 1049 if (res->input == RESIZER_INPUT_MEMORY) {
1050 buffer = omap3isp_video_buffer_next(&res->video_in); 1050 buffer = omap3isp_video_buffer_next(&res->video_in);
1051 if (buffer != NULL) 1051 if (buffer != NULL)
1052 resizer_set_inaddr(res, buffer->isp_addr); 1052 resizer_set_inaddr(res, buffer->dma);
1053 pipe->state |= ISP_PIPELINE_IDLE_INPUT; 1053 pipe->state |= ISP_PIPELINE_IDLE_INPUT;
1054 } 1054 }
1055 1055
@@ -1101,7 +1101,7 @@ static int resizer_video_queue(struct isp_video *video,
1101 struct isp_res_device *res = &video->isp->isp_res; 1101 struct isp_res_device *res = &video->isp->isp_res;
1102 1102
1103 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) 1103 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1104 resizer_set_inaddr(res, buffer->isp_addr); 1104 resizer_set_inaddr(res, buffer->dma);
1105 1105
1106 /* 1106 /*
1107 * We now have a buffer queued on the output. Despite what the 1107 * We now have a buffer queued on the output. Despite what the
@@ -1116,7 +1116,7 @@ static int resizer_video_queue(struct isp_video *video,
1116 * continuous mode or when starting the stream. 1116 * continuous mode or when starting the stream.
1117 */ 1117 */
1118 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 1118 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1119 resizer_set_outaddr(res, buffer->isp_addr); 1119 resizer_set_outaddr(res, buffer->dma);
1120 1120
1121 return 0; 1121 return 0;
1122} 1122}
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 5707f85c4cc4..e6cbc1eaf4ca 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -26,13 +26,12 @@
26 */ 26 */
27 27
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/omap-iommu.h>
30#include <linux/slab.h> 29#include <linux/slab.h>
31#include <linux/uaccess.h> 30#include <linux/uaccess.h>
32 31
33#include "isp.h" 32#include "isp.h"
34 33
35#define IS_COHERENT_BUF(stat) ((stat)->dma_ch >= 0) 34#define ISP_STAT_USES_DMAENGINE(stat) ((stat)->dma_ch >= 0)
36 35
37/* 36/*
38 * MAGIC_SIZE must always be the greatest common divisor of 37 * MAGIC_SIZE must always be the greatest common divisor of
@@ -77,21 +76,10 @@ static void __isp_stat_buf_sync_magic(struct ispstat *stat,
77 dma_addr_t, unsigned long, size_t, 76 dma_addr_t, unsigned long, size_t,
78 enum dma_data_direction)) 77 enum dma_data_direction))
79{ 78{
80 struct device *dev = stat->isp->dev; 79 /* Sync the initial and final magic words. */
81 struct page *pg; 80 dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir);
82 dma_addr_t dma_addr; 81 dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK),
83 u32 offset; 82 buf_size & ~PAGE_MASK, MAGIC_SIZE, dir);
84
85 /* Initial magic words */
86 pg = vmalloc_to_page(buf->virt_addr);
87 dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
88 dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
89
90 /* Final magic words */
91 pg = vmalloc_to_page(buf->virt_addr + buf_size);
92 dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
93 offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
94 dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
95} 83}
96 84
97static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat, 85static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
@@ -99,7 +87,7 @@ static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
99 u32 buf_size, 87 u32 buf_size,
100 enum dma_data_direction dir) 88 enum dma_data_direction dir)
101{ 89{
102 if (IS_COHERENT_BUF(stat)) 90 if (ISP_STAT_USES_DMAENGINE(stat))
103 return; 91 return;
104 92
105 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, 93 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
@@ -111,7 +99,7 @@ static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
111 u32 buf_size, 99 u32 buf_size,
112 enum dma_data_direction dir) 100 enum dma_data_direction dir)
113{ 101{
114 if (IS_COHERENT_BUF(stat)) 102 if (ISP_STAT_USES_DMAENGINE(stat))
115 return; 103 return;
116 104
117 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, 105 __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
@@ -180,21 +168,21 @@ static void isp_stat_buf_insert_magic(struct ispstat *stat,
180static void isp_stat_buf_sync_for_device(struct ispstat *stat, 168static void isp_stat_buf_sync_for_device(struct ispstat *stat,
181 struct ispstat_buffer *buf) 169 struct ispstat_buffer *buf)
182{ 170{
183 if (IS_COHERENT_BUF(stat)) 171 if (ISP_STAT_USES_DMAENGINE(stat))
184 return; 172 return;
185 173
186 dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl, 174 dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
187 buf->iovm->sgt->nents, DMA_FROM_DEVICE); 175 buf->sgt.nents, DMA_FROM_DEVICE);
188} 176}
189 177
190static void isp_stat_buf_sync_for_cpu(struct ispstat *stat, 178static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
191 struct ispstat_buffer *buf) 179 struct ispstat_buffer *buf)
192{ 180{
193 if (IS_COHERENT_BUF(stat)) 181 if (ISP_STAT_USES_DMAENGINE(stat))
194 return; 182 return;
195 183
196 dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl, 184 dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
197 buf->iovm->sgt->nents, DMA_FROM_DEVICE); 185 buf->sgt.nents, DMA_FROM_DEVICE);
198} 186}
199 187
200static void isp_stat_buf_clear(struct ispstat *stat) 188static void isp_stat_buf_clear(struct ispstat *stat)
@@ -354,29 +342,21 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
354 342
355static void isp_stat_bufs_free(struct ispstat *stat) 343static void isp_stat_bufs_free(struct ispstat *stat)
356{ 344{
357 struct isp_device *isp = stat->isp; 345 struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
358 int i; 346 ? NULL : stat->isp->dev;
347 unsigned int i;
359 348
360 for (i = 0; i < STAT_MAX_BUFS; i++) { 349 for (i = 0; i < STAT_MAX_BUFS; i++) {
361 struct ispstat_buffer *buf = &stat->buf[i]; 350 struct ispstat_buffer *buf = &stat->buf[i];
362 351
363 if (!IS_COHERENT_BUF(stat)) { 352 if (!buf->virt_addr)
364 if (IS_ERR_OR_NULL((void *)buf->iommu_addr)) 353 continue;
365 continue; 354
366 if (buf->iovm) 355 sg_free_table(&buf->sgt);
367 dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl, 356
368 buf->iovm->sgt->nents, 357 dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr,
369 DMA_FROM_DEVICE); 358 buf->dma_addr);
370 omap_iommu_vfree(isp->domain, isp->dev, 359
371 buf->iommu_addr);
372 } else {
373 if (!buf->virt_addr)
374 continue;
375 dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
376 buf->virt_addr, buf->dma_addr);
377 }
378 buf->iommu_addr = 0;
379 buf->iovm = NULL;
380 buf->dma_addr = 0; 360 buf->dma_addr = 0;
381 buf->virt_addr = NULL; 361 buf->virt_addr = NULL;
382 buf->empty = 1; 362 buf->empty = 1;
@@ -389,83 +369,51 @@ static void isp_stat_bufs_free(struct ispstat *stat)
389 stat->active_buf = NULL; 369 stat->active_buf = NULL;
390} 370}
391 371
392static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) 372static int isp_stat_bufs_alloc_one(struct device *dev,
393{ 373 struct ispstat_buffer *buf,
394 struct isp_device *isp = stat->isp; 374 unsigned int size)
395 int i;
396
397 stat->buf_alloc_size = size;
398
399 for (i = 0; i < STAT_MAX_BUFS; i++) {
400 struct ispstat_buffer *buf = &stat->buf[i];
401 struct iovm_struct *iovm;
402
403 WARN_ON(buf->dma_addr);
404 buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
405 size, IOMMU_FLAG);
406 if (IS_ERR((void *)buf->iommu_addr)) {
407 dev_err(stat->isp->dev,
408 "%s: Can't acquire memory for "
409 "buffer %d\n", stat->subdev.name, i);
410 isp_stat_bufs_free(stat);
411 return -ENOMEM;
412 }
413
414 iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
415 if (!iovm ||
416 !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
417 DMA_FROM_DEVICE)) {
418 isp_stat_bufs_free(stat);
419 return -ENOMEM;
420 }
421 buf->iovm = iovm;
422
423 buf->virt_addr = omap_da_to_va(stat->isp->dev,
424 (u32)buf->iommu_addr);
425 buf->empty = 1;
426 dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
427 "iommu_addr=0x%08lx virt_addr=0x%08lx",
428 stat->subdev.name, i, buf->iommu_addr,
429 (unsigned long)buf->virt_addr);
430 }
431
432 return 0;
433}
434
435static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
436{ 375{
437 int i; 376 int ret;
438
439 stat->buf_alloc_size = size;
440
441 for (i = 0; i < STAT_MAX_BUFS; i++) {
442 struct ispstat_buffer *buf = &stat->buf[i];
443
444 WARN_ON(buf->iommu_addr);
445 buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
446 &buf->dma_addr, GFP_KERNEL | GFP_DMA);
447 377
448 if (!buf->virt_addr || !buf->dma_addr) { 378 buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
449 dev_info(stat->isp->dev, 379 GFP_KERNEL | GFP_DMA);
450 "%s: Can't acquire memory for " 380 if (!buf->virt_addr)
451 "DMA buffer %d\n", stat->subdev.name, i); 381 return -ENOMEM;
452 isp_stat_bufs_free(stat);
453 return -ENOMEM;
454 }
455 buf->empty = 1;
456 382
457 dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." 383 ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
458 "dma_addr=0x%08lx virt_addr=0x%08lx\n", 384 size);
459 stat->subdev.name, i, (unsigned long)buf->dma_addr, 385 if (ret < 0) {
460 (unsigned long)buf->virt_addr); 386 dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
387 buf->virt_addr = NULL;
388 buf->dma_addr = 0;
389 return ret;
461 } 390 }
462 391
463 return 0; 392 return 0;
464} 393}
465 394
395/*
396 * The device passed to the DMA API depends on whether the statistics block uses
397 * ISP DMA, external DMA or PIO to transfer data.
398 *
399 * The first case (for the AEWB and AF engines) passes the ISP device, resulting
400 * in the DMA buffers being mapped through the ISP IOMMU.
401 *
402 * The second case (for the histogram engine) should pass the DMA engine device.
403 * As that device isn't accessible through the OMAP DMA engine API the driver
404 * passes NULL instead, resulting in the buffers being mapped directly as
405 * physical pages.
406 *
407 * The third case (for the histogram engine) doesn't require any mapping. The
408 * buffers could be allocated with kmalloc/vmalloc, but we still use
409 * dma_alloc_coherent() for consistency purposes.
410 */
466static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) 411static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
467{ 412{
413 struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
414 ? NULL : stat->isp->dev;
468 unsigned long flags; 415 unsigned long flags;
416 unsigned int i;
469 417
470 spin_lock_irqsave(&stat->isp->stat_lock, flags); 418 spin_lock_irqsave(&stat->isp->stat_lock, flags);
471 419
@@ -489,10 +437,31 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
489 437
490 isp_stat_bufs_free(stat); 438 isp_stat_bufs_free(stat);
491 439
492 if (IS_COHERENT_BUF(stat)) 440 stat->buf_alloc_size = size;
493 return isp_stat_bufs_alloc_dma(stat, size); 441
494 else 442 for (i = 0; i < STAT_MAX_BUFS; i++) {
495 return isp_stat_bufs_alloc_iommu(stat, size); 443 struct ispstat_buffer *buf = &stat->buf[i];
444 int ret;
445
446 ret = isp_stat_bufs_alloc_one(dev, buf, size);
447 if (ret < 0) {
448 dev_err(stat->isp->dev,
449 "%s: Failed to allocate DMA buffer %u\n",
450 stat->subdev.name, i);
451 isp_stat_bufs_free(stat);
452 return ret;
453 }
454
455 buf->empty = 1;
456
457 dev_dbg(stat->isp->dev,
458 "%s: buffer[%u] allocated. dma=0x%08lx virt=0x%08lx",
459 stat->subdev.name, i,
460 (unsigned long)buf->dma_addr,
461 (unsigned long)buf->virt_addr);
462 }
463
464 return 0;
496} 465}
497 466
498static void isp_stat_queue_event(struct ispstat *stat, int err) 467static void isp_stat_queue_event(struct ispstat *stat, int err)
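
To make the offset arithmetic of the reworked __isp_stat_buf_sync_magic() concrete, a hedged worked example with assumed values (4 KiB pages, a buf_size of 0x1234); the dma_sync callback is presumably dma_sync_single_range_for_{cpu,device}, whose signature the calls match:

/*
 * buf_size & PAGE_MASK  = 0x1000  -> added to the address handed to dma_sync()
 * buf_size & ~PAGE_MASK = 0x0234  -> offset within that mapping
 *
 * The first call therefore syncs bytes [0, MAGIC_SIZE) of the buffer and the
 * second syncs [0x1234, 0x1234 + MAGIC_SIZE), i.e. the trailing magic word
 * written just past the statistics payload - the same bytes the removed
 * vmalloc_to_page()/pfn_to_dma() version reached page by page.
 */
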
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
index 9a047c929b9f..58d6ac7cb664 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -46,8 +46,7 @@
46struct ispstat; 46struct ispstat;
47 47
48struct ispstat_buffer { 48struct ispstat_buffer {
49 unsigned long iommu_addr; 49 struct sg_table sgt;
50 struct iovm_struct *iovm;
51 void *virt_addr; 50 void *virt_addr;
52 dma_addr_t dma_addr; 51 dma_addr_t dma_addr;
53 struct timespec ts; 52 struct timespec ts;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 85b4036ba5e4..e36bac26476c 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -27,7 +27,6 @@
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/omap-iommu.h>
31#include <linux/pagemap.h> 30#include <linux/pagemap.h>
32#include <linux/scatterlist.h> 31#include <linux/scatterlist.h>
33#include <linux/sched.h> 32#include <linux/sched.h>
@@ -35,6 +34,7 @@
35#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
36#include <media/v4l2-dev.h> 35#include <media/v4l2-dev.h>
37#include <media/v4l2-ioctl.h> 36#include <media/v4l2-ioctl.h>
37#include <media/videobuf2-dma-contig.h>
38 38
39#include "ispvideo.h" 39#include "ispvideo.h"
40#include "isp.h" 40#include "isp.h"
@@ -326,90 +326,36 @@ isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
326} 326}
327 327
328/* ----------------------------------------------------------------------------- 328/* -----------------------------------------------------------------------------
329 * IOMMU management
330 */
331
332#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
333
334/*
335 * ispmmu_vmap - Wrapper for Virtual memory mapping of a scatter gather list
336 * @isp: Device pointer specific to the OMAP3 ISP.
337 * @sglist: Pointer to source Scatter gather list to allocate.
338 * @sglen: Number of elements of the scatter-gather list.
339 *
340 * Returns a resulting mapped device address by the ISP MMU, or -ENOMEM if
341 * we ran out of memory.
342 */
343static dma_addr_t
344ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
345{
346 struct sg_table *sgt;
347 u32 da;
348
349 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
350 if (sgt == NULL)
351 return -ENOMEM;
352
353 sgt->sgl = (struct scatterlist *)sglist;
354 sgt->nents = sglen;
355 sgt->orig_nents = sglen;
356
357 da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
358 if (IS_ERR_VALUE(da))
359 kfree(sgt);
360
361 return da;
362}
363
364/*
365 * ispmmu_vunmap - Unmap a device address from the ISP MMU
366 * @isp: Device pointer specific to the OMAP3 ISP.
367 * @da: Device address generated from a ispmmu_vmap call.
368 */
369static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
370{
371 struct sg_table *sgt;
372
373 sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
374 kfree(sgt);
375}
376
377/* -----------------------------------------------------------------------------
378 * Video queue operations 329 * Video queue operations
379 */ 330 */
380 331
381static void isp_video_queue_prepare(struct isp_video_queue *queue, 332static int isp_video_queue_setup(struct vb2_queue *queue,
382 unsigned int *nbuffers, unsigned int *size) 333 const struct v4l2_format *fmt,
334 unsigned int *count, unsigned int *num_planes,
335 unsigned int sizes[], void *alloc_ctxs[])
383{ 336{
384 struct isp_video_fh *vfh = 337 struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
385 container_of(queue, struct isp_video_fh, queue);
386 struct isp_video *video = vfh->video; 338 struct isp_video *video = vfh->video;
387 339
388 *size = vfh->format.fmt.pix.sizeimage; 340 *num_planes = 1;
389 if (*size == 0)
390 return;
391 341
392 *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size)); 342 sizes[0] = vfh->format.fmt.pix.sizeimage;
393} 343 if (sizes[0] == 0)
344 return -EINVAL;
394 345
395static void isp_video_buffer_cleanup(struct isp_video_buffer *buf) 346 alloc_ctxs[0] = video->alloc_ctx;
396{
397 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
398 struct isp_buffer *buffer = to_isp_buffer(buf);
399 struct isp_video *video = vfh->video;
400 347
401 if (buffer->isp_addr) { 348 *count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));
402 ispmmu_vunmap(video->isp, buffer->isp_addr); 349
403 buffer->isp_addr = 0; 350 return 0;
404 }
405} 351}
406 352
407static int isp_video_buffer_prepare(struct isp_video_buffer *buf) 353static int isp_video_buffer_prepare(struct vb2_buffer *buf)
408{ 354{
409 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue); 355 struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
410 struct isp_buffer *buffer = to_isp_buffer(buf); 356 struct isp_buffer *buffer = to_isp_buffer(buf);
411 struct isp_video *video = vfh->video; 357 struct isp_video *video = vfh->video;
412 unsigned long addr; 358 dma_addr_t addr;
413 359
414 /* Refuse to prepare the buffer if the video node has registered an 360 /* Refuse to prepare the buffer if the video node has registered an
415 * error. We don't need to take any lock here as the operation is 361 * error. We don't need to take any lock here as the operation is
@@ -420,19 +366,16 @@ static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
420 if (unlikely(video->error)) 366 if (unlikely(video->error))
421 return -EIO; 367 return -EIO;
422 368
423 addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen); 369 addr = vb2_dma_contig_plane_dma_addr(buf, 0);
424 if (IS_ERR_VALUE(addr))
425 return -EIO;
426
427 if (!IS_ALIGNED(addr, 32)) { 370 if (!IS_ALIGNED(addr, 32)) {
428 dev_dbg(video->isp->dev, "Buffer address must be " 371 dev_dbg(video->isp->dev,
429 "aligned to 32 bytes boundary.\n"); 372 "Buffer address must be aligned to 32 bytes boundary.\n");
430 ispmmu_vunmap(video->isp, buffer->isp_addr);
431 return -EINVAL; 373 return -EINVAL;
432 } 374 }
433 375
434 buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage; 376 vb2_set_plane_payload(&buffer->vb, 0, vfh->format.fmt.pix.sizeimage);
435 buffer->isp_addr = addr; 377 buffer->dma = addr;
378
436 return 0; 379 return 0;
437} 380}
438 381
@@ -445,9 +388,9 @@ static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
445 * If the pipeline is busy, it will be restarted in the output module interrupt 388 * If the pipeline is busy, it will be restarted in the output module interrupt
446 * handler. 389 * handler.
447 */ 390 */
448static void isp_video_buffer_queue(struct isp_video_buffer *buf) 391static void isp_video_buffer_queue(struct vb2_buffer *buf)
449{ 392{
450 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue); 393 struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
451 struct isp_buffer *buffer = to_isp_buffer(buf); 394 struct isp_buffer *buffer = to_isp_buffer(buf);
452 struct isp_video *video = vfh->video; 395 struct isp_video *video = vfh->video;
453 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); 396 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
@@ -456,14 +399,18 @@ static void isp_video_buffer_queue(struct isp_video_buffer *buf)
456 unsigned int empty; 399 unsigned int empty;
457 unsigned int start; 400 unsigned int start;
458 401
402 spin_lock_irqsave(&video->irqlock, flags);
403
459 if (unlikely(video->error)) { 404 if (unlikely(video->error)) {
460 buf->state = ISP_BUF_STATE_ERROR; 405 vb2_buffer_done(&buffer->vb, VB2_BUF_STATE_ERROR);
461 wake_up(&buf->wait); 406 spin_unlock_irqrestore(&video->irqlock, flags);
462 return; 407 return;
463 } 408 }
464 409
465 empty = list_empty(&video->dmaqueue); 410 empty = list_empty(&video->dmaqueue);
466 list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue); 411 list_add_tail(&buffer->irqlist, &video->dmaqueue);
412
413 spin_unlock_irqrestore(&video->irqlock, flags);
467 414
468 if (empty) { 415 if (empty) {
469 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 416 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -487,23 +434,22 @@ static void isp_video_buffer_queue(struct isp_video_buffer *buf)
487 } 434 }
488} 435}
489 436
490static const struct isp_video_queue_operations isp_video_queue_ops = { 437static const struct vb2_ops isp_video_queue_ops = {
491 .queue_prepare = &isp_video_queue_prepare, 438 .queue_setup = isp_video_queue_setup,
492 .buffer_prepare = &isp_video_buffer_prepare, 439 .buf_prepare = isp_video_buffer_prepare,
493 .buffer_queue = &isp_video_buffer_queue, 440 .buf_queue = isp_video_buffer_queue,
494 .buffer_cleanup = &isp_video_buffer_cleanup,
495}; 441};
496 442
497/* 443/*
498 * omap3isp_video_buffer_next - Complete the current buffer and return the next 444 * omap3isp_video_buffer_next - Complete the current buffer and return the next
499 * @video: ISP video object 445 * @video: ISP video object
500 * 446 *
501 * Remove the current video buffer from the DMA queue and fill its timestamp, 447 * Remove the current video buffer from the DMA queue and fill its timestamp and
502 * field count and state fields before waking up its completion handler. 448 * field count before handing it back to videobuf2.
503 * 449 *
504 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no 450 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
505 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise. 451 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
506 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE. 452 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
507 * 453 *
508 * The DMA queue is expected to contain at least one buffer. 454 * The DMA queue is expected to contain at least one buffer.
509 * 455 *
@@ -513,26 +459,25 @@ static const struct isp_video_queue_operations isp_video_queue_ops = {
513struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video) 459struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
514{ 460{
515 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); 461 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
516 struct isp_video_queue *queue = video->queue;
517 enum isp_pipeline_state state; 462 enum isp_pipeline_state state;
518 struct isp_video_buffer *buf; 463 struct isp_buffer *buf;
519 unsigned long flags; 464 unsigned long flags;
520 struct timespec ts; 465 struct timespec ts;
521 466
522 spin_lock_irqsave(&queue->irqlock, flags); 467 spin_lock_irqsave(&video->irqlock, flags);
523 if (WARN_ON(list_empty(&video->dmaqueue))) { 468 if (WARN_ON(list_empty(&video->dmaqueue))) {
524 spin_unlock_irqrestore(&queue->irqlock, flags); 469 spin_unlock_irqrestore(&video->irqlock, flags);
525 return NULL; 470 return NULL;
526 } 471 }
527 472
528 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer, 473 buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
529 irqlist); 474 irqlist);
530 list_del(&buf->irqlist); 475 list_del(&buf->irqlist);
531 spin_unlock_irqrestore(&queue->irqlock, flags); 476 spin_unlock_irqrestore(&video->irqlock, flags);
532 477
533 ktime_get_ts(&ts); 478 ktime_get_ts(&ts);
534 buf->vbuf.timestamp.tv_sec = ts.tv_sec; 479 buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
535 buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC; 480 buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
536 481
537 /* Do frame number propagation only if this is the output video node. 482 /* Do frame number propagation only if this is the output video node.
538 * Frame number either comes from the CSI receivers or it gets 483 * Frame number either comes from the CSI receivers or it gets
@@ -541,22 +486,27 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
541 * first, so the input number might lag behind by 1 in some cases. 486 * first, so the input number might lag behind by 1 in some cases.
542 */ 487 */
543 if (video == pipe->output && !pipe->do_propagation) 488 if (video == pipe->output && !pipe->do_propagation)
544 buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number); 489 buf->vb.v4l2_buf.sequence =
490 atomic_inc_return(&pipe->frame_number);
545 else 491 else
546 buf->vbuf.sequence = atomic_read(&pipe->frame_number); 492 buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
547 493
548 /* Report pipeline errors to userspace on the capture device side. */ 494 /* Report pipeline errors to userspace on the capture device side. */
549 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) { 495 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
550 buf->state = ISP_BUF_STATE_ERROR; 496 state = VB2_BUF_STATE_ERROR;
551 pipe->error = false; 497 pipe->error = false;
552 } else { 498 } else {
553 buf->state = ISP_BUF_STATE_DONE; 499 state = VB2_BUF_STATE_DONE;
554 } 500 }
555 501
556 wake_up(&buf->wait); 502 vb2_buffer_done(&buf->vb, state);
503
504 spin_lock_irqsave(&video->irqlock, flags);
557 505
558 if (list_empty(&video->dmaqueue)) { 506 if (list_empty(&video->dmaqueue)) {
559 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 507 spin_unlock_irqrestore(&video->irqlock, flags);
508
509 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
560 state = ISP_PIPELINE_QUEUE_OUTPUT 510 state = ISP_PIPELINE_QUEUE_OUTPUT
561 | ISP_PIPELINE_STREAM; 511 | ISP_PIPELINE_STREAM;
562 else 512 else
@@ -571,16 +521,19 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
571 return NULL; 521 return NULL;
572 } 522 }
573 523
574 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) { 524 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
575 spin_lock_irqsave(&pipe->lock, flags); 525 spin_lock(&pipe->lock);
576 pipe->state &= ~ISP_PIPELINE_STREAM; 526 pipe->state &= ~ISP_PIPELINE_STREAM;
577 spin_unlock_irqrestore(&pipe->lock, flags); 527 spin_unlock(&pipe->lock);
578 } 528 }
579 529
580 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer, 530 buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
581 irqlist); 531 irqlist);
582 buf->state = ISP_BUF_STATE_ACTIVE; 532 buf->vb.state = VB2_BUF_STATE_ACTIVE;
583 return to_isp_buffer(buf); 533
534 spin_unlock_irqrestore(&video->irqlock, flags);
535
536 return buf;
584} 537}
585 538
586/* 539/*
@@ -592,25 +545,22 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
592 */ 545 */
593void omap3isp_video_cancel_stream(struct isp_video *video) 546void omap3isp_video_cancel_stream(struct isp_video *video)
594{ 547{
595 struct isp_video_queue *queue = video->queue;
596 unsigned long flags; 548 unsigned long flags;
597 549
598 spin_lock_irqsave(&queue->irqlock, flags); 550 spin_lock_irqsave(&video->irqlock, flags);
599 551
600 while (!list_empty(&video->dmaqueue)) { 552 while (!list_empty(&video->dmaqueue)) {
601 struct isp_video_buffer *buf; 553 struct isp_buffer *buf;
602 554
603 buf = list_first_entry(&video->dmaqueue, 555 buf = list_first_entry(&video->dmaqueue,
604 struct isp_video_buffer, irqlist); 556 struct isp_buffer, irqlist);
605 list_del(&buf->irqlist); 557 list_del(&buf->irqlist);
606 558 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
607 buf->state = ISP_BUF_STATE_ERROR;
608 wake_up(&buf->wait);
609 } 559 }
610 560
611 video->error = true; 561 video->error = true;
612 562
613 spin_unlock_irqrestore(&queue->irqlock, flags); 563 spin_unlock_irqrestore(&video->irqlock, flags);
614} 564}
615 565
616/* 566/*
@@ -627,12 +577,15 @@ void omap3isp_video_resume(struct isp_video *video, int continuous)
627{ 577{
628 struct isp_buffer *buf = NULL; 578 struct isp_buffer *buf = NULL;
629 579
630 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 580 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
631 omap3isp_video_queue_discard_done(video->queue); 581 mutex_lock(&video->queue_lock);
582 vb2_discard_done(video->queue);
583 mutex_unlock(&video->queue_lock);
584 }
632 585
633 if (!list_empty(&video->dmaqueue)) { 586 if (!list_empty(&video->dmaqueue)) {
634 buf = list_first_entry(&video->dmaqueue, 587 buf = list_first_entry(&video->dmaqueue,
635 struct isp_buffer, buffer.irqlist); 588 struct isp_buffer, irqlist);
636 video->ops->queue(video, buf); 589 video->ops->queue(video, buf);
637 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED; 590 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
638 } else { 591 } else {
@@ -840,33 +793,56 @@ static int
840isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb) 793isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
841{ 794{
842 struct isp_video_fh *vfh = to_isp_video_fh(fh); 795 struct isp_video_fh *vfh = to_isp_video_fh(fh);
796 struct isp_video *video = video_drvdata(file);
797 int ret;
843 798
844 return omap3isp_video_queue_reqbufs(&vfh->queue, rb); 799 mutex_lock(&video->queue_lock);
800 ret = vb2_reqbufs(&vfh->queue, rb);
801 mutex_unlock(&video->queue_lock);
802
803 return ret;
845} 804}
846 805
847static int 806static int
848isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b) 807isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
849{ 808{
850 struct isp_video_fh *vfh = to_isp_video_fh(fh); 809 struct isp_video_fh *vfh = to_isp_video_fh(fh);
810 struct isp_video *video = video_drvdata(file);
811 int ret;
812
813 mutex_lock(&video->queue_lock);
814 ret = vb2_querybuf(&vfh->queue, b);
815 mutex_unlock(&video->queue_lock);
851 816
852 return omap3isp_video_queue_querybuf(&vfh->queue, b); 817 return ret;
853} 818}
854 819
855static int 820static int
856isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) 821isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
857{ 822{
858 struct isp_video_fh *vfh = to_isp_video_fh(fh); 823 struct isp_video_fh *vfh = to_isp_video_fh(fh);
824 struct isp_video *video = video_drvdata(file);
825 int ret;
859 826
860 return omap3isp_video_queue_qbuf(&vfh->queue, b); 827 mutex_lock(&video->queue_lock);
828 ret = vb2_qbuf(&vfh->queue, b);
829 mutex_unlock(&video->queue_lock);
830
831 return ret;
861} 832}
862 833
863static int 834static int
864isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b) 835isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
865{ 836{
866 struct isp_video_fh *vfh = to_isp_video_fh(fh); 837 struct isp_video_fh *vfh = to_isp_video_fh(fh);
838 struct isp_video *video = video_drvdata(file);
839 int ret;
840
841 mutex_lock(&video->queue_lock);
842 ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
843 mutex_unlock(&video->queue_lock);
867 844
868 return omap3isp_video_queue_dqbuf(&vfh->queue, b, 845 return ret;
869 file->f_flags & O_NONBLOCK);
870} 846}
871 847
872static int isp_video_check_external_subdevs(struct isp_video *video, 848static int isp_video_check_external_subdevs(struct isp_video *video,
@@ -1006,11 +982,6 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
1006 982
1007 mutex_lock(&video->stream_lock); 983 mutex_lock(&video->stream_lock);
1008 984
1009 if (video->streaming) {
1010 mutex_unlock(&video->stream_lock);
1011 return -EBUSY;
1012 }
1013
1014 /* Start streaming on the pipeline. No link touching an entity in the 985 /* Start streaming on the pipeline. No link touching an entity in the
1015 * pipeline can be activated or deactivated once streaming is started. 986 * pipeline can be activated or deactivated once streaming is started.
1016 */ 987 */
@@ -1069,7 +1040,9 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
1069 INIT_LIST_HEAD(&video->dmaqueue); 1040 INIT_LIST_HEAD(&video->dmaqueue);
1070 atomic_set(&pipe->frame_number, -1); 1041 atomic_set(&pipe->frame_number, -1);
1071 1042
1072 ret = omap3isp_video_queue_streamon(&vfh->queue); 1043 mutex_lock(&video->queue_lock);
1044 ret = vb2_streamon(&vfh->queue, type);
1045 mutex_unlock(&video->queue_lock);
1073 if (ret < 0) 1046 if (ret < 0)
1074 goto err_check_format; 1047 goto err_check_format;
1075 1048
@@ -1082,19 +1055,19 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
1082 ISP_PIPELINE_STREAM_CONTINUOUS); 1055 ISP_PIPELINE_STREAM_CONTINUOUS);
1083 if (ret < 0) 1056 if (ret < 0)
1084 goto err_set_stream; 1057 goto err_set_stream;
1085 spin_lock_irqsave(&video->queue->irqlock, flags); 1058 spin_lock_irqsave(&video->irqlock, flags);
1086 if (list_empty(&video->dmaqueue)) 1059 if (list_empty(&video->dmaqueue))
1087 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; 1060 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
1088 spin_unlock_irqrestore(&video->queue->irqlock, flags); 1061 spin_unlock_irqrestore(&video->irqlock, flags);
1089 } 1062 }
1090 1063
1091 video->streaming = 1;
1092
1093 mutex_unlock(&video->stream_lock); 1064 mutex_unlock(&video->stream_lock);
1094 return 0; 1065 return 0;
1095 1066
1096err_set_stream: 1067err_set_stream:
1097 omap3isp_video_queue_streamoff(&vfh->queue); 1068 mutex_lock(&video->queue_lock);
1069 vb2_streamoff(&vfh->queue, type);
1070 mutex_unlock(&video->queue_lock);
1098err_check_format: 1071err_check_format:
1099 media_entity_pipeline_stop(&video->video.entity); 1072 media_entity_pipeline_stop(&video->video.entity);
1100err_pipeline_start: 1073err_pipeline_start:
@@ -1130,9 +1103,9 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1130 mutex_lock(&video->stream_lock); 1103 mutex_lock(&video->stream_lock);
1131 1104
1132 /* Make sure the queue is actually streaming. */ 1105 /* Make sure the queue is actually streaming. */
1133 mutex_lock(&vfh->queue.lock); 1106 mutex_lock(&video->queue_lock);
1134 streaming = vfh->queue.streaming; 1107 streaming = vb2_is_streaming(&vfh->queue);
1135 mutex_unlock(&vfh->queue.lock); 1108 mutex_unlock(&video->queue_lock);
1136 1109
1137 if (!streaming) 1110 if (!streaming)
1138 goto done; 1111 goto done;
@@ -1151,9 +1124,12 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1151 1124
1152 /* Stop the stream. */ 1125 /* Stop the stream. */
1153 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED); 1126 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
1154 omap3isp_video_queue_streamoff(&vfh->queue); 1127 omap3isp_video_cancel_stream(video);
1128
1129 mutex_lock(&video->queue_lock);
1130 vb2_streamoff(&vfh->queue, type);
1131 mutex_unlock(&video->queue_lock);
1155 video->queue = NULL; 1132 video->queue = NULL;
1156 video->streaming = 0;
1157 video->error = false; 1133 video->error = false;
1158 1134
1159 if (video->isp->pdata->set_constraints) 1135 if (video->isp->pdata->set_constraints)
@@ -1223,6 +1199,7 @@ static int isp_video_open(struct file *file)
1223{ 1199{
1224 struct isp_video *video = video_drvdata(file); 1200 struct isp_video *video = video_drvdata(file);
1225 struct isp_video_fh *handle; 1201 struct isp_video_fh *handle;
1202 struct vb2_queue *queue;
1226 int ret = 0; 1203 int ret = 0;
1227 1204
1228 handle = kzalloc(sizeof(*handle), GFP_KERNEL); 1205 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
@@ -1244,9 +1221,20 @@ static int isp_video_open(struct file *file)
1244 goto done; 1221 goto done;
1245 } 1222 }
1246 1223
1247 omap3isp_video_queue_init(&handle->queue, video->type, 1224 queue = &handle->queue;
1248 &isp_video_queue_ops, video->isp->dev, 1225 queue->type = video->type;
1249 sizeof(struct isp_buffer)); 1226 queue->io_modes = VB2_MMAP | VB2_USERPTR;
1227 queue->drv_priv = handle;
1228 queue->ops = &isp_video_queue_ops;
1229 queue->mem_ops = &vb2_dma_contig_memops;
1230 queue->buf_struct_size = sizeof(struct isp_buffer);
1231 queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1232
1233 ret = vb2_queue_init(&handle->queue);
1234 if (ret < 0) {
1235 omap3isp_put(video->isp);
1236 goto done;
1237 }
1250 1238
1251 memset(&handle->format, 0, sizeof(handle->format)); 1239 memset(&handle->format, 0, sizeof(handle->format));
1252 handle->format.type = video->type; 1240 handle->format.type = video->type;
@@ -1273,9 +1261,9 @@ static int isp_video_release(struct file *file)
1273 /* Disable streaming and free the buffers queue resources. */ 1261 /* Disable streaming and free the buffers queue resources. */
1274 isp_video_streamoff(file, vfh, video->type); 1262 isp_video_streamoff(file, vfh, video->type);
1275 1263
1276 mutex_lock(&handle->queue.lock); 1264 mutex_lock(&video->queue_lock);
1277 omap3isp_video_queue_cleanup(&handle->queue); 1265 vb2_queue_release(&handle->queue);
1278 mutex_unlock(&handle->queue.lock); 1266 mutex_unlock(&video->queue_lock);
1279 1267
1280 omap3isp_pipeline_pm_use(&video->video.entity, 0); 1268 omap3isp_pipeline_pm_use(&video->video.entity, 0);
1281 1269
@@ -1292,16 +1280,27 @@ static int isp_video_release(struct file *file)
1292static unsigned int isp_video_poll(struct file *file, poll_table *wait) 1280static unsigned int isp_video_poll(struct file *file, poll_table *wait)
1293{ 1281{
1294 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data); 1282 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1295 struct isp_video_queue *queue = &vfh->queue; 1283 struct isp_video *video = video_drvdata(file);
1284 int ret;
1296 1285
1297 return omap3isp_video_queue_poll(queue, file, wait); 1286 mutex_lock(&video->queue_lock);
1287 ret = vb2_poll(&vfh->queue, file, wait);
1288 mutex_unlock(&video->queue_lock);
1289
1290 return ret;
1298} 1291}
1299 1292
1300static int isp_video_mmap(struct file *file, struct vm_area_struct *vma) 1293static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
1301{ 1294{
1302 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data); 1295 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1296 struct isp_video *video = video_drvdata(file);
1297 int ret;
1298
1299 mutex_lock(&video->queue_lock);
1300 ret = vb2_mmap(&vfh->queue, vma);
1301 mutex_unlock(&video->queue_lock);
1303 1302
1304 return omap3isp_video_queue_mmap(&vfh->queue, vma); 1303 return ret;
1305} 1304}
1306 1305
1307static struct v4l2_file_operations isp_video_fops = { 1306static struct v4l2_file_operations isp_video_fops = {
@@ -1342,15 +1341,23 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
1342 return -EINVAL; 1341 return -EINVAL;
1343 } 1342 }
1344 1343
1344 video->alloc_ctx = vb2_dma_contig_init_ctx(video->isp->dev);
1345 if (IS_ERR(video->alloc_ctx))
1346 return PTR_ERR(video->alloc_ctx);
1347
1345 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0); 1348 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
1346 if (ret < 0) 1349 if (ret < 0) {
1350 vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
1347 return ret; 1351 return ret;
1352 }
1348 1353
1349 mutex_init(&video->mutex); 1354 mutex_init(&video->mutex);
1350 atomic_set(&video->active, 0); 1355 atomic_set(&video->active, 0);
1351 1356
1352 spin_lock_init(&video->pipe.lock); 1357 spin_lock_init(&video->pipe.lock);
1353 mutex_init(&video->stream_lock); 1358 mutex_init(&video->stream_lock);
1359 mutex_init(&video->queue_lock);
1360 spin_lock_init(&video->irqlock);
1354 1361
1355 /* Initialize the video device. */ 1362 /* Initialize the video device. */
1356 if (video->ops == NULL) 1363 if (video->ops == NULL)
@@ -1371,7 +1378,9 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
1371 1378
1372void omap3isp_video_cleanup(struct isp_video *video) 1379void omap3isp_video_cleanup(struct isp_video *video)
1373{ 1380{
1381 vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
1374 media_entity_cleanup(&video->video.entity); 1382 media_entity_cleanup(&video->video.entity);
1383 mutex_destroy(&video->queue_lock);
1375 mutex_destroy(&video->stream_lock); 1384 mutex_destroy(&video->stream_lock);
1376 mutex_destroy(&video->mutex); 1385 mutex_destroy(&video->mutex);
1377} 1386}
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 4e194076cc60..7d2e82122ecd 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -30,8 +30,7 @@
30#include <media/media-entity.h> 30#include <media/media-entity.h>
31#include <media/v4l2-dev.h> 31#include <media/v4l2-dev.h>
32#include <media/v4l2-fh.h> 32#include <media/v4l2-fh.h>
33 33#include <media/videobuf2-core.h>
34#include "ispqueue.h"
35 34
36#define ISP_VIDEO_DRIVER_NAME "ispvideo" 35#define ISP_VIDEO_DRIVER_NAME "ispvideo"
37#define ISP_VIDEO_DRIVER_VERSION "0.0.2" 36#define ISP_VIDEO_DRIVER_VERSION "0.0.2"
@@ -124,17 +123,19 @@ static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
124 ISP_PIPELINE_IDLE_OUTPUT); 123 ISP_PIPELINE_IDLE_OUTPUT);
125} 124}
126 125
127/* 126/**
128 * struct isp_buffer - ISP buffer 127 * struct isp_buffer - ISP video buffer
129 * @buffer: ISP video buffer 128 * @vb: videobuf2 buffer
130 * @isp_addr: MMU mapped address (a.k.a. device address) of the buffer. 129 * @irqlist: List head for insertion into IRQ queue
130 * @dma: DMA address
131 */ 131 */
132struct isp_buffer { 132struct isp_buffer {
133 struct isp_video_buffer buffer; 133 struct vb2_buffer vb;
134 dma_addr_t isp_addr; 134 struct list_head irqlist;
135 dma_addr_t dma;
135}; 136};
136 137
137#define to_isp_buffer(buf) container_of(buf, struct isp_buffer, buffer) 138#define to_isp_buffer(buf) container_of(buf, struct isp_buffer, vb)
138 139
139enum isp_video_dmaqueue_flags { 140enum isp_video_dmaqueue_flags {
140 /* Set if DMA queue becomes empty when ISP_PIPELINE_STREAM_CONTINUOUS */ 141 /* Set if DMA queue becomes empty when ISP_PIPELINE_STREAM_CONTINUOUS */
@@ -172,16 +173,16 @@ struct isp_video {
172 unsigned int bpl_value; /* bytes per line value */ 173 unsigned int bpl_value; /* bytes per line value */
173 unsigned int bpl_padding; /* padding at end of line */ 174 unsigned int bpl_padding; /* padding at end of line */
174 175
175 /* Entity video node streaming */
176 unsigned int streaming:1;
177
178 /* Pipeline state */ 176 /* Pipeline state */
179 struct isp_pipeline pipe; 177 struct isp_pipeline pipe;
180 struct mutex stream_lock; /* pipeline and stream states */ 178 struct mutex stream_lock; /* pipeline and stream states */
181 bool error; 179 bool error;
182 180
183 /* Video buffers queue */ 181 /* Video buffers queue */
184 struct isp_video_queue *queue; 182 void *alloc_ctx;
183 struct vb2_queue *queue;
184 struct mutex queue_lock; /* protects the queue */
185 spinlock_t irqlock; /* protects dmaqueue */
185 struct list_head dmaqueue; 186 struct list_head dmaqueue;
186 enum isp_video_dmaqueue_flags dmaqueue_flags; 187 enum isp_video_dmaqueue_flags dmaqueue_flags;
187 188
@@ -193,7 +194,7 @@ struct isp_video {
193struct isp_video_fh { 194struct isp_video_fh {
194 struct v4l2_fh vfh; 195 struct v4l2_fh vfh;
195 struct isp_video *video; 196 struct isp_video *video;
196 struct isp_video_queue queue; 197 struct vb2_queue queue;
197 struct v4l2_format format; 198 struct v4l2_format format;
198 struct v4l2_fract timeperframe; 199 struct v4l2_fract timeperframe;
199}; 200};
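
Since to_isp_buffer() is now a container_of() on the embedded vb2_buffer, the queue operations can recover the driver buffer directly from the vb2 callback argument. A hedged sketch (example_buf_queue and program_hw_address are hypothetical; the real driver does the equivalent in isp_video_buffer_queue() and the resizer interrupt path shown earlier):

/* Sketch only: from the vb2_buffer back to the driver's isp_buffer. */
static void example_buf_queue(struct vb2_buffer *vb)
{
	struct isp_buffer *buffer = to_isp_buffer(vb);	/* container_of on ->vb */

	/* buffer->dma was filled in at buf_prepare time from
	 * vb2_dma_contig_plane_dma_addr() and can be programmed directly.
	 */
	program_hw_address(buffer->dma);
}
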
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 349e659d75fb..7c4489c42365 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1200,6 +1200,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1200EXPORT_SYMBOL_GPL(vb2_buffer_done); 1200EXPORT_SYMBOL_GPL(vb2_buffer_done);
1201 1201
1202/** 1202/**
1203 * vb2_discard_done() - discard all buffers marked as DONE
1204 * @q: videobuf2 queue
1205 *
1206 * This function is intended to be used with suspend/resume operations. It
1207 * discards all 'done' buffers as they would be too old to be requested after
1208 * resume.
1209 *
1210 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
1211 * delayed works before calling this function to make sure no buffer will be
1212 * touched by the driver and/or hardware.
1213 */
1214void vb2_discard_done(struct vb2_queue *q)
1215{
1216 struct vb2_buffer *vb;
1217 unsigned long flags;
1218
1219 spin_lock_irqsave(&q->done_lock, flags);
1220 list_for_each_entry(vb, &q->done_list, done_entry)
1221 vb->state = VB2_BUF_STATE_ERROR;
1222 spin_unlock_irqrestore(&q->done_lock, flags);
1223}
1224EXPORT_SYMBOL_GPL(vb2_discard_done);
1225
1226/**
1203 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a 1227 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
1204 * v4l2_buffer by the userspace. The caller has already verified that struct 1228 * v4l2_buffer by the userspace. The caller has already verified that struct
1205 * v4l2_buffer has a valid number of planes. 1229 * v4l2_buffer has a valid number of planes.
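
A hedged sketch of the intended call site, modelled on omap3isp_video_resume() above; example_resume(), example_dev and example_hw_restart() are hypothetical, and the hardware is assumed to be stopped with interrupts synchronized, as the kerneldoc requires:

/* Sketch only: drop stale 'done' buffers when resuming. */
static int example_resume(struct example_dev *dev)
{
	mutex_lock(&dev->queue_lock);
	vb2_discard_done(&dev->queue);	/* stale frames are flagged as ERROR */
	mutex_unlock(&dev->queue_lock);

	return example_hw_restart(dev);
}
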
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index ded31ea6bd39..cbf455d66f73 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -396,7 +396,7 @@ static void iss_video_buf_queue(struct vb2_buffer *vb)
396 } 396 }
397} 397}
398 398
399static struct vb2_ops iss_video_vb2ops = { 399static const struct vb2_ops iss_video_vb2ops = {
400 .queue_setup = iss_video_queue_setup, 400 .queue_setup = iss_video_queue_setup,
401 .buf_prepare = iss_video_buf_prepare, 401 .buf_prepare = iss_video_buf_prepare,
402 .buf_queue = iss_video_buf_queue, 402 .buf_queue = iss_video_buf_queue,
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bca25dc53f9d..8fab6fa0dbfb 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -432,6 +432,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
432void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); 432void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
433 433
434void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); 434void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
435void vb2_discard_done(struct vb2_queue *q);
435int vb2_wait_for_all_buffers(struct vb2_queue *q); 436int vb2_wait_for_all_buffers(struct vb2_queue *q);
436 437
437int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); 438int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);