path: root/drivers/media/platform/omap3isp/ispqueue.c
author	Laurent Pinchart <laurent.pinchart@ideasonboard.com>	2014-03-09 19:36:15 -0400
committer	Mauro Carvalho Chehab <m.chehab@samsung.com>	2014-05-25 10:38:53 -0400
commit	fbac1400bd1a7a88191dd71442ef2c282ad1816c (patch)
tree	ef2aa81db8f8e11c1e374f6d5eea5d1c74fe3db6 /drivers/media/platform/omap3isp/ispqueue.c
parent	34ea4d4417bb726245fdaeb2f8951eaa0c18fc4c (diff)
[media] omap3isp: Move to videobuf2
Replace the custom buffers queue implementation with a videobuf2 queue.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Sakari Ailus <sakari.ailus@iki.fi>
Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
Diffstat (limited to 'drivers/media/platform/omap3isp/ispqueue.c')
-rw-r--r--	drivers/media/platform/omap3isp/ispqueue.c	1031
1 file changed, 0 insertions, 1031 deletions
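For context, this commit replaces the custom queue implementation deleted below with a videobuf2 (vb2) queue elsewhere in the driver. A minimal sketch of the kind of vb2 queue setup the driver moves to is shown here; the my_* names, the buffer layout and the choice of vb2_dma_contig_memops are illustrative assumptions, not the exact omap3isp conversion:

	#include <media/videobuf2-core.h>
	#include <media/videobuf2-dma-contig.h>

	/* Hypothetical driver buffer embedding the vb2 buffer as its first member. */
	struct my_buffer {
		struct vb2_buffer vb;
		struct list_head irqlist;
	};

	/* Assumed to provide at least queue_setup, buf_prepare and buf_queue. */
	static const struct vb2_ops my_vb2_ops;

	static int my_queue_init(struct vb2_queue *q, void *drv_priv)
	{
		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		q->io_modes = VB2_MMAP | VB2_USERPTR;
		q->drv_priv = drv_priv;
		q->ops = &my_vb2_ops;
		q->mem_ops = &vb2_dma_contig_memops;
		q->buf_struct_size = sizeof(struct my_buffer);

		return vb2_queue_init(q);	/* validates and initializes the queue */
	}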
diff --git a/drivers/media/platform/omap3isp/ispqueue.c b/drivers/media/platform/omap3isp/ispqueue.c
deleted file mode 100644
index 77afb6370443..000000000000
--- a/drivers/media/platform/omap3isp/ispqueue.c
+++ /dev/null
@@ -1,1031 +0,0 @@
1/*
2 * ispqueue.c
3 *
4 * TI OMAP3 ISP - Video buffers queue handling
5 *
6 * Copyright (C) 2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <asm/cacheflush.h>
27#include <linux/dma-mapping.h>
28#include <linux/mm.h>
29#include <linux/pagemap.h>
30#include <linux/poll.h>
31#include <linux/scatterlist.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/vmalloc.h>
35
36#include "isp.h"
37#include "ispqueue.h"
38#include "ispvideo.h"
39
40/* -----------------------------------------------------------------------------
41 * Video buffers management
42 */
43
44/*
45 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
46 *
47 * The typical operation required here is Cache Invalidation across
48 * the (user space) buffer address range. And this _must_ be done
49 * at QBUF stage (and *only* at QBUF).
50 *
51 * We try to use the optimal cache invalidation function:
52 * - dmac_map_area:
53 * - used when the number of pages is _low_.
54 * - it becomes quite slow as the number of pages increases.
55 * - for a 648x492 viewfinder (150 pages) it takes 1.3 ms.
56 * - for a 5 Mpix buffer (2491 pages) it takes between 25 and 50 ms.
57 *
58 * - flush_cache_all:
59 * - used when the number of pages is _high_.
60 * - time taken is in the range of 500-900 us.
61 * - has a higher penalty, but the whole dcache + icache is invalidated.
62 */
63/*
64 * FIXME: dmac_inv_range crashes randomly on the user space buffer
65 * address. Fall back to flush_cache_all for now.
66 */
67#define ISP_CACHE_FLUSH_PAGES_MAX 0
68
69static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
70{
71 if (buf->skip_cache)
72 return;
73
74 if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
75 buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
76 flush_cache_all();
77 else {
78 dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
79 DMA_FROM_DEVICE);
80 outer_inv_range(buf->vbuf.m.userptr,
81 buf->vbuf.m.userptr + buf->vbuf.length);
82 }
83}
84
85/*
86 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
87 *
88 * Lock the VMAs underlying the given buffer into memory. This prevents the
89 * userspace buffer mapping from being swapped out, making VIPT cache handling
90 * easier.
91 *
92 * Note that the pages will not be freed, as the buffers have been locked into
93 * memory by a call to get_user_pages(), but the userspace mapping could
94 * still disappear if the VMAs are not locked. This is caused by the memory
95 * management code trying to be as lock-less as possible, which results in the
96 * userspace mapping manager not finding out that the pages are locked under
97 * some conditions.
98 */
99static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
100{
101 struct vm_area_struct *vma;
102 unsigned long start;
103 unsigned long end;
104 int ret = 0;
105
106 if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
107 return 0;
108
109 /* We can be called from a workqueue context, to unlock the VMAs when the
110 * current task dies. In that case there's no current memory management
111 * context, so unlocking can't be performed, but the VMAs have been or
112 * are being destroyed anyway, so it doesn't really matter.
113 */
114 if (!current || !current->mm)
115 return lock ? -EINVAL : 0;
116
117 start = buf->vbuf.m.userptr;
118 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
119
120 down_write(&current->mm->mmap_sem);
121 spin_lock(&current->mm->page_table_lock);
122
123 do {
124 vma = find_vma(current->mm, start);
125 if (vma == NULL) {
126 ret = -EFAULT;
127 goto out;
128 }
129
130 if (lock)
131 vma->vm_flags |= VM_LOCKED;
132 else
133 vma->vm_flags &= ~VM_LOCKED;
134
135 start = vma->vm_end + 1;
136 } while (vma->vm_end < end);
137
138 if (lock)
139 buf->vm_flags |= VM_LOCKED;
140 else
141 buf->vm_flags &= ~VM_LOCKED;
142
143out:
144 spin_unlock(&current->mm->page_table_lock);
145 up_write(&current->mm->mmap_sem);
146 return ret;
147}
148
149/*
150 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
151 *
152 * Release pages locked by a call to isp_video_buffer_prepare_user and free the
153 * pages table.
154 */
155static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
156{
157 enum dma_data_direction direction;
158 DEFINE_DMA_ATTRS(attrs);
159 unsigned int i;
160
161 if (buf->vbuf.memory == V4L2_MEMORY_USERPTR) {
162 if (buf->skip_cache)
163 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
164
165 direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
166 ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
167 dma_unmap_sg_attrs(buf->queue->dev, buf->sgt.sgl,
168 buf->sgt.orig_nents, direction, &attrs);
169 sg_free_table(&buf->sgt);
170 }
171
172 if (buf->pages != NULL) {
173 isp_video_buffer_lock_vma(buf, 0);
174
175 for (i = 0; i < buf->npages; ++i)
176 page_cache_release(buf->pages[i]);
177
178 vfree(buf->pages);
179 buf->pages = NULL;
180 }
181
182 buf->npages = 0;
183 buf->skip_cache = false;
184}
185
186/*
187 * isp_video_buffer_prepare_user - Prepare a userspace buffer.
188 *
189 * This function creates a scatter list with a 1:1 mapping for a userspace VMA.
190 * The number of pages is first computed based on the buffer size, and pages are
191 * then retrieved by a call to get_user_pages.
192 *
193 * Pages are pinned to memory by get_user_pages, making them available for DMA
194 * transfers. However, due to memory management optimizations, it seems that
195 * get_user_pages doesn't guarantee that the pinned pages will not be written
196 * to swap and removed from the userspace mapping(s). When this happens, a page
197 * fault can be generated when accessing those unmapped pages.
198 *
199 * If the fault is triggered by a page table walk caused by VIPT cache
200 * management operations, the page fault handler might oops if the MM semaphore
201 * is held, as it can't handle kernel page faults in that case. To fix that, a
202 * fixup entry needs to be added to the cache management code, or the userspace
203 * VMA must be locked to avoid removing pages from the userspace mapping in the
204 * first place.
205 *
206 * If the number of pages retrieved is smaller than the number required by the
207 * buffer size, the function returns -EFAULT.
208 */
209static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
210{
211 unsigned int offset;
212 unsigned long data;
213 unsigned int first;
214 unsigned int last;
215 int ret;
216
217 data = buf->vbuf.m.userptr;
218 first = (data & PAGE_MASK) >> PAGE_SHIFT;
219 last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
220 offset = data & ~PAGE_MASK;
221
222 buf->npages = last - first + 1;
223 buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
224 if (buf->pages == NULL)
225 return -ENOMEM;
226
227 down_read(&current->mm->mmap_sem);
228 ret = get_user_pages(current, current->mm, data & PAGE_MASK,
229 buf->npages,
230 buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
231 buf->pages, NULL);
232 up_read(&current->mm->mmap_sem);
233
234 if (ret != buf->npages) {
235 buf->npages = ret < 0 ? 0 : ret;
236 return -EFAULT;
237 }
238
239 ret = isp_video_buffer_lock_vma(buf, 1);
240 if (ret < 0)
241 return ret;
242
243 ret = sg_alloc_table_from_pages(&buf->sgt, buf->pages, buf->npages,
244 offset, buf->vbuf.length, GFP_KERNEL);
245 if (ret < 0)
246 return ret;
247
248 return 0;
249}
250
251/*
252 * isp_video_buffer_prepare_pfnmap - Prepare a VM_PFNMAP userspace buffer
253 *
254 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
255 * memory and if they span a single VMA. Start by validating the user pointer to
256 * make sure it fulfils that condition, and then build a scatter list of
257 * physically contiguous pages starting at the buffer memory physical address.
258 *
259 * Return 0 on success, -EFAULT if the buffer isn't valid or -ENOMEM if memory
260 * can't be allocated.
261 */
262static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
263{
264 struct vm_area_struct *vma;
265 struct scatterlist *sg;
266 unsigned long prev_pfn;
267 unsigned long this_pfn;
268 unsigned long start;
269 unsigned int offset;
270 unsigned long end;
271 unsigned long pfn;
272 unsigned int i;
273 int ret = 0;
274
275 start = buf->vbuf.m.userptr;
276 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
277 offset = start & ~PAGE_MASK;
278
279 buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
280 buf->pages = NULL;
281
282 down_read(&current->mm->mmap_sem);
283 vma = find_vma(current->mm, start);
284 if (vma == NULL || vma->vm_end < end) {
285 ret = -EFAULT;
286 goto unlock;
287 }
288
289 for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
290 ret = follow_pfn(vma, start, &this_pfn);
291 if (ret < 0)
292 goto unlock;
293
294 if (prev_pfn == 0)
295 pfn = this_pfn;
296 else if (this_pfn != prev_pfn + 1) {
297 ret = -EFAULT;
298 goto unlock;
299 }
300
301 prev_pfn = this_pfn;
302 }
303
304unlock:
305 up_read(&current->mm->mmap_sem);
306 if (ret < 0)
307 return ret;
308
309 ret = sg_alloc_table(&buf->sgt, buf->npages, GFP_KERNEL);
310 if (ret < 0)
311 return ret;
312
313 for (sg = buf->sgt.sgl, i = 0; i < buf->npages; ++i, ++pfn) {
314 sg_set_page(sg, pfn_to_page(pfn), PAGE_SIZE - offset, offset);
315 sg = sg_next(sg);
316 offset = 0;
317 }
318
319 return 0;
320}
321
322/*
323 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
324 *
325 * This function locates the VMAs for the buffer's userspace address and checks
326 * that their flags match. The only flag that we need to care for at the moment
327 * is VM_PFNMAP.
328 *
329 * The buffer vm_flags field is set to the first VMA flags.
330 *
331 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
332 * have incompatible flags.
333 */
334static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
335{
336 struct vm_area_struct *vma;
337 pgprot_t uninitialized_var(vm_page_prot);
338 unsigned long start;
339 unsigned long end;
340 int ret = -EFAULT;
341
342 start = buf->vbuf.m.userptr;
343 end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
344
345 down_read(&current->mm->mmap_sem);
346
347 do {
348 vma = find_vma(current->mm, start);
349 if (vma == NULL)
350 goto done;
351
352 if (start == buf->vbuf.m.userptr) {
353 buf->vm_flags = vma->vm_flags;
354 vm_page_prot = vma->vm_page_prot;
355 }
356
357 if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
358 goto done;
359
360 if (vm_page_prot != vma->vm_page_prot)
361 goto done;
362
363 start = vma->vm_end + 1;
364 } while (vma->vm_end < end);
365
366 /* Skip cache management to enhance performance for non-cached or
367 * write-combining buffers.
368 */
369 if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
370 vm_page_prot == pgprot_writecombine(vm_page_prot))
371 buf->skip_cache = true;
372
373 ret = 0;
374
375done:
376 up_read(&current->mm->mmap_sem);
377 return ret;
378}
379
380/*
381 * isp_video_buffer_prepare - Make a buffer ready for operation
382 *
383 * Preparing a buffer involves:
384 *
385 * - validating VMAs (userspace buffers only)
386 * - locking pages and VMAs into memory (userspace buffers only)
387 * - building page and scatter-gather lists (userspace buffers only)
388 * - mapping buffers for DMA operation
389 * - performing driver-specific preparation
390 *
391 * The function must be called in userspace context with a valid mm context
392 * (this excludes cleanup paths such as sys_close when the userspace process
393 * segfaults).
394 */
395static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
396{
397 enum dma_data_direction direction;
398 DEFINE_DMA_ATTRS(attrs);
399 int ret;
400
401 switch (buf->vbuf.memory) {
402 case V4L2_MEMORY_MMAP:
403 ret = 0;
404 break;
405
406 case V4L2_MEMORY_USERPTR:
407 ret = isp_video_buffer_prepare_vm_flags(buf);
408 if (ret < 0)
409 return ret;
410
411 if (buf->vm_flags & VM_PFNMAP)
412 ret = isp_video_buffer_prepare_pfnmap(buf);
413 else
414 ret = isp_video_buffer_prepare_user(buf);
415
416 if (ret < 0)
417 goto done;
418
419 if (buf->skip_cache)
420 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
421
422 direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
423 ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
424 ret = dma_map_sg_attrs(buf->queue->dev, buf->sgt.sgl,
425 buf->sgt.orig_nents, direction, &attrs);
426 if (ret <= 0) {
427 ret = -EFAULT;
428 goto done;
429 }
430
431 buf->dma = sg_dma_address(buf->sgt.sgl);
432 break;
433
434 default:
435 return -EINVAL;
436 }
437
438 if (!IS_ALIGNED(buf->dma, 32)) {
439 dev_dbg(buf->queue->dev,
440 "Buffer address must be aligned to 32 bytes boundary.\n");
441 ret = -EINVAL;
442 goto done;
443 }
444
445 if (buf->queue->ops->buffer_prepare)
446 ret = buf->queue->ops->buffer_prepare(buf);
447
448done:
449 if (ret < 0) {
450 isp_video_buffer_cleanup(buf);
451 return ret;
452 }
453
454 return ret;
455}
456
457/*
458 * isp_video_buffer_query - Query the status of a given buffer
459 *
460 * Locking: must be called with the queue lock held.
461 */
462static void isp_video_buffer_query(struct isp_video_buffer *buf,
463 struct v4l2_buffer *vbuf)
464{
465 memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));
466
467 if (buf->vma_use_count)
468 vbuf->flags |= V4L2_BUF_FLAG_MAPPED;
469
470 switch (buf->state) {
471 case ISP_BUF_STATE_ERROR:
472 vbuf->flags |= V4L2_BUF_FLAG_ERROR;
473 /* Fallthrough */
474 case ISP_BUF_STATE_DONE:
475 vbuf->flags |= V4L2_BUF_FLAG_DONE;
476 break;
477 case ISP_BUF_STATE_QUEUED:
478 case ISP_BUF_STATE_ACTIVE:
479 vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
480 break;
481 case ISP_BUF_STATE_IDLE:
482 default:
483 break;
484 }
485}
486
487/*
488 * isp_video_buffer_wait - Wait for a buffer to be ready
489 *
490 * In non-blocking mode, return immediately with 0 if the buffer is ready or
491 * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
492 *
493 * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
494 * queue using the same condition.
495 */
496static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
497{
498 if (nonblocking) {
499 return (buf->state != ISP_BUF_STATE_QUEUED &&
500 buf->state != ISP_BUF_STATE_ACTIVE)
501 ? 0 : -EAGAIN;
502 }
503
504 return wait_event_interruptible(buf->wait,
505 buf->state != ISP_BUF_STATE_QUEUED &&
506 buf->state != ISP_BUF_STATE_ACTIVE);
507}
508
509/* -----------------------------------------------------------------------------
510 * Queue management
511 */
512
513/*
514 * isp_video_queue_free - Free video buffers memory
515 *
516 * Buffers can only be freed if the queue isn't streaming and if no buffer is
517 * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
518 *
519 * This function must be called with the queue lock held.
520 */
521static int isp_video_queue_free(struct isp_video_queue *queue)
522{
523 unsigned int i;
524
525 if (queue->streaming)
526 return -EBUSY;
527
528 for (i = 0; i < queue->count; ++i) {
529 if (queue->buffers[i]->vma_use_count != 0)
530 return -EBUSY;
531 }
532
533 for (i = 0; i < queue->count; ++i) {
534 struct isp_video_buffer *buf = queue->buffers[i];
535
536 isp_video_buffer_cleanup(buf);
537
538 if (buf->vaddr) {
539 dma_free_coherent(queue->dev,
540 PAGE_ALIGN(buf->vbuf.length),
541 buf->vaddr, buf->dma);
542 buf->vaddr = NULL;
543 }
544
545 kfree(buf);
546 queue->buffers[i] = NULL;
547 }
548
549 INIT_LIST_HEAD(&queue->queue);
550 queue->count = 0;
551 return 0;
552}
553
554/*
555 * isp_video_queue_alloc - Allocate video buffers memory
556 *
557 * This function must be called with the queue lock held.
558 */
559static int isp_video_queue_alloc(struct isp_video_queue *queue,
560 unsigned int nbuffers,
561 unsigned int size, enum v4l2_memory memory)
562{
563 struct isp_video_buffer *buf;
564 dma_addr_t dma;
565 unsigned int i;
566 void *mem;
567 int ret;
568
569 /* Start by freeing the buffers. */
570 ret = isp_video_queue_free(queue);
571 if (ret < 0)
572 return ret;
573
574 /* Bail out if no buffers should be allocated. */
575 if (nbuffers == 0)
576 return 0;
577
578 /* Initialize the allocated buffers. */
579 for (i = 0; i < nbuffers; ++i) {
580 buf = kzalloc(queue->bufsize, GFP_KERNEL);
581 if (buf == NULL)
582 break;
583
584 if (memory == V4L2_MEMORY_MMAP) {
585 /* Allocate video buffers memory for mmap mode. Align
586 * the size to the page size.
587 */
588 mem = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
589 &dma, GFP_KERNEL);
590 if (mem == NULL) {
591 kfree(buf);
592 break;
593 }
594
595 buf->vbuf.m.offset = i * PAGE_ALIGN(size);
596 buf->vaddr = mem;
597 buf->dma = dma;
598 }
599
600 buf->vbuf.index = i;
601 buf->vbuf.length = size;
602 buf->vbuf.type = queue->type;
603 buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
604 buf->vbuf.field = V4L2_FIELD_NONE;
605 buf->vbuf.memory = memory;
606
607 buf->queue = queue;
608 init_waitqueue_head(&buf->wait);
609
610 queue->buffers[i] = buf;
611 }
612
613 if (i == 0)
614 return -ENOMEM;
615
616 queue->count = i;
617 return nbuffers;
618}
619
620/**
621 * omap3isp_video_queue_cleanup - Clean up the video buffers queue
622 * @queue: Video buffers queue
623 *
624 * Free all allocated resources and clean up the video buffers queue. The queue
625 * must not be busy (no ongoing video stream) and buffers must have been
626 * unmapped.
627 *
628 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
629 * unmapped.
630 */
631int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
632{
633 return isp_video_queue_free(queue);
634}
635
636/**
637 * omap3isp_video_queue_init - Initialize the video buffers queue
638 * @queue: Video buffers queue
639 * @type: V4L2 buffer type (capture or output)
640 * @ops: Driver-specific queue operations
641 * @dev: Device used for DMA operations
642 * @bufsize: Size of the driver-specific buffer structure
643 *
644 * Initialize the video buffers queue with the supplied parameters.
645 *
646 * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
647 * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
648 *
649 * Buffer objects will be allocated using the given buffer size to allow room
650 * for driver-specific fields. Driver-specific buffer structures must start
651 * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
652 * structure must pass the size of the isp_video_buffer structure in the bufsize
653 * parameter.
654 *
655 * Return 0 on success.
656 */
657int omap3isp_video_queue_init(struct isp_video_queue *queue,
658 enum v4l2_buf_type type,
659 const struct isp_video_queue_operations *ops,
660 struct device *dev, unsigned int bufsize)
661{
662 INIT_LIST_HEAD(&queue->queue);
663
664 queue->type = type;
665 queue->ops = ops;
666 queue->dev = dev;
667 queue->bufsize = bufsize;
668
669 return 0;
670}
671
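As the kernel-doc for omap3isp_video_queue_init above describes, a driver embeds struct isp_video_buffer at the start of its own buffer structure and passes the enlarged size as bufsize. A minimal sketch under that convention (the my_* names are hypothetical; the operation names are the ones this file invokes: queue_prepare, buffer_prepare and buffer_queue):

	/* Hypothetical driver-specific buffer: the isp_video_buffer member must
	 * come first so the queue core and the driver can cast between the two. */
	struct my_isp_buffer {
		struct isp_video_buffer buffer;
		void *priv;			/* driver-specific data */
	};

	static const struct isp_video_queue_operations my_queue_ops = {
		.queue_prepare	= my_queue_prepare,
		.buffer_prepare	= my_buffer_prepare,
		.buffer_queue	= my_buffer_queue,
	};

	/* In the driver's init path, with 'video' and 'isp' being driver state: */
	ret = omap3isp_video_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_CAPTURE,
					&my_queue_ops, isp->dev,
					sizeof(struct my_isp_buffer));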
672/* -----------------------------------------------------------------------------
673 * V4L2 operations
674 */
675
676/**
677 * omap3isp_video_queue_reqbufs - Allocate video buffers memory
678 *
679 * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
680 * allocates video buffer objects and, for MMAP buffers, buffer memory.
681 *
682 * If the number of buffers is 0, all buffers are freed and the function returns
683 * without performing any allocation.
684 *
685 * If the number of buffers is not 0, currently allocated buffers (if any) are
686 * freed and the requested number of buffers are allocated. Depending on
687 * driver-specific requirements and on memory availability, a number of buffers
688 * smaller or larger than requested can be allocated. This isn't considered
689 * an error.
690 *
691 * Return 0 on success or one of the following error codes:
692 *
693 * -EINVAL if the buffer type or index are invalid
694 * -EBUSY if the queue is busy (streaming or buffers mapped)
695 * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
696 */
697int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
698 struct v4l2_requestbuffers *rb)
699{
700 unsigned int nbuffers = rb->count;
701 unsigned int size;
702 int ret;
703
704 if (rb->type != queue->type)
705 return -EINVAL;
706
707 queue->ops->queue_prepare(queue, &nbuffers, &size);
708 if (size == 0)
709 return -EINVAL;
710
711 nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);
712
713 ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
714 if (ret < 0)
715 return ret;
716
717 rb->count = ret;
718 return 0;
719}
720
721/**
722 * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
723 *
724 * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
725 * returns the status of a given video buffer.
726 *
727 * Return 0 on success or -EINVAL if the buffer type or index are invalid.
728 */
729int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
730 struct v4l2_buffer *vbuf)
731{
732 struct isp_video_buffer *buf;
733
734 if (vbuf->type != queue->type)
735 return -EINVAL;
736
737 if (vbuf->index >= queue->count)
738 return -EINVAL;
739
740 buf = queue->buffers[vbuf->index];
741 isp_video_buffer_query(buf, vbuf);
742
743 return 0;
744}
745
746/**
747 * omap3isp_video_queue_qbuf - Queue a buffer
748 *
749 * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
750 *
751 * The v4l2_buffer structure passed from userspace is first sanity tested. If
752 * sane, the buffer is then processed and added to the main queue and, if the
753 * queue is streaming, to the IRQ queue.
754 *
755 * Before being enqueued, USERPTR buffers are checked for address changes. If
756 * the buffer has a different userspace address, the old memory area is unlocked
757 * and the new memory area is locked.
758 */
759int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
760 struct v4l2_buffer *vbuf)
761{
762 struct isp_video_buffer *buf;
763 int ret;
764
765 if (vbuf->type != queue->type)
766 return -EINVAL;
767
768 if (vbuf->index >= queue->count)
769 return -EINVAL;
770
771 buf = queue->buffers[vbuf->index];
772
773 if (vbuf->memory != buf->vbuf.memory)
774 return -EINVAL;
775
776 if (buf->state != ISP_BUF_STATE_IDLE)
777 return -EINVAL;
778
779 if (vbuf->memory == V4L2_MEMORY_USERPTR &&
780 vbuf->length < buf->vbuf.length)
781 return -EINVAL;
782
783 if (vbuf->memory == V4L2_MEMORY_USERPTR &&
784 vbuf->m.userptr != buf->vbuf.m.userptr) {
785 isp_video_buffer_cleanup(buf);
786 buf->vbuf.m.userptr = vbuf->m.userptr;
787 buf->prepared = 0;
788 }
789
790 if (!buf->prepared) {
791 ret = isp_video_buffer_prepare(buf);
792 if (ret < 0)
793 return ret;
794 buf->prepared = 1;
795 }
796
797 isp_video_buffer_cache_sync(buf);
798
799 buf->state = ISP_BUF_STATE_QUEUED;
800 list_add_tail(&buf->stream, &queue->queue);
801
802 if (queue->streaming)
803 queue->ops->buffer_queue(buf);
804
805 return 0;
806}
807
808/**
809 * omap3isp_video_queue_dqbuf - Dequeue a buffer
810 *
811 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
812 *
813 * Wait until a buffer is ready to be dequeued, remove it from the queue and
814 * copy its information to the v4l2_buffer structure.
815 *
816 * If the nonblocking argument is not zero and no buffer is ready, return
817 * -EAGAIN immediately instead of waiting.
818 *
819 * If no buffer has been enqueued, or if the requested buffer type doesn't match
820 * the queue type, return -EINVAL.
821 */
822int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
823 struct v4l2_buffer *vbuf, int nonblocking)
824{
825 struct isp_video_buffer *buf;
826 int ret;
827
828 if (vbuf->type != queue->type)
829 return -EINVAL;
830
831 if (list_empty(&queue->queue))
832 return -EINVAL;
833
834 buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
835 ret = isp_video_buffer_wait(buf, nonblocking);
836 if (ret < 0)
837 return ret;
838
839 list_del(&buf->stream);
840
841 isp_video_buffer_query(buf, vbuf);
842 buf->state = ISP_BUF_STATE_IDLE;
843 vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;
844
845 return 0;
846}
847
848/**
849 * omap3isp_video_queue_streamon - Start streaming
850 *
851 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
852 * starts streaming on the queue and calls the buffer_queue operation for all
853 * queued buffers.
854 *
855 * Return 0 on success.
856 */
857int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
858{
859 struct isp_video_buffer *buf;
860
861 if (queue->streaming)
862 return 0;
863
864 queue->streaming = 1;
865
866 list_for_each_entry(buf, &queue->queue, stream)
867 queue->ops->buffer_queue(buf);
868
869 return 0;
870}
871
872/**
873 * omap3isp_video_queue_streamoff - Stop streaming
874 *
875 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
876 * stops streaming on the queue and wakes up all the buffers.
877 *
878 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
879 * delayed works before calling this function to make sure no buffer will be
880 * touched by the driver and/or hardware.
881 */
882void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
883{
884 struct isp_video_buffer *buf;
885 unsigned int i;
886
887 if (!queue->streaming)
888 return;
889
890 queue->streaming = 0;
891
892 for (i = 0; i < queue->count; ++i) {
893 buf = queue->buffers[i];
894
895 if (buf->state == ISP_BUF_STATE_ACTIVE)
896 wake_up(&buf->wait);
897
898 buf->state = ISP_BUF_STATE_IDLE;
899 }
900
901 INIT_LIST_HEAD(&queue->queue);
902}
903
904/**
905 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
906 *
907 * This function is intended to be used with suspend/resume operations. It
908 * discards all 'done' buffers as they would be too old to be requested after
909 * resume.
910 *
911 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
912 * delayed works before calling this function to make sure no buffer will be
913 * touched by the driver and/or hardware.
914 */
915void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
916{
917 struct isp_video_buffer *buf;
918 unsigned int i;
919
920 if (!queue->streaming)
921 return;
922
923 for (i = 0; i < queue->count; ++i) {
924 buf = queue->buffers[i];
925
926 if (buf->state == ISP_BUF_STATE_DONE)
927 buf->state = ISP_BUF_STATE_ERROR;
928 }
929}
930
931static void isp_video_queue_vm_open(struct vm_area_struct *vma)
932{
933 struct isp_video_buffer *buf = vma->vm_private_data;
934
935 buf->vma_use_count++;
936}
937
938static void isp_video_queue_vm_close(struct vm_area_struct *vma)
939{
940 struct isp_video_buffer *buf = vma->vm_private_data;
941
942 buf->vma_use_count--;
943}
944
945static const struct vm_operations_struct isp_video_queue_vm_ops = {
946 .open = isp_video_queue_vm_open,
947 .close = isp_video_queue_vm_close,
948};
949
950/**
951 * omap3isp_video_queue_mmap - Map buffers to userspace
952 *
953 * This function is intended to be used as an mmap() file operation handler. It
954 * maps a buffer to userspace based on the VMA offset.
955 *
956 * Only buffers of memory type MMAP are supported.
957 */
958int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
959 struct vm_area_struct *vma)
960{
961 struct isp_video_buffer *uninitialized_var(buf);
962 unsigned long size;
963 unsigned int i;
964 int ret = 0;
965
966 for (i = 0; i < queue->count; ++i) {
967 buf = queue->buffers[i];
968 if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
969 break;
970 }
971
972 if (i == queue->count)
973 return -EINVAL;
974
975 size = vma->vm_end - vma->vm_start;
976
977 if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
978 size != PAGE_ALIGN(buf->vbuf.length))
979 return -EINVAL;
980
981 /* dma_mmap_coherent() uses vm_pgoff as an offset inside the buffer
982 * while we used it to identify the buffer and want to map the whole
983 * buffer.
984 */
985 vma->vm_pgoff = 0;
986
987 ret = dma_mmap_coherent(queue->dev, vma, buf->vaddr, buf->dma, size);
988 if (ret < 0)
989 return ret;
990
991 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
992 vma->vm_ops = &isp_video_queue_vm_ops;
993 vma->vm_private_data = buf;
994 isp_video_queue_vm_open(vma);
995
996 return 0;
997}
998
999/**
1000 * omap3isp_video_queue_poll - Poll video queue state
1001 *
1002 * This function is intended to be used as a poll() file operation handler. It
1003 * polls the state of the video buffer at the front of the queue and returns an
1004 * events mask.
1005 *
1006 * If no buffer is present at the front of the queue, POLLERR is returned.
1007 */
1008unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
1009 struct file *file, poll_table *wait)
1010{
1011 struct isp_video_buffer *buf;
1012 unsigned int mask = 0;
1013
1014 if (list_empty(&queue->queue)) {
1015 mask |= POLLERR;
1016 goto done;
1017 }
1018 buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
1019
1020 poll_wait(file, &buf->wait, wait);
1021 if (buf->state == ISP_BUF_STATE_DONE ||
1022 buf->state == ISP_BUF_STATE_ERROR) {
1023 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1024 mask |= POLLIN | POLLRDNORM;
1025 else
1026 mask |= POLLOUT | POLLWRNORM;
1027 }
1028
1029done:
1030 return mask;
1031}
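The exported omap3isp_video_queue_* handlers above implement the standard V4L2 streaming ioctls, so they are exercised from userspace through the usual REQBUFS/QBUF/STREAMON/DQBUF sequence. A minimal capture sketch (MMAP memory, error handling and buffer mmap()ing omitted; the /dev/video0 node and the buffer count are assumptions):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		int fd = open("/dev/video0", O_RDWR);	/* assumed device node */
		int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		struct v4l2_requestbuffers rb = {
			.count = 4,			/* arbitrary buffer count */
			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_MMAP,
		};
		struct v4l2_buffer buf;
		unsigned int i;

		ioctl(fd, VIDIOC_REQBUFS, &rb);		/* ..._queue_reqbufs */

		for (i = 0; i < rb.count; i++) {
			memset(&buf, 0, sizeof(buf));
			buf.index = i;
			buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory = V4L2_MEMORY_MMAP;
			ioctl(fd, VIDIOC_QBUF, &buf);	/* ..._queue_qbuf */
		}

		ioctl(fd, VIDIOC_STREAMON, &type);	/* ..._queue_streamon */

		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		ioctl(fd, VIDIOC_DQBUF, &buf);		/* ..._queue_dqbuf, blocks until a buffer is done */

		ioctl(fd, VIDIOC_STREAMOFF, &type);	/* ..._queue_streamoff */
		return 0;
	}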