Diffstat (limited to 'drivers/media/video/uvc/uvc_queue.c')
-rw-r--r--  drivers/media/video/uvc/uvc_queue.c  634
1 file changed, 634 insertions, 0 deletions
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
new file mode 100644
index 00000000000..677691c4450
--- /dev/null
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -0,0 +1,634 @@
/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *     Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * The video queue is initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
 * uvc_free_buffers respectively. The former acquires the video queue lock,
 * while the latter must be called with the lock held (so that allocation can
 * free previously allocated buffers). Trying to free buffers that are mapped
 * to user space will return -EBUSY.
 *
 * Video buffers are managed using two queues. However, unlike most USB video
 * drivers that use an in queue and an out queue, we use a main queue to hold
 * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
 * hold empty buffers. This design (copied from video-buf) minimizes locking
 * in interrupt, as only one queue is shared between interrupt and user
 * contexts.
 *
 * Use cases
 * ---------
 *
 * Unless stated otherwise, all operations that modify the irq buffers queue
 * are protected by the irq spinlock.
 *
 * 1. The user queues the buffers, starts streaming and dequeues a buffer.
 *
 *    The buffers are added to the main and irq queues. Both operations are
 *    protected by the queue lock, and the latter is protected by the irq
 *    spinlock as well.
 *
 *    The completion handler fetches a buffer from the irq queue and fills it
 *    with video data. If no buffer is available (irq queue empty), the handler
 *    returns immediately.
 *
 *    When the buffer is full, the completion handler removes it from the irq
 *    queue, marks it as done (UVC_BUF_STATE_DONE) and wakes its wait queue.
 *    At that point, any process waiting on the buffer will be woken up. If a
 *    process tries to dequeue a buffer after it has been marked done, the
 *    dequeuing will succeed immediately.
 *
 * 2. Buffers are queued, user is waiting on a buffer and the device gets
 *    disconnected.
 *
 *    When the device is disconnected, the kernel calls the completion handler
 *    with an appropriate status code. The handler marks all buffers in the
 *    irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
 *    that any process waiting on a buffer gets woken up.
 *
 *    Waking up the first buffer on the irq list is not enough, as the
 *    process waiting on the buffer might restart the dequeue operation
 *    immediately.
 *
 */

void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
                int drop_corrupted)
{
        mutex_init(&queue->mutex);
        spin_lock_init(&queue->irqlock);
        INIT_LIST_HEAD(&queue->mainqueue);
        INIT_LIST_HEAD(&queue->irqqueue);
        queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
        queue->type = type;
}
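
/*
 * Illustrative sketch, not part of the original file: a driver that embeds a
 * struct uvc_video_queue in its streaming structure could initialize it once
 * at setup time.  The wrapper name is hypothetical; only the uvc_queue_init()
 * call reflects the code above.
 */
static void example_setup_capture_queue(struct uvc_video_queue *queue)
{
        /* Capture queue that drops buffers flagged as corrupted. */
        uvc_queue_init(queue, V4L2_BUF_TYPE_VIDEO_CAPTURE, 1);
}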

/*
 * Free the video buffers.
 *
 * This function must be called with the queue lock held.
 */
static int __uvc_free_buffers(struct uvc_video_queue *queue)
{
        unsigned int i;

        for (i = 0; i < queue->count; ++i) {
                if (queue->buffer[i].vma_use_count != 0)
                        return -EBUSY;
        }

        if (queue->count) {
                uvc_queue_cancel(queue, 0);
                INIT_LIST_HEAD(&queue->mainqueue);
                vfree(queue->mem);
                queue->count = 0;
        }

        return 0;
}

int uvc_free_buffers(struct uvc_video_queue *queue)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = __uvc_free_buffers(queue);
        mutex_unlock(&queue->mutex);

        return ret;
}

/*
 * Allocate the video buffers.
 *
 * Pages are reserved to make sure they will not be swapped, as they will be
 * filled in the URB completion handler.
 *
 * Buffers will be individually mapped, so they must all be page aligned.
 */
int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
                unsigned int buflength)
{
        unsigned int bufsize = PAGE_ALIGN(buflength);
        unsigned int i;
        void *mem = NULL;
        int ret;

        if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
                nbuffers = UVC_MAX_VIDEO_BUFFERS;

        mutex_lock(&queue->mutex);

        if ((ret = __uvc_free_buffers(queue)) < 0)
                goto done;

        /* Bail out if no buffers should be allocated. */
        if (nbuffers == 0)
                goto done;

        /* Decrement the number of buffers until allocation succeeds. */
        for (; nbuffers > 0; --nbuffers) {
                mem = vmalloc_32(nbuffers * bufsize);
                if (mem != NULL)
                        break;
        }

        if (mem == NULL) {
                ret = -ENOMEM;
                goto done;
        }

        for (i = 0; i < nbuffers; ++i) {
                memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
                queue->buffer[i].buf.index = i;
                queue->buffer[i].buf.m.offset = i * bufsize;
                queue->buffer[i].buf.length = buflength;
                queue->buffer[i].buf.type = queue->type;
                queue->buffer[i].buf.field = V4L2_FIELD_NONE;
                queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
                queue->buffer[i].buf.flags = 0;
                init_waitqueue_head(&queue->buffer[i].wait);
        }

        queue->mem = mem;
        queue->count = nbuffers;
        queue->buf_size = bufsize;
        ret = nbuffers;

done:
        mutex_unlock(&queue->mutex);
        return ret;
}
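
/*
 * Illustrative sketch, not part of the original file: a VIDIOC_REQBUFS-style
 * handler could map a user request onto uvc_alloc_buffers()/uvc_free_buffers()
 * and report back how many buffers were actually allocated.  The function and
 * parameter names are hypothetical; the buffer size would normally come from
 * the negotiated frame format.
 */
static int example_request_buffers(struct uvc_video_queue *queue,
                struct v4l2_requestbuffers *rb, unsigned int frame_size)
{
        int ret;

        if (rb->count == 0)
                return uvc_free_buffers(queue);

        ret = uvc_alloc_buffers(queue, rb->count, frame_size);
        if (ret < 0)
                return ret;

        /* uvc_alloc_buffers() returns the number of buffers allocated. */
        rb->count = ret;
        return 0;
}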

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
        int allocated;

        mutex_lock(&queue->mutex);
        allocated = queue->count != 0;
        mutex_unlock(&queue->mutex);

        return allocated;
}

static void __uvc_query_buffer(struct uvc_buffer *buf,
                struct v4l2_buffer *v4l2_buf)
{
        memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);

        if (buf->vma_use_count)
                v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;

        switch (buf->state) {
        case UVC_BUF_STATE_ERROR:
        case UVC_BUF_STATE_DONE:
                v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
                break;
        case UVC_BUF_STATE_QUEUED:
        case UVC_BUF_STATE_ACTIVE:
        case UVC_BUF_STATE_READY:
                v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
                break;
        case UVC_BUF_STATE_IDLE:
        default:
                break;
        }
}

int uvc_query_buffer(struct uvc_video_queue *queue,
                struct v4l2_buffer *v4l2_buf)
{
        int ret = 0;

        mutex_lock(&queue->mutex);
        if (v4l2_buf->index >= queue->count) {
                ret = -EINVAL;
                goto done;
        }

        __uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);

done:
        mutex_unlock(&queue->mutex);
        return ret;
}

/*
 * Queue a video buffer. Attempting to queue a buffer that has already been
 * queued will return -EINVAL.
 */
int uvc_queue_buffer(struct uvc_video_queue *queue,
        struct v4l2_buffer *v4l2_buf)
{
        struct uvc_buffer *buf;
        unsigned long flags;
        int ret = 0;

        uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);

        if (v4l2_buf->type != queue->type ||
            v4l2_buf->memory != V4L2_MEMORY_MMAP) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
                        "and/or memory (%u).\n", v4l2_buf->type,
                        v4l2_buf->memory);
                return -EINVAL;
        }

        mutex_lock(&queue->mutex);
        if (v4l2_buf->index >= queue->count) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
                ret = -EINVAL;
                goto done;
        }

        buf = &queue->buffer[v4l2_buf->index];
        if (buf->state != UVC_BUF_STATE_IDLE) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
                        "(%u).\n", buf->state);
                ret = -EINVAL;
                goto done;
        }

        if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            v4l2_buf->bytesused > buf->buf.length) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
                ret = -EINVAL;
                goto done;
        }

        spin_lock_irqsave(&queue->irqlock, flags);
        if (queue->flags & UVC_QUEUE_DISCONNECTED) {
                spin_unlock_irqrestore(&queue->irqlock, flags);
                ret = -ENODEV;
                goto done;
        }
        buf->state = UVC_BUF_STATE_QUEUED;
        if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                buf->buf.bytesused = 0;
        else
                buf->buf.bytesused = v4l2_buf->bytesused;

        list_add_tail(&buf->stream, &queue->mainqueue);
        list_add_tail(&buf->queue, &queue->irqqueue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

done:
        mutex_unlock(&queue->mutex);
        return ret;
}

static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
{
        if (nonblocking) {
                return (buf->state != UVC_BUF_STATE_QUEUED &&
                        buf->state != UVC_BUF_STATE_ACTIVE &&
                        buf->state != UVC_BUF_STATE_READY)
                        ? 0 : -EAGAIN;
        }

        return wait_event_interruptible(buf->wait,
                buf->state != UVC_BUF_STATE_QUEUED &&
                buf->state != UVC_BUF_STATE_ACTIVE &&
                buf->state != UVC_BUF_STATE_READY);
}

/*
 * Dequeue a video buffer. If nonblocking is false, block until a buffer is
 * available.
 */
int uvc_dequeue_buffer(struct uvc_video_queue *queue,
                struct v4l2_buffer *v4l2_buf, int nonblocking)
{
        struct uvc_buffer *buf;
        int ret = 0;

        if (v4l2_buf->type != queue->type ||
            v4l2_buf->memory != V4L2_MEMORY_MMAP) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
                        "and/or memory (%u).\n", v4l2_buf->type,
                        v4l2_buf->memory);
                return -EINVAL;
        }

        mutex_lock(&queue->mutex);
        if (list_empty(&queue->mainqueue)) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
                ret = -EINVAL;
                goto done;
        }

        buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
        if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
                goto done;

        uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
                buf->buf.index, buf->state, buf->buf.bytesused);

        switch (buf->state) {
        case UVC_BUF_STATE_ERROR:
                uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
                        "(transmission error).\n");
                ret = -EIO;
                /* Fall through: the erroneous buffer is still dequeued below,
                 * and -EIO reports the error to the caller. */
        case UVC_BUF_STATE_DONE:
                buf->state = UVC_BUF_STATE_IDLE;
                break;

        case UVC_BUF_STATE_IDLE:
        case UVC_BUF_STATE_QUEUED:
        case UVC_BUF_STATE_ACTIVE:
        case UVC_BUF_STATE_READY:
        default:
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
                        "(driver bug?).\n", buf->state);
                ret = -EINVAL;
                goto done;
        }

        list_del(&buf->stream);
        __uvc_query_buffer(buf, v4l2_buf);

done:
        mutex_unlock(&queue->mutex);
        return ret;
}
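
/*
 * Illustrative sketch, not part of the original file: VIDIOC_QBUF and
 * VIDIOC_DQBUF handlers can delegate directly to uvc_queue_buffer() and
 * uvc_dequeue_buffer(), with O_NONBLOCK on the file selecting the
 * non-blocking dequeue path.  The function names are hypothetical.
 */
static int example_qbuf(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
        return uvc_queue_buffer(queue, buf);
}

static int example_dqbuf(struct uvc_video_queue *queue, struct file *file,
                struct v4l2_buffer *buf)
{
        return uvc_dequeue_buffer(queue, buf, file->f_flags & O_NONBLOCK);
}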

/*
 * VMA operations.
 */
static void uvc_vm_open(struct vm_area_struct *vma)
{
        struct uvc_buffer *buffer = vma->vm_private_data;
        buffer->vma_use_count++;
}

static void uvc_vm_close(struct vm_area_struct *vma)
{
        struct uvc_buffer *buffer = vma->vm_private_data;
        buffer->vma_use_count--;
}

static const struct vm_operations_struct uvc_vm_ops = {
        .open = uvc_vm_open,
        .close = uvc_vm_close,
};

/*
 * Memory-map a video buffer.
 *
 * This function implements video buffer memory mapping and is intended to be
 * used by the device mmap handler.
 */
int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
        struct uvc_buffer *uninitialized_var(buffer);
        struct page *page;
        unsigned long addr, start, size;
        unsigned int i;
        int ret = 0;

        start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;

        mutex_lock(&queue->mutex);

        for (i = 0; i < queue->count; ++i) {
                buffer = &queue->buffer[i];
                if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
                        break;
        }

        if (i == queue->count || PAGE_ALIGN(size) != queue->buf_size) {
                ret = -EINVAL;
                goto done;
        }

        /*
         * VM_IO marks the area as being an mmapped region for I/O to a
         * device. It also prevents the region from being core dumped.
         */
        vma->vm_flags |= VM_IO;

        addr = (unsigned long)queue->mem + buffer->buf.m.offset;
#ifdef CONFIG_MMU
        while (size > 0) {
                page = vmalloc_to_page((void *)addr);
                if ((ret = vm_insert_page(vma, start, page)) < 0)
                        goto done;

                start += PAGE_SIZE;
                addr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
#endif

        vma->vm_ops = &uvc_vm_ops;
        vma->vm_private_data = buffer;
        uvc_vm_open(vma);

done:
        mutex_unlock(&queue->mutex);
        return ret;
}
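
/*
 * Illustrative sketch, not part of the original file: the V4L2 device's mmap
 * file operation can hand the VMA straight to uvc_queue_mmap().  Storing the
 * queue pointer in file->private_data is a hypothetical convention of this
 * sketch, not something the code above requires.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct uvc_video_queue *queue = file->private_data;

        return uvc_queue_mmap(queue, vma);
}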

/*
 * Poll the video queue.
 *
 * This function implements video queue polling and is intended to be used by
 * the device poll handler.
 */
unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
                poll_table *wait)
{
        struct uvc_buffer *buf;
        unsigned int mask = 0;

        mutex_lock(&queue->mutex);
        if (list_empty(&queue->mainqueue)) {
                mask |= POLLERR;
                goto done;
        }
        buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);

        poll_wait(file, &buf->wait, wait);
        if (buf->state == UVC_BUF_STATE_DONE ||
            buf->state == UVC_BUF_STATE_ERROR) {
                if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                        mask |= POLLIN | POLLRDNORM;
                else
                        mask |= POLLOUT | POLLWRNORM;
        }

done:
        mutex_unlock(&queue->mutex);
        return mask;
}
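
/*
 * Illustrative sketch, not part of the original file: the device poll handler
 * can likewise delegate to uvc_queue_poll().  As above, keeping the queue in
 * file->private_data is only a convention assumed by this sketch.
 */
static unsigned int example_poll(struct file *file, poll_table *wait)
{
        struct uvc_video_queue *queue = file->private_data;

        return uvc_queue_poll(queue, file, wait);
}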

#ifndef CONFIG_MMU
/*
 * Get unmapped area.
 *
 * No-MMU architectures need this function to make mmap() work correctly.
 */
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
                unsigned long pgoff)
{
        struct uvc_buffer *buffer;
        unsigned int i;
        unsigned long ret;

        mutex_lock(&queue->mutex);
        for (i = 0; i < queue->count; ++i) {
                buffer = &queue->buffer[i];
                if ((buffer->buf.m.offset >> PAGE_SHIFT) == pgoff)
                        break;
        }
        if (i == queue->count) {
                ret = -EINVAL;
                goto done;
        }
        ret = (unsigned long)queue->mem + buffer->buf.m.offset;
done:
        mutex_unlock(&queue->mutex);
        return ret;
}
#endif

/*
 * Enable or disable the video buffers queue.
 *
 * The queue must be enabled before starting video acquisition and must be
 * disabled after stopping it. This ensures that the video buffers queue
 * state can be properly initialized before buffers are accessed from the
 * interrupt handler.
 *
 * Enabling the video queue returns -EBUSY if the queue is already enabled.
 *
 * Disabling the video queue cancels the queue and removes all buffers from
 * the main queue.
 *
 * This function can't be called from interrupt context. Use
 * uvc_queue_cancel() instead.
 */
int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
{
        unsigned int i;
        int ret = 0;

        mutex_lock(&queue->mutex);
        if (enable) {
                if (uvc_queue_streaming(queue)) {
                        ret = -EBUSY;
                        goto done;
                }
                queue->flags |= UVC_QUEUE_STREAMING;
                queue->buf_used = 0;
        } else {
                uvc_queue_cancel(queue, 0);
                INIT_LIST_HEAD(&queue->mainqueue);

                for (i = 0; i < queue->count; ++i) {
                        queue->buffer[i].error = 0;
                        queue->buffer[i].state = UVC_BUF_STATE_IDLE;
                }

                queue->flags &= ~UVC_QUEUE_STREAMING;
        }

done:
        mutex_unlock(&queue->mutex);
        return ret;
}
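
/*
 * Illustrative sketch, not part of the original file: VIDIOC_STREAMON would
 * enable the queue before the device is asked to stream, and VIDIOC_STREAMOFF
 * would stop the hardware first and then disable the queue so that all buffer
 * states are reset.  The hardware start/stop steps are left as comments.
 */
static int example_streamon(struct uvc_video_queue *queue)
{
        int ret = uvc_queue_enable(queue, 1);

        if (ret < 0)
                return ret;

        /* ... submit the URBs / start the video stream here ... */
        return 0;
}

static void example_streamoff(struct uvc_video_queue *queue)
{
        /* ... stop the video stream and kill the URBs here ... */

        uvc_queue_enable(queue, 0);
}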

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
        struct uvc_buffer *buf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        while (!list_empty(&queue->irqqueue)) {
                buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                       queue);
                list_del(&buf->queue);
                buf->state = UVC_BUF_STATE_ERROR;
                wake_up(&buf->wait);
        }
        /* This must be protected by the irqlock spinlock to avoid race
         * conditions between uvc_queue_buffer and the disconnection event that
         * could result in an interruptible wait in uvc_dequeue_buffer. Do not
         * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
         * state outside the queue code.
         */
        if (disconnect)
                queue->flags |= UVC_QUEUE_DISCONNECTED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
}

struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
                struct uvc_buffer *buf)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
                buf->error = 0;
                buf->state = UVC_BUF_STATE_QUEUED;
                buf->buf.bytesused = 0;
                return buf;
        }

        spin_lock_irqsave(&queue->irqlock, flags);
        list_del(&buf->queue);
        buf->error = 0;
        buf->state = UVC_BUF_STATE_DONE;
        if (!list_empty(&queue->irqqueue))
                nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                           queue);
        else
                nextbuf = NULL;
        spin_unlock_irqrestore(&queue->irqlock, flags);

        wake_up(&buf->wait);
        return nextbuf;
}
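
/*
 * Illustrative sketch, not part of the original file: skeleton of the URB
 * completion path described in the comment at the top of this file.  The
 * decode step that copies payload data into the buffer is elided; only the
 * queue operations are shown, and the function signature is hypothetical.
 */
static void example_complete(struct uvc_video_queue *queue,
                struct uvc_buffer **active, int urb_status)
{
        struct uvc_buffer *buf = *active;

        if (urb_status) {
                /* Fatal error or disconnection: mark all queued buffers as
                 * erroneous and wake their waiters.  Passing disconnect == 1
                 * also makes further uvc_queue_buffer() calls fail. */
                uvc_queue_cancel(queue, urb_status == -ENODEV);
                *active = NULL;
                return;
        }

        if (buf == NULL)
                return; /* irq queue was empty, drop the payload */

        /* ... copy the URB payload into the buffer here ... */

        /* Frame complete: hand the buffer back to user space and fetch the
         * next empty buffer from the irq queue (NULL if none is left). */
        *active = uvc_queue_next_buffer(queue, buf);
}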