Diffstat (limited to 'drivers/usb/gadget/uvc_queue.c')
-rw-r--r--	drivers/usb/gadget/uvc_queue.c	153
1 file changed, 79 insertions(+), 74 deletions(-)
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index 43891991bf21..f7395ac5dc17 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -78,7 +78,8 @@
  *
  */
 
-void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
+static void
+uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
 {
 	mutex_init(&queue->mutex);
 	spin_lock_init(&queue->irqlock);
@@ -88,6 +89,28 @@ void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
 }
 
 /*
+ * Free the video buffers.
+ *
+ * This function must be called with the queue lock held.
+ */
+static int uvc_free_buffers(struct uvc_video_queue *queue)
+{
+	unsigned int i;
+
+	for (i = 0; i < queue->count; ++i) {
+		if (queue->buffer[i].vma_use_count != 0)
+			return -EBUSY;
+	}
+
+	if (queue->count) {
+		vfree(queue->mem);
+		queue->count = 0;
+	}
+
+	return 0;
+}
+
+/*
  * Allocate the video buffers.
  *
  * Pages are reserved to make sure they will not be swapped, as they will be
@@ -95,8 +118,9 @@ void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
  *
  * Buffers will be individually mapped, so they must all be page aligned.
  */
-int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
-		unsigned int buflength)
+static int
+uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
+		  unsigned int buflength)
 {
 	unsigned int bufsize = PAGE_ALIGN(buflength);
 	unsigned int i;
@@ -150,28 +174,6 @@ done:
 	return ret;
 }
 
-/*
- * Free the video buffers.
- *
- * This function must be called with the queue lock held.
- */
-int uvc_free_buffers(struct uvc_video_queue *queue)
-{
-	unsigned int i;
-
-	for (i = 0; i < queue->count; ++i) {
-		if (queue->buffer[i].vma_use_count != 0)
-			return -EBUSY;
-	}
-
-	if (queue->count) {
-		vfree(queue->mem);
-		queue->count = 0;
-	}
-
-	return 0;
-}
-
 static void __uvc_query_buffer(struct uvc_buffer *buf,
 		struct v4l2_buffer *v4l2_buf)
 {
@@ -195,8 +197,8 @@ static void __uvc_query_buffer(struct uvc_buffer *buf,
 	}
 }
 
-int uvc_query_buffer(struct uvc_video_queue *queue,
-		struct v4l2_buffer *v4l2_buf)
+static int
+uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
 {
 	int ret = 0;
 
@@ -217,8 +219,8 @@ done:
  * Queue a video buffer. Attempting to queue a buffer that has already been
  * queued will return -EINVAL.
  */
-int uvc_queue_buffer(struct uvc_video_queue *queue,
-		struct v4l2_buffer *v4l2_buf)
+static int
+uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
 {
 	struct uvc_buffer *buf;
 	unsigned long flags;
@@ -298,8 +300,9 @@ static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
 * Dequeue a video buffer. If nonblocking is false, block until a buffer is
 * available.
 */
-int uvc_dequeue_buffer(struct uvc_video_queue *queue,
-		struct v4l2_buffer *v4l2_buf, int nonblocking)
+static int
+uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf,
+		   int nonblocking)
 {
 	struct uvc_buffer *buf;
 	int ret = 0;
@@ -359,8 +362,9 @@ done:
 * This function implements video queue polling and is intended to be used by
 * the device poll handler.
 */
-unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
-		poll_table *wait)
+static unsigned int
+uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
+	       poll_table *wait)
 {
 	struct uvc_buffer *buf;
 	unsigned int mask = 0;
@@ -407,7 +411,8 @@ static struct vm_operations_struct uvc_vm_ops = {
 * This function implements video buffer memory mapping and is intended to be
 * used by the device mmap handler.
 */
-int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
+static int
+uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
 {
 	struct uvc_buffer *uninitialized_var(buffer);
 	struct page *page;
@@ -458,6 +463,42 @@ done:
 }
 
 /*
+ * Cancel the video buffers queue.
+ *
+ * Cancelling the queue marks all buffers on the irq queue as erroneous,
+ * wakes them up and removes them from the queue.
+ *
+ * If the disconnect parameter is set, further calls to uvc_queue_buffer will
+ * fail with -ENODEV.
+ *
+ * This function acquires the irq spinlock and can be called from interrupt
+ * context.
+ */
+static void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
+{
+	struct uvc_buffer *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->irqlock, flags);
+	while (!list_empty(&queue->irqqueue)) {
+		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
+				       queue);
+		list_del(&buf->queue);
+		buf->state = UVC_BUF_STATE_ERROR;
+		wake_up(&buf->wait);
+	}
+	/* This must be protected by the irqlock spinlock to avoid race
+	 * conditions between uvc_queue_buffer and the disconnection event that
+	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
+	 * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
+	 * state outside the queue code.
+	 */
+	if (disconnect)
+		queue->flags |= UVC_QUEUE_DISCONNECTED;
+	spin_unlock_irqrestore(&queue->irqlock, flags);
+}
+
+/*
  * Enable or disable the video buffers queue.
  *
  * The queue must be enabled before starting video acquisition and must be
@@ -474,7 +515,7 @@ done:
 * This function can't be called from interrupt context. Use
 * uvc_queue_cancel() instead.
 */
-int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
+static int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
 {
 	unsigned int i;
 	int ret = 0;
@@ -503,44 +544,8 @@ done:
 	return ret;
 }
 
-/*
- * Cancel the video buffers queue.
- *
- * Cancelling the queue marks all buffers on the irq queue as erroneous,
- * wakes them up and removes them from the queue.
- *
- * If the disconnect parameter is set, further calls to uvc_queue_buffer will
- * fail with -ENODEV.
- *
- * This function acquires the irq spinlock and can be called from interrupt
- * context.
- */
-void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
-{
-	struct uvc_buffer *buf;
-	unsigned long flags;
-
-	spin_lock_irqsave(&queue->irqlock, flags);
-	while (!list_empty(&queue->irqqueue)) {
-		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
-				       queue);
-		list_del(&buf->queue);
-		buf->state = UVC_BUF_STATE_ERROR;
-		wake_up(&buf->wait);
-	}
-	/* This must be protected by the irqlock spinlock to avoid race
-	 * conditions between uvc_queue_buffer and the disconnection event that
-	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
-	 * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
-	 * state outside the queue code.
-	 */
-	if (disconnect)
-		queue->flags |= UVC_QUEUE_DISCONNECTED;
-	spin_unlock_irqrestore(&queue->irqlock, flags);
-}
-
-struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
-		struct uvc_buffer *buf)
+static struct uvc_buffer *
+uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
 {
 	struct uvc_buffer *nextbuf;
 	unsigned long flags;
@@ -568,7 +573,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
 	return nextbuf;
 }
 
-struct uvc_buffer *uvc_queue_head(struct uvc_video_queue *queue)
+static struct uvc_buffer *uvc_queue_head(struct uvc_video_queue *queue)
 {
 	struct uvc_buffer *buf = NULL;
 
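
Note (illustration, not part of the patch): the comments carried over by this change state that uvc_free_buffers() must be called with the queue lock held and that it returns -EBUSY while any buffer is still mapped into user space. The sketch below only shows how a hypothetical caller inside uvc_queue.c would be expected to honour that rule; the name uvc_queue_release_example is made up and does not exist in this file.

/* Hypothetical caller (illustration only): uvc_free_buffers() requires
 * queue->mutex to be held, as its comment demands. It fails with -EBUSY
 * while any buffer is still mmap()ed.
 */
static int uvc_queue_release_example(struct uvc_video_queue *queue)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = uvc_free_buffers(queue);
	mutex_unlock(&queue->mutex);

	return ret;
}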