author     Bhupesh Sharma <bhupesh.sharma@st.com>      2013-03-28 05:41:52 -0400
committer  Felipe Balbi <balbi@ti.com>                 2013-04-02 04:42:48 -0400
commit     d692522577c051058efbe9e3c8aef68a4c36e4f7 (patch)
tree       68e82cc467c8c507980037d0bb860ee8dbe4cfed /drivers/usb/gadget/uvc_queue.c
parent     225da3e3cb1f0db9e4cb7fa2a7dc3a360d1cf788 (diff)
usb: gadget/uvc: Port UVC webcam gadget to use videobuf2 framework
This patch reworks the video buffer management logic in the UVC webcam
gadget and ports it to the videobuf2 framework, which is better suited
to the task.
To route video data captured from a real V4L2 video capture device to
the UVC gadget with zero copies of the video buffers (as they pass from
the V4L2 domain to the UVC domain via a user-space application), the
UVC gadget side needs to support the USER_PTR IO method.
The V4L2 capture device driver can thus continue to use the MMAP IO
method, while the user-space application simply passes the pointers of
the buffers it dequeues from the V4L2 device when queueing them at the
UVC gadget end. This gives a zero-copy path for the video buffers on
their way from the V4L2 capture device to the UVC gadget.
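As an illustration, here is a minimal sketch of the user-space relay loop
described above. The descriptors, buffer bookkeeping and error handling
(capture_fd, gadget_fd, mmap_base, relay_one_frame) are hypothetical and
heavily trimmed; only the ioctl sequence (VIDIOC_DQBUF with V4L2_MEMORY_MMAP
on the capture side, VIDIOC_QBUF with V4L2_MEMORY_USERPTR on the gadget side)
reflects the standard V4L2 calls involved.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hypothetical user-space relay: capture_fd is a V4L2 capture device using
 * MMAP buffers, gadget_fd is the UVC gadget opened for output in USERPTR
 * mode, and mmap_base[] holds the pointers returned by mmap() for the
 * capture buffers.
 */
static int relay_one_frame(int capture_fd, int gadget_fd, void *mmap_base[])
{
        struct v4l2_buffer cap_buf;
        struct v4l2_buffer out_buf;

        memset(&cap_buf, 0, sizeof(cap_buf));
        cap_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        cap_buf.memory = V4L2_MEMORY_MMAP;
        if (ioctl(capture_fd, VIDIOC_DQBUF, &cap_buf) < 0)
                return -1;

        /* Hand the same memory to the gadget: the image data is not copied. */
        memset(&out_buf, 0, sizeof(out_buf));
        out_buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
        out_buf.memory = V4L2_MEMORY_USERPTR;
        out_buf.index = cap_buf.index;
        out_buf.m.userptr = (unsigned long)mmap_base[cap_buf.index];
        out_buf.length = cap_buf.length;
        out_buf.bytesused = cap_buf.bytesused;
        if (ioctl(gadget_fd, VIDIOC_QBUF, &out_buf) < 0)
                return -1;

        /* Once the gadget has consumed the buffer it is dequeued again and
         * the capture buffer can be re-queued on capture_fd (not shown).
         */
        return 0;
}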
Note that UVC-specific payload headers still have to be prepended to
each UVC payload, which still requires a copy in the 'encode' routines
of the UVC gadget.
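For context, a rough sketch of what such a payload header looks like. The
two-byte layout (bHeaderLength, bmHeaderInfo) and the UVC_STREAM_* flags
follow the USB Video Class payload header definitions in
<linux/usb/video.h>; the helper itself (uvc_encode_payload) is purely
illustrative and is not part of this patch.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/usb/video.h>

/* Illustrative only: prepend a minimal 2-byte UVC payload header and copy
 * 'len' bytes of image data behind it. 'fid' toggles on every new frame and
 * 'eof' is set on the last payload of a frame.
 */
static unsigned int uvc_encode_payload(u8 *dest, const u8 *data,
                                       unsigned int len, int fid, int eof)
{
        dest[0] = 2;                            /* bHeaderLength */
        dest[1] = UVC_STREAM_EOH                /* bmHeaderInfo: end of header */
                | (fid ? UVC_STREAM_FID : 0)    /* frame identifier toggle */
                | (eof ? UVC_STREAM_EOF : 0);   /* end of frame */

        memcpy(dest + 2, data, len);            /* the copy that remains */

        return 2 + len;
}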
This patch also addresses one issue found while porting the UVC gadget
to the videobuf2 framework:
- If the USB requests queued by the gadget complete with a status of
  -ESHUTDOWN (disconnected from the host), the videobuf2 queue must be
  cancelled, so that the user-space daemon is not left waiting forever
  for a vb2 buffer to be consumed on the USB side (a rough sketch of
  this path follows below).
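For illustration only, here is a rough sketch of how the request completion
handler (which lives in uvc_video.c and is therefore not part of the diffstat
below) can trigger that cancellation. The function and field names
(uvc_video_complete, req->context, video->queue) are assumptions based on the
existing gadget code, not something introduced by this patch.

/* Sketch of the uvc_video.c completion handler (not part of this diff).
 * On -ESHUTDOWN the host is gone, so the videobuf2 queue is cancelled:
 * uvc_queue_cancel() returns every pending buffer to user space with
 * vb2_buffer_done(..., VB2_BUF_STATE_ERROR) and wakes the waiting daemon.
 */
static void uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct uvc_video *video = req->context;
        struct uvc_video_queue *queue = &video->queue;

        switch (req->status) {
        case 0:
                break;

        case -ESHUTDOWN:
                /* Disconnected from the host. */
                uvc_queue_cancel(queue, 1);
                break;

        default:
                uvc_queue_cancel(queue, 0);
                break;
        }

        /* Re-queuing of the USB request for the next payload is omitted. */
}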
Signed-off-by: Bhupesh Sharma <bhupesh.sharma@st.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Diffstat (limited to 'drivers/usb/gadget/uvc_queue.c')
-rw-r--r--   drivers/usb/gadget/uvc_queue.c | 532
1 file changed, 159 insertions(+), 373 deletions(-)
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index 104ae9c81251..31397954c889 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -10,6 +10,7 @@
  * (at your option) any later version.
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/list.h>
@@ -18,7 +19,8 @@
 #include <linux/videodev2.h>
 #include <linux/vmalloc.h>
 #include <linux/wait.h>
-#include <linux/atomic.h>
+
+#include <media/videobuf2-vmalloc.h>
 
 #include "uvc.h"
 
@@ -28,330 +30,175 @@
  * Video queues is initialized by uvc_queue_init(). The function performs
  * basic initialization of the uvc_video_queue struct and never fails.
  *
- * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
- * uvc_free_buffers respectively. The former acquires the video queue lock,
- * while the later must be called with the lock held (so that allocation can
- * free previously allocated buffers). Trying to free buffers that are mapped
- * to user space will return -EBUSY.
- *
- * Video buffers are managed using two queues. However, unlike most USB video
- * drivers that use an in queue and an out queue, we use a main queue to hold
- * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
- * hold empty buffers. This design (copied from video-buf) minimizes locking
- * in interrupt, as only one queue is shared between interrupt and user
- * contexts.
- *
- * Use cases
- * ---------
- *
- * Unless stated otherwise, all operations that modify the irq buffers queue
- * are protected by the irq spinlock.
- *
- * 1. The user queues the buffers, starts streaming and dequeues a buffer.
- *
- *    The buffers are added to the main and irq queues. Both operations are
- *    protected by the queue lock, and the later is protected by the irq
- *    spinlock as well.
- *
- *    The completion handler fetches a buffer from the irq queue and fills it
- *    with video data. If no buffer is available (irq queue empty), the handler
- *    returns immediately.
- *
- *    When the buffer is full, the completion handler removes it from the irq
- *    queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue.
- *    At that point, any process waiting on the buffer will be woken up. If a
- *    process tries to dequeue a buffer after it has been marked ready, the
- *    dequeing will succeed immediately.
- *
- * 2. Buffers are queued, user is waiting on a buffer and the device gets
- *    disconnected.
- *
- *    When the device is disconnected, the kernel calls the completion handler
- *    with an appropriate status code. The handler marks all buffers in the
- *    irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
- *    that any process waiting on a buffer gets woken up.
- *
- *    Waking up up the first buffer on the irq list is not enough, as the
- *    process waiting on the buffer might restart the dequeue operation
- *    immediately.
- *
+ * Video buffers are managed by videobuf2. The driver uses a mutex to protect
+ * the videobuf2 queue operations by serializing calls to videobuf2 and a
+ * spinlock to protect the IRQ queue that holds the buffers to be processed by
+ * the driver.
  */
 
-static void
-uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
-{
-        mutex_init(&queue->mutex);
-        spin_lock_init(&queue->irqlock);
-        INIT_LIST_HEAD(&queue->mainqueue);
-        INIT_LIST_HEAD(&queue->irqqueue);
-        queue->type = type;
-}
-
-/*
- * Free the video buffers.
- *
- * This function must be called with the queue lock held.
+/* -----------------------------------------------------------------------------
+ * videobuf2 queue operations
  */
-static int uvc_free_buffers(struct uvc_video_queue *queue)
+
+static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+                           unsigned int *nbuffers, unsigned int *nplanes,
+                           unsigned int sizes[], void *alloc_ctxs[])
 {
-        unsigned int i;
+        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+        struct uvc_video *video = container_of(queue, struct uvc_video, queue);
 
-        for (i = 0; i < queue->count; ++i) {
-                if (queue->buffer[i].vma_use_count != 0)
-                        return -EBUSY;
-        }
+        if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
+                *nbuffers = UVC_MAX_VIDEO_BUFFERS;
 
-        if (queue->count) {
-                vfree(queue->mem);
-                queue->count = 0;
-        }
+        *nplanes = 1;
+
+        sizes[0] = video->imagesize;
 
         return 0;
 }
 
-/*
- * Allocate the video buffers.
- *
- * Pages are reserved to make sure they will not be swapped, as they will be
- * filled in the URB completion handler.
- *
- * Buffers will be individually mapped, so they must all be page aligned.
- */
-static int
-uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
-                  unsigned int buflength)
+static int uvc_buffer_prepare(struct vb2_buffer *vb)
 {
-        unsigned int bufsize = PAGE_ALIGN(buflength);
-        unsigned int i;
-        void *mem = NULL;
-        int ret;
+        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
+        struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
 
-        if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
-                nbuffers = UVC_MAX_VIDEO_BUFFERS;
+        if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+            vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
+                uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
+                return -EINVAL;
+        }
 
-        mutex_lock(&queue->mutex);
+        if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
+                return -ENODEV;
 
-        if ((ret = uvc_free_buffers(queue)) < 0)
-                goto done;
+        buf->state = UVC_BUF_STATE_QUEUED;
+        buf->mem = vb2_plane_vaddr(vb, 0);
+        buf->length = vb2_plane_size(vb, 0);
+        if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+                buf->bytesused = 0;
+        else
+                buf->bytesused = vb2_get_plane_payload(vb, 0);
 
-        /* Bail out if no buffers should be allocated. */
-        if (nbuffers == 0)
-                goto done;
+        return 0;
+}
 
-        /* Decrement the number of buffers until allocation succeeds. */
-        for (; nbuffers > 0; --nbuffers) {
-                mem = vmalloc_32(nbuffers * bufsize);
-                if (mem != NULL)
-                        break;
-        }
+static void uvc_buffer_queue(struct vb2_buffer *vb)
+{
+        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
+        struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+        unsigned long flags;
 
-        if (mem == NULL) {
-                ret = -ENOMEM;
-                goto done;
-        }
+        spin_lock_irqsave(&queue->irqlock, flags);
 
-        for (i = 0; i < nbuffers; ++i) {
-                memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
-                queue->buffer[i].buf.index = i;
-                queue->buffer[i].buf.m.offset = i * bufsize;
-                queue->buffer[i].buf.length = buflength;
-                queue->buffer[i].buf.type = queue->type;
-                queue->buffer[i].buf.sequence = 0;
-                queue->buffer[i].buf.field = V4L2_FIELD_NONE;
-                queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
-                queue->buffer[i].buf.flags = 0;
-                init_waitqueue_head(&queue->buffer[i].wait);
+        if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
+                list_add_tail(&buf->queue, &queue->irqqueue);
+        } else {
+                /* If the device is disconnected return the buffer to userspace
+                 * directly. The next QBUF call will fail with -ENODEV.
+                 */
+                buf->state = UVC_BUF_STATE_ERROR;
+                vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
         }
 
-        queue->mem = mem;
-        queue->count = nbuffers;
-        queue->buf_size = bufsize;
-        ret = nbuffers;
-
-done:
-        mutex_unlock(&queue->mutex);
-        return ret;
+        spin_unlock_irqrestore(&queue->irqlock, flags);
 }
 
-static void __uvc_query_buffer(struct uvc_buffer *buf,
-                               struct v4l2_buffer *v4l2_buf)
-{
-        memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);
-
-        if (buf->vma_use_count)
-                v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;
-
-        switch (buf->state) {
-        case UVC_BUF_STATE_ERROR:
-        case UVC_BUF_STATE_DONE:
-                v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
-                break;
-        case UVC_BUF_STATE_QUEUED:
-        case UVC_BUF_STATE_ACTIVE:
-                v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
-                break;
-        case UVC_BUF_STATE_IDLE:
-        default:
-                break;
-        }
-}
+static struct vb2_ops uvc_queue_qops = {
+        .queue_setup = uvc_queue_setup,
+        .buf_prepare = uvc_buffer_prepare,
+        .buf_queue = uvc_buffer_queue,
+};
 
-static int
-uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
+static int uvc_queue_init(struct uvc_video_queue *queue,
+                          enum v4l2_buf_type type)
 {
-        int ret = 0;
+        int ret;
 
-        mutex_lock(&queue->mutex);
-        if (v4l2_buf->index >= queue->count) {
-                ret = -EINVAL;
-                goto done;
-        }
+        queue->queue.type = type;
+        queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
+        queue->queue.drv_priv = queue;
+        queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
+        queue->queue.ops = &uvc_queue_qops;
+        queue->queue.mem_ops = &vb2_vmalloc_memops;
+        ret = vb2_queue_init(&queue->queue);
+        if (ret)
+                return ret;
+
+        mutex_init(&queue->mutex);
+        spin_lock_init(&queue->irqlock);
+        INIT_LIST_HEAD(&queue->irqqueue);
+        queue->flags = 0;
 
-        __uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);
+        return 0;
+}
 
-done:
+/*
+ * Free the video buffers.
+ */
+static void uvc_free_buffers(struct uvc_video_queue *queue)
+{
+        mutex_lock(&queue->mutex);
+        vb2_queue_release(&queue->queue);
         mutex_unlock(&queue->mutex);
-        return ret;
 }
 
 /*
- * Queue a video buffer. Attempting to queue a buffer that has already been
- * queued will return -EINVAL.
+ * Allocate the video buffers.
  */
-static int
-uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
+static int uvc_alloc_buffers(struct uvc_video_queue *queue,
+                             struct v4l2_requestbuffers *rb)
 {
-        struct uvc_buffer *buf;
-        unsigned long flags;
-        int ret = 0;
+        int ret;
 
-        uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);
+        mutex_lock(&queue->mutex);
+        ret = vb2_reqbufs(&queue->queue, rb);
+        mutex_unlock(&queue->mutex);
 
-        if (v4l2_buf->type != queue->type ||
-            v4l2_buf->memory != V4L2_MEMORY_MMAP) {
-                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
-                        "and/or memory (%u).\n", v4l2_buf->type,
-                        v4l2_buf->memory);
-                return -EINVAL;
-        }
+        return ret ? ret : rb->count;
+}
 
-        mutex_lock(&queue->mutex);
-        if (v4l2_buf->index >= queue->count) {
-                uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
-                ret = -EINVAL;
-                goto done;
-        }
+static int uvc_query_buffer(struct uvc_video_queue *queue,
+                            struct v4l2_buffer *buf)
+{
+        int ret;
 
-        buf = &queue->buffer[v4l2_buf->index];
-        if (buf->state != UVC_BUF_STATE_IDLE) {
-                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
-                        "(%u).\n", buf->state);
-                ret = -EINVAL;
-                goto done;
-        }
+        mutex_lock(&queue->mutex);
+        ret = vb2_querybuf(&queue->queue, buf);
+        mutex_unlock(&queue->mutex);
 
-        if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
-            v4l2_buf->bytesused > buf->buf.length) {
-                uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
-                ret = -EINVAL;
-                goto done;
-        }
+        return ret;
+}
 
-        if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
-                buf->buf.bytesused = 0;
-        else
-                buf->buf.bytesused = v4l2_buf->bytesused;
+static int uvc_queue_buffer(struct uvc_video_queue *queue,
+                            struct v4l2_buffer *buf)
+{
+        unsigned long flags;
+        int ret;
 
+        mutex_lock(&queue->mutex);
+        ret = vb2_qbuf(&queue->queue, buf);
         spin_lock_irqsave(&queue->irqlock, flags);
-        if (queue->flags & UVC_QUEUE_DISCONNECTED) {
-                spin_unlock_irqrestore(&queue->irqlock, flags);
-                ret = -ENODEV;
-                goto done;
-        }
-        buf->state = UVC_BUF_STATE_QUEUED;
-
         ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
         queue->flags &= ~UVC_QUEUE_PAUSED;
-
-        list_add_tail(&buf->stream, &queue->mainqueue);
-        list_add_tail(&buf->queue, &queue->irqqueue);
         spin_unlock_irqrestore(&queue->irqlock, flags);
-
-done:
         mutex_unlock(&queue->mutex);
-        return ret;
-}
-
-static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
-{
-        if (nonblocking) {
-                return (buf->state != UVC_BUF_STATE_QUEUED &&
-                        buf->state != UVC_BUF_STATE_ACTIVE)
-                        ? 0 : -EAGAIN;
-        }
 
-        return wait_event_interruptible(buf->wait,
-                buf->state != UVC_BUF_STATE_QUEUED &&
-                buf->state != UVC_BUF_STATE_ACTIVE);
+        return ret;
 }
 
 /*
  * Dequeue a video buffer. If nonblocking is false, block until a buffer is
  * available.
  */
-static int
-uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf,
-                   int nonblocking)
+static int uvc_dequeue_buffer(struct uvc_video_queue *queue,
+                              struct v4l2_buffer *buf, int nonblocking)
 {
-        struct uvc_buffer *buf;
-        int ret = 0;
-
-        if (v4l2_buf->type != queue->type ||
-            v4l2_buf->memory != V4L2_MEMORY_MMAP) {
-                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
-                        "and/or memory (%u).\n", v4l2_buf->type,
-                        v4l2_buf->memory);
-                return -EINVAL;
-        }
+        int ret;
 
         mutex_lock(&queue->mutex);
-        if (list_empty(&queue->mainqueue)) {
-                uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
-                ret = -EINVAL;
-                goto done;
-        }
-
-        buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
-        if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
-                goto done;
-
-        uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
-                buf->buf.index, buf->state, buf->buf.bytesused);
-
-        switch (buf->state) {
-        case UVC_BUF_STATE_ERROR:
-                uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
-                        "(transmission error).\n");
-                ret = -EIO;
-        case UVC_BUF_STATE_DONE:
-                buf->state = UVC_BUF_STATE_IDLE;
-                break;
-
-        case UVC_BUF_STATE_IDLE:
-        case UVC_BUF_STATE_QUEUED:
-        case UVC_BUF_STATE_ACTIVE:
-        default:
-                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
-                        "(driver bug?).\n", buf->state);
-                ret = -EINVAL;
-                goto done;
-        }
-
-        list_del(&buf->stream);
-        __uvc_query_buffer(buf, v4l2_buf);
-
-done:
+        ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
         mutex_unlock(&queue->mutex);
+
         return ret;
 }
 
@@ -361,103 +208,27 @@ done:
  * This function implements video queue polling and is intended to be used by
  * the device poll handler.
  */
-static unsigned int
-uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
-               poll_table *wait)
+static unsigned int uvc_queue_poll(struct uvc_video_queue *queue,
+                                   struct file *file, poll_table *wait)
 {
-        struct uvc_buffer *buf;
-        unsigned int mask = 0;
+        unsigned int ret;
 
         mutex_lock(&queue->mutex);
-        if (list_empty(&queue->mainqueue))
-                goto done;
-
-        buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
-
-        poll_wait(file, &buf->wait, wait);
-        if (buf->state == UVC_BUF_STATE_DONE ||
-            buf->state == UVC_BUF_STATE_ERROR)
-                mask |= POLLOUT | POLLWRNORM;
-
-done:
+        ret = vb2_poll(&queue->queue, file, wait);
         mutex_unlock(&queue->mutex);
-        return mask;
-}
 
-/*
- * VMA operations.
- */
-static void uvc_vm_open(struct vm_area_struct *vma)
-{
-        struct uvc_buffer *buffer = vma->vm_private_data;
-        buffer->vma_use_count++;
-}
-
-static void uvc_vm_close(struct vm_area_struct *vma)
-{
-        struct uvc_buffer *buffer = vma->vm_private_data;
-        buffer->vma_use_count--;
+        return ret;
 }
 
-static struct vm_operations_struct uvc_vm_ops = {
-        .open = uvc_vm_open,
-        .close = uvc_vm_close,
-};
-
-/*
- * Memory-map a buffer.
- *
- * This function implements video buffer memory mapping and is intended to be
- * used by the device mmap handler.
- */
-static int
-uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
+static int uvc_queue_mmap(struct uvc_video_queue *queue,
+                          struct vm_area_struct *vma)
 {
-        struct uvc_buffer *uninitialized_var(buffer);
-        struct page *page;
-        unsigned long addr, start, size;
-        unsigned int i;
-        int ret = 0;
-
-        start = vma->vm_start;
-        size = vma->vm_end - vma->vm_start;
+        int ret;
 
         mutex_lock(&queue->mutex);
-
-        for (i = 0; i < queue->count; ++i) {
-                buffer = &queue->buffer[i];
-                if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
-                        break;
-        }
-
-        if (i == queue->count || size != queue->buf_size) {
-                ret = -EINVAL;
-                goto done;
-        }
-
-        /*
-         * VM_IO marks the area as being an mmaped region for I/O to a
-         * device. It also prevents the region from being core dumped.
-         */
-        vma->vm_flags |= VM_IO;
-
-        addr = (unsigned long)queue->mem + buffer->buf.m.offset;
-        while (size > 0) {
-                page = vmalloc_to_page((void *)addr);
-                if ((ret = vm_insert_page(vma, start, page)) < 0)
-                        goto done;
-
-                start += PAGE_SIZE;
-                addr += PAGE_SIZE;
-                size -= PAGE_SIZE;
-        }
-
-        vma->vm_ops = &uvc_vm_ops;
-        vma->vm_private_data = buffer;
-        uvc_vm_open(vma);
-
-done:
+        ret = vb2_mmap(&queue->queue, vma);
         mutex_unlock(&queue->mutex);
+
         return ret;
 }
 
@@ -484,7 +255,7 @@ static void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
                                        queue);
                 list_del(&buf->queue);
                 buf->state = UVC_BUF_STATE_ERROR;
-                wake_up(&buf->wait);
+                vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
         }
         /* This must be protected by the irqlock spinlock to avoid race
          * conditions between uvc_queue_buffer and the disconnection event that
@@ -516,26 +287,33 @@ static void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
  */
 static int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
 {
-        unsigned int i;
+        unsigned long flags;
         int ret = 0;
 
         mutex_lock(&queue->mutex);
         if (enable) {
-                if (uvc_queue_streaming(queue)) {
-                        ret = -EBUSY;
+                ret = vb2_streamon(&queue->queue, queue->queue.type);
+                if (ret < 0)
                         goto done;
-                }
+
                 queue->sequence = 0;
-                queue->flags |= UVC_QUEUE_STREAMING;
                 queue->buf_used = 0;
         } else {
-                uvc_queue_cancel(queue, 0);
-                INIT_LIST_HEAD(&queue->mainqueue);
+                ret = vb2_streamoff(&queue->queue, queue->queue.type);
+                if (ret < 0)
+                        goto done;
 
-                for (i = 0; i < queue->count; ++i)
-                        queue->buffer[i].state = UVC_BUF_STATE_IDLE;
+                spin_lock_irqsave(&queue->irqlock, flags);
+                INIT_LIST_HEAD(&queue->irqqueue);
 
-                queue->flags &= ~UVC_QUEUE_STREAMING;
+                /*
+                 * FIXME: We need to clear the DISCONNECTED flag to ensure that
+                 * applications will be able to queue buffers for the next
+                 * streaming run. However, clearing it here doesn't guarantee
+                 * that the device will be reconnected in the meantime.
+                 */
+                queue->flags &= ~UVC_QUEUE_DISCONNECTED;
+                spin_unlock_irqrestore(&queue->irqlock, flags);
         }
 
 done:
@@ -544,15 +322,15 @@ done:
 }
 
 /* called with &queue_irqlock held.. */
-static struct uvc_buffer *
-uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
+static struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
+                                                struct uvc_buffer *buf)
 {
         struct uvc_buffer *nextbuf;
 
         if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
-            buf->buf.length != buf->buf.bytesused) {
+            buf->length != buf->bytesused) {
                 buf->state = UVC_BUF_STATE_QUEUED;
-                buf->buf.bytesused = 0;
+                vb2_set_plane_payload(&buf->buf, 0, 0);
                 return buf;
         }
 
@@ -563,10 +341,18 @@ uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
         else
                 nextbuf = NULL;
 
-        buf->buf.sequence = queue->sequence++;
-        do_gettimeofday(&buf->buf.timestamp);
+        /*
+         * FIXME: with videobuf2, the sequence number or timestamp fields
+         * are valid only for video capture devices and the UVC gadget usually
+         * is a video output device. Keeping these until the specs are clear on
+         * this aspect.
+         */
+        buf->buf.v4l2_buf.sequence = queue->sequence++;
+        do_gettimeofday(&buf->buf.v4l2_buf.timestamp);
+
+        vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
+        vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
 
-        wake_up(&buf->wait);
         return nextbuf;
 }
 