author     Mauro Carvalho Chehab <mchehab@redhat.com>   2009-03-03 04:14:34 -0500
committer  Mauro Carvalho Chehab <mchehab@redhat.com>   2009-04-06 20:44:01 -0400
commit     84b5dbf39ed2f51224841bbbf08439158d69d427 (patch)
tree       b24963462dc1ad93860645d8729d1ddfc6ce526e /drivers/media/video/cx231xx/cx231xx-vbi.c
parent     e0d3bafd02586cfde286c320f56906fd9fa8d256 (diff)
V4L/DVB (10955): cx231xx: CodingStyle automatic fixes with Lindent
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Diffstat (limited to 'drivers/media/video/cx231xx/cx231xx-vbi.c')
-rw-r--r--   drivers/media/video/cx231xx/cx231xx-vbi.c   555
1 file changed, 287 insertions(+), 268 deletions(-)
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index e370160973f4..87a77d53faa6 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -2,7 +2,7 @@
    cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices
 
    Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
    Based on cx88 driver
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -39,8 +39,7 @@
 #include "cx231xx.h"
 #include "cx231xx-vbi.h"
 
-static inline void print_err_status(struct cx231xx *dev,
-                                    int packet, int status)
+static inline void print_err_status(struct cx231xx *dev, int packet, int status)
 {
         char *errmsg = "Unknown";
 
@@ -71,10 +70,11 @@ static inline void print_err_status(struct cx231xx *dev,
                 break;
         }
         if (packet < 0) {
-                cx231xx_err(DRIVER_NAME "URB status %d [%s].\n", status, errmsg);
+                cx231xx_err(DRIVER_NAME "URB status %d [%s].\n", status,
+                            errmsg);
         } else {
                 cx231xx_err(DRIVER_NAME "URB packet %d, status %d [%s].\n",
                             packet, status, errmsg);
         }
 }
 
@@ -83,12 +83,12 @@ static inline void print_err_status(struct cx231xx *dev,
  */
 static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
 {
         struct cx231xx_buffer *buf;
         struct cx231xx_dmaqueue *dma_q = urb->context;
         int rc = 1;
         unsigned char *p_buffer;
         u32 bytes_parsed = 0, buffer_size = 0;
         u8 sav_eav = 0;
 
         if (!dev)
                 return 0;
@@ -104,60 +104,58 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
 
         buf = dev->vbi_mode.isoc_ctl.buf;
 
         /* get buffer pointer and length */
         p_buffer = urb->transfer_buffer;
         buffer_size = urb->actual_length;
 
         if (buffer_size > 0) {
 
                 bytes_parsed = 0;
 
-                if(dma_q->is_partial_line) {
+                if (dma_q->is_partial_line) {
                         /* Handle the case where we were working on a partial line */
                         sav_eav = dma_q->last_sav;
                 } else {
                         /* Check for a SAV/EAV overlapping the buffer boundary */
-                        sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer, dma_q->partial_buf, &bytes_parsed);
+                        sav_eav =
+                            cx231xx_find_boundary_SAV_EAV(p_buffer,
+                                                          dma_q->partial_buf,
+                                                          &bytes_parsed);
                 }
 
                 sav_eav &= 0xF0;
                 /* Get the first line if we have some portion of an SAV/EAV from the last buffer
                    or a partial line */
-                if(sav_eav) {
-                        bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
-                                sav_eav,                     /* SAV/EAV */
-                                p_buffer + bytes_parsed,     /* p_buffer */
-                                buffer_size - bytes_parsed); /* buffer size */
+                if (sav_eav) {
+                        bytes_parsed += cx231xx_get_vbi_line(dev, dma_q, sav_eav,       /* SAV/EAV */
+                                                             p_buffer + bytes_parsed,   /* p_buffer */
+                                                             buffer_size - bytes_parsed);       /* buffer size */
                 }
 
                 /* Now parse data that is completely in this buffer */
                 dma_q->is_partial_line = 0;
 
-                while(bytes_parsed < buffer_size)
-                {
+                while (bytes_parsed < buffer_size) {
                         u32 bytes_used = 0;
 
-                        sav_eav = cx231xx_find_next_SAV_EAV(
-                                p_buffer + bytes_parsed,     /* p_buffer */
-                                buffer_size - bytes_parsed,  /* buffer size */
-                                &bytes_used);  /* Receives bytes used to get SAV/EAV */
+                        sav_eav = cx231xx_find_next_SAV_EAV(p_buffer + bytes_parsed,    /* p_buffer */
+                                                            buffer_size - bytes_parsed, /* buffer size */
+                                                            &bytes_used);       /* Receives bytes used to get SAV/EAV */
 
                         bytes_parsed += bytes_used;
 
                         sav_eav &= 0xF0;
-                        if(sav_eav && (bytes_parsed < buffer_size))
-                        {
-                                bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
-                                        sav_eav,                     /* SAV/EAV */
-                                        p_buffer + bytes_parsed,     /* p_buffer */
-                                        buffer_size - bytes_parsed); /* buffer size */
+                        if (sav_eav && (bytes_parsed < buffer_size)) {
+                                bytes_parsed += cx231xx_get_vbi_line(dev, dma_q, sav_eav,       /* SAV/EAV */
+                                                                     p_buffer + bytes_parsed,   /* p_buffer */
+                                                                     buffer_size - bytes_parsed);       /* buffer size */
                         }
                 }
 
                 /* Save the last four bytes of the buffer so we can check the buffer boundary
                    condition next time */
                 memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
                 bytes_parsed = 0;
         }
 
         return rc;
@@ -168,25 +166,26 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
    ------------------------------------------------------------------*/
 
 static int
-vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
+                 unsigned int *size)
 {
         struct cx231xx_fh *fh = vq->priv_data;
         struct cx231xx *dev = fh->dev;
         u32 height = 0;
 
         height = ((dev->norm & V4L2_STD_625_50) ?
-                  PAL_VBI_LINES : NTSC_VBI_LINES) ;
+                  PAL_VBI_LINES : NTSC_VBI_LINES);
 
-        *size = ( dev->width * height * 2);
+        *size = (dev->width * height * 2);
         if (0 == *count)
                 *count = CX231XX_DEF_VBI_BUF;
 
         if (*count < CX231XX_MIN_BUF)
                 *count = CX231XX_MIN_BUF;
 
         /* call VBI setup if required */
         /* cx231xx_i2c_call_clients(&dev->i2c_bus[1], VIDIOC_S_FREQUENCY, &f);
          */
 
         return 0;
 }
@@ -194,8 +193,8 @@ vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
 /* This is called *without* dev->slock held; please keep it that way */
 static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
 {
         struct cx231xx_fh *fh = vq->priv_data;
         struct cx231xx *dev = fh->dev;
         unsigned long flags = 0;
         if (in_interrupt())
                 BUG();
@@ -208,7 +207,7 @@ static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
    This should be safe; by the time we get here, the buffer isn't
    queued anymore. If we ever start marking the buffers as
    VIDEOBUF_ACTIVE, it won't be, though.
  */
         spin_lock_irqsave(&dev->vbi_mode.slock, flags);
         if (dev->vbi_mode.isoc_ctl.buf == buf)
                 dev->vbi_mode.isoc_ctl.buf = NULL;
@@ -220,25 +219,26 @@ static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
 
 static int
 vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
                    enum v4l2_field field)
 {
         struct cx231xx_fh *fh = vq->priv_data;
-        struct cx231xx_buffer *buf = container_of(vb, struct cx231xx_buffer, vb);
+        struct cx231xx_buffer *buf =
+            container_of(vb, struct cx231xx_buffer, vb);
         struct cx231xx *dev = fh->dev;
         int rc = 0, urb_init = 0;
         u32 height = 0;
 
         height = ((dev->norm & V4L2_STD_625_50) ?
-                  PAL_VBI_LINES : NTSC_VBI_LINES) ;
-        buf->vb.size = ( (dev->width << 1) * height );
+                  PAL_VBI_LINES : NTSC_VBI_LINES);
+        buf->vb.size = ((dev->width << 1) * height);
 
         if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
                 return -EINVAL;
 
         buf->vb.width = dev->width;
         buf->vb.height = height;
         buf->vb.field = field;
         buf->vb.field = V4L2_FIELD_SEQ_TB;
 
         if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
                 rc = videobuf_iolock(vq, &buf->vb, NULL);
@@ -251,8 +251,9 @@ vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
 
         if (urb_init) {
                 rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
-                                CX231XX_NUM_VBI_BUFS, dev->vbi_mode.alt_max_pkt_size[0],
-                                cx231xx_isoc_vbi_copy);
+                                           CX231XX_NUM_VBI_BUFS,
+                                           dev->vbi_mode.alt_max_pkt_size[0],
+                                           cx231xx_isoc_vbi_copy);
                 if (rc < 0)
                         goto fail;
         }
@@ -260,7 +261,7 @@ vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
         buf->vb.state = VIDEOBUF_PREPARED;
         return 0;
 
 fail:
         free_buffer(vq, buf);
         return rc;
 }
@@ -268,10 +269,11 @@ fail:
 static void
 vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
 {
-        struct cx231xx_buffer *buf = container_of(vb, struct cx231xx_buffer, vb);
+        struct cx231xx_buffer *buf =
+            container_of(vb, struct cx231xx_buffer, vb);
         struct cx231xx_fh *fh = vq->priv_data;
         struct cx231xx *dev = fh->dev;
         struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
 
         buf->vb.state = VIDEOBUF_QUEUED;
         list_add_tail(&buf->vb.queue, &vidq->active);
@@ -279,29 +281,27 @@ vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
 }
 
 static void vbi_buffer_release(struct videobuf_queue *vq,
                                struct videobuf_buffer *vb)
 {
-        struct cx231xx_buffer *buf = container_of(vb, struct cx231xx_buffer, vb);
+        struct cx231xx_buffer *buf =
+            container_of(vb, struct cx231xx_buffer, vb);
         /*
            struct cx231xx_fh *fh = vq->priv_data;
            struct cx231xx *dev = (struct cx231xx *)fh->dev;
 
            cx231xx_info(DRIVER_NAME "cx231xx: called vbi_buffer_release\n");
          */
 
         free_buffer(vq, buf);
 }
 
-
 struct videobuf_queue_ops cx231xx_vbi_qops = {
         .buf_setup = vbi_buffer_setup,
         .buf_prepare = vbi_buffer_prepare,
         .buf_queue = vbi_buffer_queue,
         .buf_release = vbi_buffer_release,
 };
 
-
-
 /* ------------------------------------------------------------------
         URB control
    ------------------------------------------------------------------*/
@@ -311,23 +311,24 @@ struct videobuf_queue_ops cx231xx_vbi_qops = {
  */
 static void cx231xx_irq_vbi_callback(struct urb *urb)
 {
         struct cx231xx_dmaqueue *dma_q = urb->context;
-        struct cx231xx_video_mode *vmode = container_of(dma_q, struct cx231xx_video_mode, vidq);
+        struct cx231xx_video_mode *vmode =
+            container_of(dma_q, struct cx231xx_video_mode, vidq);
         struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
         int rc;
 
-
         switch (urb->status) {
         case 0:                 /* success */
         case -ETIMEDOUT:        /* NAK */
                 break;
         case -ECONNRESET:       /* kill */
         case -ENOENT:
         case -ESHUTDOWN:
                 return;
         default:                /* error */
-                cx231xx_err(DRIVER_NAME "urb completition error %d.\n", urb->status);
+                cx231xx_err(DRIVER_NAME "urb completition error %d.\n",
+                            urb->status);
                 break;
         }
 
         /* Copy data from URB */
@@ -341,7 +342,7 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
         urb->status = usb_submit_urb(urb, GFP_ATOMIC);
         if (urb->status) {
                 cx231xx_err(DRIVER_NAME "urb resubmit failed (error=%i)\n",
                             urb->status);
         }
 }
 
@@ -353,21 +354,23 @@ void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
         struct urb *urb;
         int i;
 
         cx231xx_info(DRIVER_NAME "cx231xx: called cx231xx_uninit_vbi_isoc\n");
 
         dev->vbi_mode.isoc_ctl.nfields = -1;
         for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
                 urb = dev->vbi_mode.isoc_ctl.urb[i];
                 if (urb) {
                         if (!irqs_disabled())
                                 usb_kill_urb(urb);
                         else
                                 usb_unlink_urb(urb);
 
                         if (dev->vbi_mode.isoc_ctl.transfer_buffer[i]) {
 
-                                kfree(dev->vbi_mode.isoc_ctl.transfer_buffer[i]);
-                                dev->vbi_mode.isoc_ctl.transfer_buffer[i] = NULL;
+                                kfree(dev->vbi_mode.isoc_ctl.
+                                      transfer_buffer[i]);
+                                dev->vbi_mode.isoc_ctl.transfer_buffer[i] =
+                                    NULL;
                         }
                         usb_free_urb(urb);
                         dev->vbi_mode.isoc_ctl.urb[i] = NULL;
@@ -384,14 +387,16 @@ void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
 
         cx231xx_capture_start(dev, 0, Vbi);
 }
+
 EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);
 
 /*
  * Allocate URBs and start IRQ
  */
 int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
                           int num_bufs, int max_pkt_size,
-                          int (*isoc_copy) (struct cx231xx *dev, struct urb *urb))
+                          int (*isoc_copy) (struct cx231xx * dev,
+                                            struct urb * urb))
 {
         struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
         int i;
@@ -404,31 +409,33 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
         /* De-allocates all pending stuff */
         cx231xx_uninit_vbi_isoc(dev);
 
         /* clear if any halt */
-        usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr));
-
+        usb_clear_halt(dev->udev,
+                       usb_rcvbulkpipe(dev->udev,
+                                       dev->vbi_mode.end_point_addr));
 
         dev->vbi_mode.isoc_ctl.isoc_copy = isoc_copy;
         dev->vbi_mode.isoc_ctl.num_bufs = num_bufs;
         dma_q->pos = 0;
         dma_q->is_partial_line = 0;
         dma_q->last_sav = 0;
         dma_q->current_field = -1;
         dma_q->bytes_left_in_line = dev->width << 1;
         dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
-                                  PAL_VBI_LINES : NTSC_VBI_LINES) ;
+                                  PAL_VBI_LINES : NTSC_VBI_LINES);
         dma_q->lines_completed = 0;
-        for(i = 0; i < 8 ; i++)
+        for (i = 0; i < 8; i++)
                 dma_q->partial_buf[i] = 0;
 
-        dev->vbi_mode.isoc_ctl.urb = kzalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
+        dev->vbi_mode.isoc_ctl.urb =
+            kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
         if (!dev->vbi_mode.isoc_ctl.urb) {
                 cx231xx_errdev("cannot alloc memory for usb buffers\n");
                 return -ENOMEM;
         }
 
-        dev->vbi_mode.isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
-                                                         GFP_KERNEL);
+        dev->vbi_mode.isoc_ctl.transfer_buffer =
+            kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
         if (!dev->vbi_mode.isoc_ctl.transfer_buffer) {
                 cx231xx_errdev("cannot allocate memory for usbtransfer\n");
                 kfree(dev->vbi_mode.isoc_ctl.urb);
@@ -445,27 +452,29 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
 
                 urb = usb_alloc_urb(0, GFP_KERNEL);
                 if (!urb) {
-                        cx231xx_err(DRIVER_NAME ": cannot alloc isoc_ctl.urb %i\n", i);
+                        cx231xx_err(DRIVER_NAME
+                                    ": cannot alloc isoc_ctl.urb %i\n", i);
                         cx231xx_uninit_vbi_isoc(dev);
                         return -ENOMEM;
                 }
                 dev->vbi_mode.isoc_ctl.urb[i] = urb;
                 urb->transfer_flags = 0;
 
-                dev->vbi_mode.isoc_ctl.transfer_buffer[i] = kzalloc(sb_size, GFP_KERNEL);
+                dev->vbi_mode.isoc_ctl.transfer_buffer[i] =
+                    kzalloc(sb_size, GFP_KERNEL);
                 if (!dev->vbi_mode.isoc_ctl.transfer_buffer[i]) {
-                        cx231xx_err(DRIVER_NAME ": unable to allocate %i bytes for transfer"
-                                    " buffer %i%s\n",
-                                    sb_size, i,
-                                    in_interrupt()?" while in int":"");
+                        cx231xx_err(DRIVER_NAME
+                                    ": unable to allocate %i bytes for transfer"
+                                    " buffer %i%s\n", sb_size, i,
+                                    in_interrupt()? " while in int" : "");
                         cx231xx_uninit_vbi_isoc(dev);
                         return -ENOMEM;
                 }
 
                 pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
                 usb_fill_bulk_urb(urb, dev->udev, pipe,
-                                  dev->vbi_mode.isoc_ctl.transfer_buffer[i], sb_size,
-                                  cx231xx_irq_vbi_callback, dma_q);
+                                  dev->vbi_mode.isoc_ctl.transfer_buffer[i],
+                                  sb_size, cx231xx_irq_vbi_callback, dma_q);
         }
 
         init_waitqueue_head(&dma_q->wq);
@@ -474,55 +483,58 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
         for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
                 rc = usb_submit_urb(dev->vbi_mode.isoc_ctl.urb[i], GFP_ATOMIC);
                 if (rc) {
-                        cx231xx_err(DRIVER_NAME ": submit of urb %i failed (error=%i)\n", i,
-                                    rc);
+                        cx231xx_err(DRIVER_NAME
+                                    ": submit of urb %i failed (error=%i)\n", i,
+                                    rc);
                         cx231xx_uninit_vbi_isoc(dev);
                         return rc;
                 }
         }
 
         cx231xx_capture_start(dev, 1, Vbi);
 
         return 0;
 }
-EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);
 
+EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);
 
-u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
-                         u8 sav_eav, u8 *p_buffer, u32 buffer_size)
+u32 cx231xx_get_vbi_line(struct cx231xx * dev, struct cx231xx_dmaqueue * dma_q,
+                         u8 sav_eav, u8 * p_buffer, u32 buffer_size)
 {
         u32 bytes_copied = 0;
         int current_field = -1;
 
-        switch(sav_eav) {
+        switch (sav_eav) {
 
         case SAV_VBI_FIELD1:
                 current_field = 1;
                 break;
 
         case SAV_VBI_FIELD2:
                 current_field = 2;
                 break;
         default:
                 break;
         }
 
-        if(current_field < 0 )
+        if (current_field < 0)
                 return bytes_copied;
 
         dma_q->last_sav = sav_eav;
 
-        bytes_copied = cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size, current_field);
+        bytes_copied =
+            cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
+                                  current_field);
 
         return bytes_copied;
 }
 
 /*
  * Announces that a buffer were filled and request the next
  */
 static inline void vbi_buffer_filled(struct cx231xx *dev,
                                      struct cx231xx_dmaqueue *dma_q,
                                      struct cx231xx_buffer *buf)
 {
         /* Advice that buffer was filled */
         /* cx231xx_info(DRIVER_NAME "[%p/%d] wakeup\n", buf, buf->vb.i); */
@@ -537,80 +549,83 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
         wake_up(&buf->vb.done);
 }
 
 u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
-                          u8 *p_line, u32 length, int field_number)
+                          u8 * p_line, u32 length, int field_number)
 {
         u32 bytes_to_copy;
         struct cx231xx_buffer *buf;
         u32 _line_size = dev->width * 2;
 
-        if( dma_q->current_field != field_number ) {
+        if (dma_q->current_field != field_number) {
                 cx231xx_reset_vbi_buffer(dev, dma_q);
         }
 
         /* get the buffer pointer */
         buf = dev->vbi_mode.isoc_ctl.buf;
 
         /* Remember the field number for next time */
         dma_q->current_field = field_number;
 
         bytes_to_copy = dma_q->bytes_left_in_line;
-        if(bytes_to_copy > length)
+        if (bytes_to_copy > length)
                 bytes_to_copy = length;
 
-        if(dma_q->lines_completed >= dma_q->lines_per_field) {
+        if (dma_q->lines_completed >= dma_q->lines_per_field) {
                 dma_q->bytes_left_in_line -= bytes_to_copy;
-                dma_q->is_partial_line = (dma_q->bytes_left_in_line == 0) ? 0 : 1;
+                dma_q->is_partial_line =
+                    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
                 return 0;
         }
 
         dma_q->is_partial_line = 1;
 
         /* If we don't have a buffer, just return the number of bytes we would
            have copied if we had a buffer. */
-        if(!buf) {
+        if (!buf) {
                 dma_q->bytes_left_in_line -= bytes_to_copy;
-                dma_q->is_partial_line = (dma_q->bytes_left_in_line == 0) ? 0 : 1;
+                dma_q->is_partial_line =
+                    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
                 return bytes_to_copy;
         }
 
         /* copy the data to video buffer */
         cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);
 
         dma_q->pos += bytes_to_copy;
         dma_q->bytes_left_in_line -= bytes_to_copy;
 
-        if(dma_q->bytes_left_in_line == 0) {
+        if (dma_q->bytes_left_in_line == 0) {
 
                 dma_q->bytes_left_in_line = _line_size;
                 dma_q->lines_completed++;
                 dma_q->is_partial_line = 0;
 
-                if(cx231xx_is_vbi_buffer_done(dev, dma_q) && buf ) {
+                if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {
 
                         vbi_buffer_filled(dev, dma_q, buf);
 
                         dma_q->pos = 0;
                         buf = NULL;
                         dma_q->lines_completed = 0;
                 }
         }
 
         return bytes_to_copy;
 }
 
 /*
  * video-buf generic routine to get the next available buffer
  */
 static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
                                     struct cx231xx_buffer **buf)
 {
-        struct cx231xx_video_mode *vmode = container_of(dma_q, struct cx231xx_video_mode, vidq);
+        struct cx231xx_video_mode *vmode =
+            container_of(dma_q, struct cx231xx_video_mode, vidq);
         struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
         char *outp;
 
         if (list_empty(&dma_q->active)) {
                 cx231xx_err(DRIVER_NAME ": No active queue to serve\n");
                 dev->vbi_mode.isoc_ctl.buf = NULL;
                 *buf = NULL;
                 return;
@@ -628,66 +643,70 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
         return;
 }
 
-
-void cx231xx_reset_vbi_buffer(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q)
+void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
+                              struct cx231xx_dmaqueue *dma_q)
 {
         struct cx231xx_buffer *buf;
 
         buf = dev->vbi_mode.isoc_ctl.buf;
 
-        if(buf == NULL) {
+        if (buf == NULL) {
 
                 /* first try to get the buffer */
                 get_next_vbi_buf(dma_q, &buf);
 
                 dma_q->pos = 0;
                 dma_q->current_field = -1;
         }
 
         dma_q->bytes_left_in_line = dev->width << 1;
         dma_q->lines_completed = 0;
 }
 
 int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
-                        u8 *p_buffer, u32 bytes_to_copy)
+                        u8 * p_buffer, u32 bytes_to_copy)
 {
         u8 *p_out_buffer = NULL;
         u32 current_line_bytes_copied = 0;
         struct cx231xx_buffer *buf;
         u32 _line_size = dev->width << 1;
         void *startwrite;
         int offset, lencopy;
 
         buf = dev->vbi_mode.isoc_ctl.buf;
 
         if (buf == NULL) {
                 return -1;
         }
 
         p_out_buffer = videobuf_to_vmalloc(&buf->vb);
 
-        if(dma_q->bytes_left_in_line != _line_size ) {
-                current_line_bytes_copied = _line_size - dma_q->bytes_left_in_line;
+        if (dma_q->bytes_left_in_line != _line_size) {
+                current_line_bytes_copied =
+                    _line_size - dma_q->bytes_left_in_line;
         }
 
-        offset = ( dma_q->lines_completed * _line_size ) + current_line_bytes_copied;
+        offset =
+            (dma_q->lines_completed * _line_size) + current_line_bytes_copied;
 
         /* prepare destination address */
         startwrite = p_out_buffer + offset;
 
-        lencopy = dma_q->bytes_left_in_line > bytes_to_copy ? bytes_to_copy : dma_q->bytes_left_in_line;
+        lencopy =
+            dma_q->bytes_left_in_line >
+            bytes_to_copy ? bytes_to_copy : dma_q->bytes_left_in_line;
 
         memcpy(startwrite, p_buffer, lencopy);
 
         return 0;
 }
 
-
-u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,struct cx231xx_dmaqueue *dma_q)
+u8 cx231xx_is_vbi_buffer_done(struct cx231xx * dev,
+                              struct cx231xx_dmaqueue * dma_q)
 {
         u32 height = 0;
 
         height = ((dev->norm & V4L2_STD_625_50) ?
-                  PAL_VBI_LINES : NTSC_VBI_LINES) ;
-        return (dma_q->lines_completed == height)?1:0;
+                  PAL_VBI_LINES : NTSC_VBI_LINES);
+        return (dma_q->lines_completed == height) ? 1 : 0;
 }