author     Hugues Fruchet <hugues.fruchet@st.com>              2018-06-11 05:50:27 -0400
committer  Mauro Carvalho Chehab <mchehab+samsung@kernel.org>  2018-06-28 07:47:44 -0400
commit     49bcc1746ffbf94f41840718c5fab1a8d56c82d8 (patch)
tree       39911e4cc8607c78c3b1fc5d90dceee3ff71d584 /drivers/media/platform/stm32/stm32-dcmi.c
parent     2d494d4a09c027fd656862dc0a1aa7a14db42e2a (diff)
media: stm32-dcmi: revisit buffer list management
Cleanup "active" field usage and enhance list management
to avoid exceptions when releasing buffers on error or
stopping streaming.
Signed-off-by: Hugues Fruchet <hugues.fruchet@st.com>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Diffstat (limited to 'drivers/media/platform/stm32/stm32-dcmi.c')
-rw-r--r--  drivers/media/platform/stm32/stm32-dcmi.c  65
1 file changed, 31 insertions(+), 34 deletions(-)
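
The heart of the change is where a buffer leaves the "buffers" list. Before the patch, the next buffer was unlinked as soon as it became "active", so every release path had to special-case dcmi->active; after it, a queued buffer stays on dcmi->buffers until dcmi_buffer_done() removes it with list_del_init(), and the error and stop paths drain a single list. Below is a minimal userspace sketch of that lifecycle, not the driver code: the list helpers are stripped-down stand-ins for the kernel's <linux/list.h>, and fake_dcmi, fake_buf, buf_queue(), buffer_done() and drain_all() are illustrative names, not the driver's API.

#include <stdio.h>
#include <stddef.h>

/* Stripped-down stand-ins for the kernel's <linux/list.h> primitives. */
struct list_head { struct list_head *prev, *next; };

static void init_list(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Re-initialising the unlinked node makes a second delete harmless. */
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	init_list(n);
}

struct fake_buf {			/* illustrative stand-in for dcmi_buf */
	int index;
	struct list_head list;
};

struct fake_dcmi {			/* illustrative stand-in for stm32_dcmi */
	struct list_head buffers;	/* every queued buffer stays here...   */
	struct fake_buf *active;	/* ...including the one being filled   */
};

/* After the patch, queueing only appends; nothing is unlinked here. */
static void buf_queue(struct fake_dcmi *d, struct fake_buf *b)
{
	list_add_tail(&b->list, &d->buffers);
	if (!d->active)			/* roughly the WAIT_FOR_BUFFER case */
		d->active = b;
}

/* buffer_done() is now the single place a buffer leaves the list. */
static void buffer_done(struct fake_dcmi *d, struct fake_buf *b)
{
	list_del_init(&b->list);
	if (d->active == b)
		d->active = NULL;
	printf("returned buffer %d to vb2\n", b->index);
}

/*
 * Error and stop paths no longer special-case "active": one walk over
 * d->buffers releases everything, then "active" is simply cleared.
 */
static void drain_all(struct fake_dcmi *d)
{
	while (d->buffers.next != &d->buffers) {
		struct fake_buf *b = (struct fake_buf *)
			((char *)d->buffers.next - offsetof(struct fake_buf, list));
		buffer_done(d, b);
	}
	d->active = NULL;
}

int main(void)
{
	struct fake_dcmi d = { .active = NULL };
	struct fake_buf b0 = { .index = 0 }, b1 = { .index = 1 };

	init_list(&d.buffers);
	buf_queue(&d, &b0);
	buf_queue(&d, &b1);
	buffer_done(&d, &b0);	/* DMA-complete path */
	drain_all(&d);		/* stop-streaming path: no double release */
	return 0;
}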
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 61daa1e0514a..5d866ac69d5c 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -190,7 +190,7 @@ static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
 	reg_write(base, reg, reg_read(base, reg) & ~mask);
 }
 
-static int dcmi_start_capture(struct stm32_dcmi *dcmi);
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf);
 
 static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
 			     struct dcmi_buf *buf,
@@ -202,6 +202,8 @@ static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
 	if (!buf)
 		return;
 
+	list_del_init(&buf->list);
+
 	vbuf = &buf->vb;
 
 	vbuf->sequence = dcmi->sequence++;
@@ -219,6 +221,8 @@ static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
 
 static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
 {
+	struct dcmi_buf *buf;
+
 	spin_lock_irq(&dcmi->irqlock);
 
 	if (dcmi->state != RUNNING) {
@@ -229,19 +233,16 @@ static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
 	/* Restart a new DMA transfer with next buffer */
 	if (list_empty(&dcmi->buffers)) {
 		dev_dbg(dcmi->dev, "Capture restart is deferred to next buffer queueing\n");
-		dcmi->active = NULL;
 		dcmi->state = WAIT_FOR_BUFFER;
 		spin_unlock_irq(&dcmi->irqlock);
 		return 0;
 	}
-
-	dcmi->active = list_entry(dcmi->buffers.next,
-				  struct dcmi_buf, list);
-	list_del_init(&dcmi->active->list);
+	buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+	dcmi->active = buf;
 
 	spin_unlock_irq(&dcmi->irqlock);
 
-	return dcmi_start_capture(dcmi);
+	return dcmi_start_capture(dcmi, buf);
 }
 
 static void dcmi_dma_callback(void *param)
@@ -251,6 +252,8 @@ static void dcmi_dma_callback(void *param)
 	enum dma_status status;
 	struct dcmi_buf *buf = dcmi->active;
 
+	spin_lock_irq(&dcmi->irqlock);
+
 	/* Check DMA status */
 	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
 
@@ -273,15 +276,19 @@ static void dcmi_dma_callback(void *param)
 		/* Return buffer to V4L2 */
 		dcmi_buffer_done(dcmi, buf, buf->size, 0);
 
+		spin_unlock_irq(&dcmi->irqlock);
+
 		/* Restart capture */
 		if (dcmi_restart_capture(dcmi))
 			dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
 				__func__);
-		break;
+		return;
 	default:
 		dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
 		break;
 	}
+
+	spin_unlock_irq(&dcmi->irqlock);
 }
 
 static int dcmi_start_dma(struct stm32_dcmi *dcmi,
@@ -333,10 +340,9 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
 	return 0;
 }
 
-static int dcmi_start_capture(struct stm32_dcmi *dcmi)
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf)
 {
 	int ret;
-	struct dcmi_buf *buf = dcmi->active;
 
 	if (!buf)
 		return -EINVAL;
@@ -490,8 +496,6 @@ static int dcmi_queue_setup(struct vb2_queue *vq,
 	*nplanes = 1;
 	sizes[0] = size;
 
-	dcmi->active = NULL;
-
 	dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
 		*nbuffers, size);
 
@@ -549,23 +553,24 @@ static void dcmi_buf_queue(struct vb2_buffer *vb)
 
 	spin_lock_irq(&dcmi->irqlock);
 
-	dcmi->active = buf;
+	/* Enqueue to video buffers list */
+	list_add_tail(&buf->list, &dcmi->buffers);
 
 	if (dcmi->state == WAIT_FOR_BUFFER) {
 		dcmi->state = RUNNING;
+		dcmi->active = buf;
 
 		dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
 			buf->vb.vb2_buf.index);
 
 		spin_unlock_irq(&dcmi->irqlock);
-		if (dcmi_start_capture(dcmi))
+		if (dcmi_start_capture(dcmi, buf))
 			dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
 				__func__);
-	} else {
-		/* Enqueue to video buffers list */
-		list_add_tail(&buf->list, &dcmi->buffers);
-		spin_unlock_irq(&dcmi->irqlock);
+		return;
 	}
+
+	spin_unlock_irq(&dcmi->irqlock);
 }
 
 static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -637,7 +642,6 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
 	dcmi->errors_count = 0;
 	dcmi->overrun_count = 0;
 	dcmi->buffers_count = 0;
-	dcmi->active = NULL;
 
 	/*
 	 * Start transfer if at least one buffer has been queued,
@@ -650,15 +654,15 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
 		return 0;
 	}
 
-	dcmi->active = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
-	list_del_init(&dcmi->active->list);
-
-	dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
+	buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+	dcmi->active = buf;
 
 	dcmi->state = RUNNING;
 
+	dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
+
 	spin_unlock_irq(&dcmi->irqlock);
-	ret = dcmi_start_capture(dcmi);
+	ret = dcmi_start_capture(dcmi, buf);
 	if (ret) {
 		dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
 			__func__);
@@ -682,15 +686,11 @@ err_release_buffers:
 	 * Return all buffers to vb2 in QUEUED state.
 	 * This will give ownership back to userspace
 	 */
-	if (dcmi->active) {
-		buf = dcmi->active;
-		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
-		dcmi->active = NULL;
-	}
 	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
 		list_del_init(&buf->list);
 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
 	}
+	dcmi->active = NULL;
 	spin_unlock_irq(&dcmi->irqlock);
 
 	return ret;
@@ -732,16 +732,13 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
 	}
 
 	/* Return all queued buffers to vb2 in ERROR state */
-	if (dcmi->active) {
-		buf = dcmi->active;
-		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
-		dcmi->active = NULL;
-	}
 	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
 		list_del_init(&buf->list);
 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
 	}
 
+	dcmi->active = NULL;
+
 	spin_unlock_irq(&dcmi->irqlock);
 
 	/* Stop all pending DMA operations */
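
A second thread of the patch is the locking in dcmi_dma_callback(): status handling now runs under irqlock, and the DMA_COMPLETE case releases the lock before re-entering dcmi_restart_capture() (which takes the same lock itself), returning instead of breaking so the lock is not dropped twice. Here is a small userspace model of that unlock-before-reentry control flow, assuming a pthread mutex in place of the spinlock; irqlock, restart() and on_dma_complete() are illustrative names, not the driver's API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irqlock = PTHREAD_MUTEX_INITIALIZER;

/* Takes the lock again on entry, like dcmi_restart_capture(). */
static int restart(void)
{
	pthread_mutex_lock(&irqlock);
	puts("restarted");
	pthread_mutex_unlock(&irqlock);
	return 0;
}

static void on_dma_complete(int status)
{
	pthread_mutex_lock(&irqlock);

	switch (status) {
	case 0:
		/* Complete: finish the buffer while still holding the lock. */
		puts("buffer done");
		/* Drop the lock BEFORE re-entering restart()... */
		pthread_mutex_unlock(&irqlock);
		if (restart())
			puts("cannot restart");
		/* ...and return, not break: the lock is already released. */
		return;
	default:
		puts("unknown status");
		break;
	}

	pthread_mutex_unlock(&irqlock);
}

int main(void)
{
	on_dma_complete(0);
	on_dma_complete(-1);
	return 0;
}

Compile with cc -pthread; the point is only the control flow, namely that every path out of on_dma_complete() releases irqlock exactly once.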