Diffstat (limited to 'drivers/media/platform/stm32/stm32-dcmi.c')
-rw-r--r--  drivers/media/platform/stm32/stm32-dcmi.c | 305
1 file changed, 202 insertions(+), 103 deletions(-)
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 9460b3080dca..2e1933d872ee 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for STM32 Digital Camera Memory Interface
  *
@@ -5,7 +6,6 @@
  * Authors: Yannick Fertre <yannick.fertre@st.com>
  *          Hugues Fruchet <hugues.fruchet@st.com>
  *          for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
  *
  * This driver is based on atmel_isi.c
  *
@@ -93,6 +93,11 @@ enum state {
 #define MIN_HEIGHT 16U
 #define MAX_HEIGHT 2048U
 
+#define MIN_JPEG_WIDTH 16U
+#define MAX_JPEG_WIDTH 2592U
+#define MIN_JPEG_HEIGHT 16U
+#define MAX_JPEG_HEIGHT 2592U
+
 #define TIMEOUT_MS 1000
 
 struct dcmi_graph_entity {
@@ -160,6 +165,7 @@ struct stm32_dcmi {
 	dma_cookie_t dma_cookie;
 	u32 misr;
 	int errors_count;
+	int overrun_count;
 	int buffers_count;
 };
 
@@ -190,14 +196,67 @@ static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
 
 static int dcmi_start_capture(struct stm32_dcmi *dcmi);
 
+static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
+			     struct dcmi_buf *buf,
+			     size_t bytesused,
+			     int err)
+{
+	struct vb2_v4l2_buffer *vbuf;
+
+	if (!buf)
+		return;
+
+	vbuf = &buf->vb;
+
+	vbuf->sequence = dcmi->sequence++;
+	vbuf->field = V4L2_FIELD_NONE;
+	vbuf->vb2_buf.timestamp = ktime_get_ns();
+	vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
+	vb2_buffer_done(&vbuf->vb2_buf,
+			err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+	dev_dbg(dcmi->dev, "buffer[%d] done seq=%d, bytesused=%zu\n",
+		vbuf->vb2_buf.index, vbuf->sequence, bytesused);
+
+	dcmi->buffers_count++;
+	dcmi->active = NULL;
+}
+
+static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
+{
+	spin_lock_irq(&dcmi->irqlock);
+
+	if (dcmi->state != RUNNING) {
+		spin_unlock_irq(&dcmi->irqlock);
+		return -EINVAL;
+	}
+
+	/* Restart a new DMA transfer with next buffer */
+	if (list_empty(&dcmi->buffers)) {
+		dev_err(dcmi->dev, "%s: No more buffer queued, cannot capture buffer\n",
+			__func__);
+		dcmi->errors_count++;
+		dcmi->active = NULL;
+
+		spin_unlock_irq(&dcmi->irqlock);
+		return -EINVAL;
+	}
+
+	dcmi->active = list_entry(dcmi->buffers.next,
+				  struct dcmi_buf, list);
+	list_del_init(&dcmi->active->list);
+
+	spin_unlock_irq(&dcmi->irqlock);
+
+	return dcmi_start_capture(dcmi);
+}
+
 static void dcmi_dma_callback(void *param)
 {
 	struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
 	struct dma_chan *chan = dcmi->dma_chan;
 	struct dma_tx_state state;
 	enum dma_status status;
-
-	spin_lock(&dcmi->irqlock);
+	struct dcmi_buf *buf = dcmi->active;
 
 	/* Check DMA status */
 	status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);
@@ -215,58 +274,18 @@ static void dcmi_dma_callback(void *param)
 	case DMA_COMPLETE:
 		dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);
 
-		if (dcmi->active) {
-			struct dcmi_buf *buf = dcmi->active;
-			struct vb2_v4l2_buffer *vbuf = &dcmi->active->vb;
-
-			vbuf->sequence = dcmi->sequence++;
-			vbuf->field = V4L2_FIELD_NONE;
-			vbuf->vb2_buf.timestamp = ktime_get_ns();
-			vb2_set_plane_payload(&vbuf->vb2_buf, 0, buf->size);
-			vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
-			dev_dbg(dcmi->dev, "buffer[%d] done seq=%d\n",
-				vbuf->vb2_buf.index, vbuf->sequence);
-
-			dcmi->buffers_count++;
-			dcmi->active = NULL;
-		}
-
-		/* Restart a new DMA transfer with next buffer */
-		if (dcmi->state == RUNNING) {
-			if (list_empty(&dcmi->buffers)) {
-				dev_err(dcmi->dev, "%s: No more buffer queued, cannot capture buffer",
-					__func__);
-				dcmi->errors_count++;
-				dcmi->active = NULL;
-
-				spin_unlock(&dcmi->irqlock);
-				return;
-			}
-
-			dcmi->active = list_entry(dcmi->buffers.next,
-						  struct dcmi_buf, list);
-
-			list_del_init(&dcmi->active->list);
-
-			if (dcmi_start_capture(dcmi)) {
-				dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete",
-					__func__);
-
-				spin_unlock(&dcmi->irqlock);
-				return;
-			}
-
-			/* Enable capture */
-			reg_set(dcmi->regs, DCMI_CR, CR_CAPTURE);
-		}
+		/* Return buffer to V4L2 */
+		dcmi_buffer_done(dcmi, buf, buf->size, 0);
 
+		/* Restart capture */
+		if (dcmi_restart_capture(dcmi))
+			dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
+				__func__);
 		break;
 	default:
 		dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
 		break;
 	}
-
-	spin_unlock(&dcmi->irqlock);
 }
 
 static int dcmi_start_dma(struct stm32_dcmi *dcmi,
@@ -359,11 +378,57 @@ static void dcmi_set_crop(struct stm32_dcmi *dcmi)
 	reg_set(dcmi->regs, DCMI_CR, CR_CROP);
 }
 
+static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
+{
+	struct dma_tx_state state;
+	enum dma_status status;
+	struct dma_chan *chan = dcmi->dma_chan;
+	struct dcmi_buf *buf = dcmi->active;
+
+	if (!buf)
+		return;
+
+	/*
+	 * Because of variable JPEG buffer size sent by sensor,
+	 * DMA transfer never completes due to transfer size
+	 * never reached.
+	 * In order to ensure that all the JPEG data are transferred
+	 * in active buffer memory, DMA is drained.
+	 * Then DMA tx status gives the amount of data transferred
+	 * to memory, which is then returned to V4L2 through the active
+	 * buffer payload.
+	 */
+
+	/* Drain DMA */
+	dmaengine_synchronize(chan);
+
+	/* Get DMA residue to get JPEG size */
+	status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);
+	if (status != DMA_ERROR && state.residue < buf->size) {
+		/* Return JPEG buffer to V4L2 with received JPEG buffer size */
+		dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
+	} else {
+		dcmi->errors_count++;
+		dev_err(dcmi->dev, "%s: Cannot get JPEG size from DMA\n",
+			__func__);
+		/* Return JPEG buffer to V4L2 in ERROR state */
+		dcmi_buffer_done(dcmi, buf, 0, -EIO);
+	}
+
+	/* Abort DMA operation */
+	dmaengine_terminate_all(dcmi->dma_chan);
+
+	/* Restart capture */
+	if (dcmi_restart_capture(dcmi))
+		dev_err(dcmi->dev, "%s: Cannot restart capture on JPEG received\n",
+			__func__);
+}
+
 static irqreturn_t dcmi_irq_thread(int irq, void *arg)
 {
 	struct stm32_dcmi *dcmi = arg;
 
-	spin_lock(&dcmi->irqlock);
+	spin_lock_irq(&dcmi->irqlock);
 
 	/* Stop capture is required */
 	if (dcmi->state == STOPPING) {
@@ -373,50 +438,41 @@ static irqreturn_t dcmi_irq_thread(int irq, void *arg)
 
 		complete(&dcmi->complete);
 
-		spin_unlock(&dcmi->irqlock);
+		spin_unlock_irq(&dcmi->irqlock);
 		return IRQ_HANDLED;
 	}
 
 	if ((dcmi->misr & IT_OVR) || (dcmi->misr & IT_ERR)) {
-		/*
-		 * An overflow or an error has been detected,
-		 * stop current DMA transfert & restart it
-		 */
-		dev_warn(dcmi->dev, "%s: Overflow or error detected\n",
-			 __func__);
-
 		dcmi->errors_count++;
-		dmaengine_terminate_all(dcmi->dma_chan);
-
-		reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);
-
-		dev_dbg(dcmi->dev, "Restarting capture after DCMI error\n");
-
-		if (dcmi_start_capture(dcmi)) {
-			dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
-				__func__);
+		if (dcmi->misr & IT_OVR)
+			dcmi->overrun_count++;
+	}
 
-			spin_unlock(&dcmi->irqlock);
-			return IRQ_HANDLED;
-		}
+	if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG &&
+	    dcmi->misr & IT_FRAME) {
+		/* JPEG received */
+		spin_unlock_irq(&dcmi->irqlock);
+		dcmi_process_jpeg(dcmi);
+		return IRQ_HANDLED;
 	}
 
-	spin_unlock(&dcmi->irqlock);
+	spin_unlock_irq(&dcmi->irqlock);
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t dcmi_irq_callback(int irq, void *arg)
 {
 	struct stm32_dcmi *dcmi = arg;
+	unsigned long flags;
 
-	spin_lock(&dcmi->irqlock);
+	spin_lock_irqsave(&dcmi->irqlock, flags);
 
 	dcmi->misr = reg_read(dcmi->regs, DCMI_MIS);
 
 	/* Clear interrupt */
 	reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);
 
-	spin_unlock(&dcmi->irqlock);
+	spin_unlock_irqrestore(&dcmi->irqlock, flags);
 
 	return IRQ_WAKE_THREAD;
 }
@@ -483,7 +539,7 @@ static int dcmi_buf_prepare(struct vb2_buffer *vb)
 
 		vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
 
-		dev_dbg(dcmi->dev, "buffer[%d] phy=0x%pad size=%zu\n",
+		dev_dbg(dcmi->dev, "buffer[%d] phy=%pad size=%zu\n",
 			vb->index, &buf->paddr, buf->size);
 	}
 
@@ -495,29 +551,24 @@ static void dcmi_buf_queue(struct vb2_buffer *vb)
 	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
-	unsigned long flags = 0;
 
-	spin_lock_irqsave(&dcmi->irqlock, flags);
+	spin_lock_irq(&dcmi->irqlock);
 
-	if ((dcmi->state == RUNNING) && (!dcmi->active)) {
+	if (dcmi->state == RUNNING && !dcmi->active) {
 		dcmi->active = buf;
 
 		dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
 			buf->vb.vb2_buf.index);
 
-		if (dcmi_start_capture(dcmi)) {
+		spin_unlock_irq(&dcmi->irqlock);
+		if (dcmi_start_capture(dcmi))
 			dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
 				__func__);
-
-			spin_unlock_irqrestore(&dcmi->irqlock, flags);
-			return;
-		}
 	} else {
 		/* Enqueue to video buffers list */
 		list_add_tail(&buf->list, &dcmi->buffers);
+		spin_unlock_irq(&dcmi->irqlock);
 	}
-
-	spin_unlock_irqrestore(&dcmi->irqlock, flags);
 }
 
 static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -529,7 +580,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
 
 	ret = clk_enable(dcmi->mclk);
 	if (ret) {
-		dev_err(dcmi->dev, "%s: Failed to start streaming, cannot enable clock",
+		dev_err(dcmi->dev, "%s: Failed to start streaming, cannot enable clock\n",
 			__func__);
 		goto err_release_buffers;
 	}
@@ -578,6 +629,10 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
 	if (dcmi->do_crop)
 		dcmi_set_crop(dcmi);
 
+	/* Enable jpeg capture */
+	if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG)
+		reg_set(dcmi->regs, DCMI_CR, CR_CM);/* Snapshot mode */
+
 	/* Enable dcmi */
 	reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);
 
@@ -585,6 +640,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
 
 	dcmi->sequence = 0;
 	dcmi->errors_count = 0;
+	dcmi->overrun_count = 0;
 	dcmi->buffers_count = 0;
 	dcmi->active = NULL;
 
@@ -603,20 +659,17 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
 
 	dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
 
+	spin_unlock_irq(&dcmi->irqlock);
 	ret = dcmi_start_capture(dcmi);
 	if (ret) {
-		dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture",
+		dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
 			__func__);
-
-		spin_unlock_irq(&dcmi->irqlock);
 		goto err_subdev_streamoff;
 	}
 
 	/* Enable interruptions */
 	reg_set(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
 
-	spin_unlock_irq(&dcmi->irqlock);
-
 	return 0;
 
 err_subdev_streamoff:
@@ -656,9 +709,12 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
 	/* Disable stream on the sub device */
 	ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
 	if (ret && ret != -ENOIOCTLCMD)
-		dev_err(dcmi->dev, "stream off failed in subdev\n");
+		dev_err(dcmi->dev, "%s: Failed to stop streaming, subdev streamoff error (%d)\n",
+			__func__, ret);
 
+	spin_lock_irq(&dcmi->irqlock);
 	dcmi->state = STOPPING;
+	spin_unlock_irq(&dcmi->irqlock);
 
 	timeout = wait_for_completion_interruptible_timeout(&dcmi->complete,
 							     time_ms);
@@ -672,7 +728,8 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
 	reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);
 
 	if (!timeout) {
-		dev_err(dcmi->dev, "Timeout during stop streaming\n");
+		dev_err(dcmi->dev, "%s: Timeout during stop streaming\n",
+			__func__);
 		dcmi->state = STOPPED;
 	}
 
@@ -694,8 +751,13 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
 
 	clk_disable(dcmi->mclk);
 
-	dev_dbg(dcmi->dev, "Stop streaming, errors=%d buffers=%d\n",
-		dcmi->errors_count, dcmi->buffers_count);
+	if (dcmi->errors_count)
+		dev_warn(dcmi->dev, "Some errors found while streaming: errors=%d (overrun=%d), buffers=%d\n",
+			 dcmi->errors_count, dcmi->overrun_count,
+			 dcmi->buffers_count);
+	dev_dbg(dcmi->dev, "Stop streaming, errors=%d (overrun=%d), buffers=%d\n",
+		dcmi->errors_count, dcmi->overrun_count,
+		dcmi->buffers_count);
 }
 
 static const struct vb2_ops dcmi_video_qops = {
@@ -749,7 +811,7 @@ static void __find_outer_frame_size(struct stm32_dcmi *dcmi,
 		int h_err = (fsize->height - pix->height);
 		int err = w_err + h_err;
 
-		if ((w_err >= 0) && (h_err >= 0) && (err < min_err)) {
+		if (w_err >= 0 && h_err >= 0 && err < min_err) {
 			min_err = err;
 			match = fsize;
 		}
@@ -771,6 +833,7 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
 	struct v4l2_subdev_format format = {
 		.which = V4L2_SUBDEV_FORMAT_TRY,
 	};
+	bool do_crop;
 	int ret;
 
 	sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
@@ -780,10 +843,19 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
 	}
 
 	/* Limit to hardware capabilities */
-	pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
-	pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
+	if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
+		pix->width = clamp(pix->width, MIN_JPEG_WIDTH, MAX_JPEG_WIDTH);
+		pix->height =
+			clamp(pix->height, MIN_JPEG_HEIGHT, MAX_JPEG_HEIGHT);
+	} else {
+		pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
+		pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
+	}
 
-	if (dcmi->do_crop && dcmi->num_of_sd_framesizes) {
+	/* No crop if JPEG is requested */
+	do_crop = dcmi->do_crop && (pix->pixelformat != V4L2_PIX_FMT_JPEG);
+
+	if (do_crop && dcmi->num_of_sd_framesizes) {
 		struct dcmi_framesize outer_sd_fsize;
 		/*
 		 * If crop is requested and sensor have discrete frame sizes,
@@ -807,7 +879,7 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
 	sd_fsize.width = pix->width;
 	sd_fsize.height = pix->height;
 
-	if (dcmi->do_crop) {
+	if (do_crop) {
 		struct v4l2_rect c = dcmi->crop;
 		struct v4l2_rect max_rect;
 
@@ -862,6 +934,10 @@ static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f)
 	if (ret)
 		return ret;
 
+	/* Disable crop if JPEG is requested */
+	if (pix->pixelformat == V4L2_PIX_FMT_JPEG)
+		dcmi->do_crop = false;
+
 	/* pix to mbus format */
 	v4l2_fill_mbus_format(mf, pix,
 			      sd_format->mbus_code);
@@ -1084,10 +1160,10 @@ static int dcmi_s_selection(struct file *file, void *priv,
 	r.top = clamp_t(s32, r.top, 0, pix.height - r.height);
 	r.left = clamp_t(s32, r.left, 0, pix.width - r.width);
 
-	if (!((r.top == dcmi->sd_bounds.top) &&
-	      (r.left == dcmi->sd_bounds.left) &&
-	      (r.width == dcmi->sd_bounds.width) &&
-	      (r.height == dcmi->sd_bounds.height))) {
+	if (!(r.top == dcmi->sd_bounds.top &&
+	      r.left == dcmi->sd_bounds.left &&
+	      r.width == dcmi->sd_bounds.width &&
+	      r.height == dcmi->sd_bounds.height)) {
 		/* Crop if request is different than sensor resolution */
 		dcmi->do_crop = true;
 		dcmi->crop = r;
@@ -1167,6 +1243,22 @@ static int dcmi_enum_framesizes(struct file *file, void *fh,
 	return 0;
 }
 
+static int dcmi_g_parm(struct file *file, void *priv,
+		       struct v4l2_streamparm *p)
+{
+	struct stm32_dcmi *dcmi = video_drvdata(file);
+
+	return v4l2_g_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
+}
+
+static int dcmi_s_parm(struct file *file, void *priv,
+		       struct v4l2_streamparm *p)
+{
+	struct stm32_dcmi *dcmi = video_drvdata(file);
+
+	return v4l2_s_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
+}
+
 static int dcmi_enum_frameintervals(struct file *file, void *fh,
 				    struct v4l2_frmivalenum *fival)
 {
@@ -1269,6 +1361,9 @@ static const struct v4l2_ioctl_ops dcmi_ioctl_ops = {
 	.vidioc_g_input = dcmi_g_input,
 	.vidioc_s_input = dcmi_s_input,
 
+	.vidioc_g_parm = dcmi_g_parm,
+	.vidioc_s_parm = dcmi_s_parm,
+
 	.vidioc_enum_framesizes = dcmi_enum_framesizes,
 	.vidioc_enum_frameintervals = dcmi_enum_frameintervals,
 
@@ -1334,6 +1429,10 @@ static const struct dcmi_format dcmi_formats[] = {
 		.fourcc = V4L2_PIX_FMT_UYVY,
 		.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
 		.bpp = 2,
+	}, {
+		.fourcc = V4L2_PIX_FMT_JPEG,
+		.mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
+		.bpp = 1,
 	},
 };
 
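Note: the dcmi_process_jpeg() hunk above recovers the size of a variable-length JPEG frame from the DMA residue: the transfer is programmed for the full buffer, so the payload returned to V4L2 is the buffer size minus whatever the DMA engine reports as not yet transferred, and the buffer is returned in ERROR state when the residue is unusable. The standalone sketch below is not part of the patch and uses hypothetical names (jpeg_payload, fake_dma_status); it only illustrates that arithmetic and the guard used by the driver.

/*
 * Illustrative sketch only -- not kernel code and not part of this patch.
 * Mirrors the payload computation in dcmi_process_jpeg():
 * bytesused = buffer size - DMA residue, rejected when the DMA reported
 * an error or the residue is not smaller than the buffer size.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the DMA status the driver queries. */
enum fake_dma_status { FAKE_DMA_COMPLETE, FAKE_DMA_IN_PROGRESS, FAKE_DMA_ERROR };

static bool jpeg_payload(enum fake_dma_status status, size_t buf_size,
			 size_t residue, size_t *bytesused)
{
	if (status == FAKE_DMA_ERROR || residue >= buf_size)
		return false;	/* driver returns the buffer in ERROR state */

	*bytesused = buf_size - residue;	/* received JPEG size */
	return true;
}

int main(void)
{
	size_t used;

	/* 1 MiB buffer with 900 KiB still pending -> ~124 KiB JPEG received */
	if (jpeg_payload(FAKE_DMA_IN_PROGRESS, 1 << 20, 900 * 1024, &used))
		printf("payload: %zu bytes\n", used);

	/* DMA error -> no usable payload */
	if (!jpeg_payload(FAKE_DMA_ERROR, 1 << 20, 0, &used))
		printf("error: buffer would be returned in ERROR state\n");

	return 0;
}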