aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/media/platform
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/media/platform')
-rw-r--r--drivers/media/platform/Kconfig19
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/coda.c278
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c2
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c4
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h1
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c29
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c2
-rw-r--r--drivers/media/platform/m2m-deinterlace.c3
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c14
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c1
-rw-r--r--drivers/media/platform/mem2mem_testdev.c3
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c8
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_grp_layer.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_vp_layer.c2
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c5
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c46
-rw-r--r--drivers/media/platform/ti-vpe/Makefile5
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c846
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h203
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h641
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2099
-rw-r--r--drivers/media/platform/ti-vpe/vpe_regs.h496
-rw-r--r--drivers/media/platform/timblogiw.c4
31 files changed, 4566 insertions, 173 deletions
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index eb70dda8cbf3..d7f0249e4050 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -143,6 +143,7 @@ if V4L_MEM2MEM_DRIVERS
143config VIDEO_CODA 143config VIDEO_CODA
144 tristate "Chips&Media Coda multi-standard codec IP" 144 tristate "Chips&Media Coda multi-standard codec IP"
145 depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_MXC 145 depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_MXC
146 select SRAM
146 select VIDEOBUF2_DMA_CONTIG 147 select VIDEOBUF2_DMA_CONTIG
147 select V4L2_MEM2MEM_DEV 148 select V4L2_MEM2MEM_DEV
148 ---help--- 149 ---help---
@@ -212,7 +213,7 @@ config VIDEO_SH_VEU
212 213
213config VIDEO_RENESAS_VSP1 214config VIDEO_RENESAS_VSP1
214 tristate "Renesas VSP1 Video Processing Engine" 215 tristate "Renesas VSP1 Video Processing Engine"
215 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 216 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
216 select VIDEOBUF2_DMA_CONTIG 217 select VIDEOBUF2_DMA_CONTIG
217 ---help--- 218 ---help---
218 This is a V4L2 driver for the Renesas VSP1 video processing engine. 219 This is a V4L2 driver for the Renesas VSP1 video processing engine.
@@ -220,6 +221,22 @@ config VIDEO_RENESAS_VSP1
220 To compile this driver as a module, choose M here: the module 221 To compile this driver as a module, choose M here: the module
221 will be called vsp1. 222 will be called vsp1.
222 223
224config VIDEO_TI_VPE
225 tristate "TI VPE (Video Processing Engine) driver"
226 depends on VIDEO_DEV && VIDEO_V4L2 && SOC_DRA7XX
227 select VIDEOBUF2_DMA_CONTIG
228 select V4L2_MEM2MEM_DEV
229 default n
230 ---help---
231 Support for the TI VPE(Video Processing Engine) block
232 found on DRA7XX SoC.
233
234config VIDEO_TI_VPE_DEBUG
235 bool "VPE debug messages"
236 depends on VIDEO_TI_VPE
237 ---help---
238 Enable debug messages on VPE driver.
239
223endif # V4L_MEM2MEM_DRIVERS 240endif # V4L_MEM2MEM_DRIVERS
224 241
225menuconfig V4L_TEST_DRIVERS 242menuconfig V4L_TEST_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 4e4da482c522..1348ba1faf92 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_VIDEO_VIVI) += vivi.o
22 22
23obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o 23obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
24 24
25obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
26
25obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o 27obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
26obj-$(CONFIG_VIDEO_CODA) += coda.o 28obj-$(CONFIG_VIDEO_CODA) += coda.o
27 29
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 4993610051ee..bd72fb97fea5 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -39,7 +39,7 @@
39 39
40#define CODA_NAME "coda" 40#define CODA_NAME "coda"
41 41
42#define CODA_MAX_INSTANCES 4 42#define CODADX6_MAX_INSTANCES 4
43 43
44#define CODA_FMO_BUF_SIZE 32 44#define CODA_FMO_BUF_SIZE 32
45#define CODADX6_WORK_BUF_SIZE (288 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024) 45#define CODADX6_WORK_BUF_SIZE (288 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024)
@@ -54,8 +54,6 @@
54 54
55#define CODA_MAX_FRAMEBUFFERS 8 55#define CODA_MAX_FRAMEBUFFERS 8
56 56
57#define MAX_W 8192
58#define MAX_H 8192
59#define CODA_MAX_FRAME_SIZE 0x100000 57#define CODA_MAX_FRAME_SIZE 0x100000
60#define FMO_SLICE_SAVE_BUF_SIZE (32) 58#define FMO_SLICE_SAVE_BUF_SIZE (32)
61#define CODA_DEFAULT_GAMMA 4096 59#define CODA_DEFAULT_GAMMA 4096
@@ -394,14 +392,57 @@ static struct coda_codec *coda_find_codec(struct coda_dev *dev, int src_fourcc,
394 return &codecs[k]; 392 return &codecs[k];
395} 393}
396 394
395static void coda_get_max_dimensions(struct coda_dev *dev,
396 struct coda_codec *codec,
397 int *max_w, int *max_h)
398{
399 struct coda_codec *codecs = dev->devtype->codecs;
400 int num_codecs = dev->devtype->num_codecs;
401 unsigned int w, h;
402 int k;
403
404 if (codec) {
405 w = codec->max_w;
406 h = codec->max_h;
407 } else {
408 for (k = 0, w = 0, h = 0; k < num_codecs; k++) {
409 w = max(w, codecs[k].max_w);
410 h = max(h, codecs[k].max_h);
411 }
412 }
413
414 if (max_w)
415 *max_w = w;
416 if (max_h)
417 *max_h = h;
418}
419
420static char *coda_product_name(int product)
421{
422 static char buf[9];
423
424 switch (product) {
425 case CODA_DX6:
426 return "CodaDx6";
427 case CODA_7541:
428 return "CODA7541";
429 default:
430 snprintf(buf, sizeof(buf), "(0x%04x)", product);
431 return buf;
432 }
433}
434
397/* 435/*
398 * V4L2 ioctl() operations. 436 * V4L2 ioctl() operations.
399 */ 437 */
400static int vidioc_querycap(struct file *file, void *priv, 438static int coda_querycap(struct file *file, void *priv,
401 struct v4l2_capability *cap) 439 struct v4l2_capability *cap)
402{ 440{
441 struct coda_ctx *ctx = fh_to_ctx(priv);
442
403 strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver)); 443 strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver));
404 strlcpy(cap->card, CODA_NAME, sizeof(cap->card)); 444 strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product),
445 sizeof(cap->card));
405 strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info)); 446 strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info));
406 /* 447 /*
407 * This is only a mem-to-mem video device. The capture and output 448 * This is only a mem-to-mem video device. The capture and output
@@ -457,6 +498,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
457 fmt = &formats[i]; 498 fmt = &formats[i];
458 strlcpy(f->description, fmt->name, sizeof(f->description)); 499 strlcpy(f->description, fmt->name, sizeof(f->description));
459 f->pixelformat = fmt->fourcc; 500 f->pixelformat = fmt->fourcc;
501 if (!coda_format_is_yuv(fmt->fourcc))
502 f->flags |= V4L2_FMT_FLAG_COMPRESSED;
460 return 0; 503 return 0;
461 } 504 }
462 505
@@ -464,8 +507,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
464 return -EINVAL; 507 return -EINVAL;
465} 508}
466 509
467static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, 510static int coda_enum_fmt_vid_cap(struct file *file, void *priv,
468 struct v4l2_fmtdesc *f) 511 struct v4l2_fmtdesc *f)
469{ 512{
470 struct coda_ctx *ctx = fh_to_ctx(priv); 513 struct coda_ctx *ctx = fh_to_ctx(priv);
471 struct vb2_queue *src_vq; 514 struct vb2_queue *src_vq;
@@ -483,13 +526,14 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
483 return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0); 526 return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0);
484} 527}
485 528
486static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, 529static int coda_enum_fmt_vid_out(struct file *file, void *priv,
487 struct v4l2_fmtdesc *f) 530 struct v4l2_fmtdesc *f)
488{ 531{
489 return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0); 532 return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0);
490} 533}
491 534
492static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) 535static int coda_g_fmt(struct file *file, void *priv,
536 struct v4l2_format *f)
493{ 537{
494 struct vb2_queue *vq; 538 struct vb2_queue *vq;
495 struct coda_q_data *q_data; 539 struct coda_q_data *q_data;
@@ -516,8 +560,11 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
516 return 0; 560 return 0;
517} 561}
518 562
519static int vidioc_try_fmt(struct coda_codec *codec, struct v4l2_format *f) 563static int coda_try_fmt(struct coda_ctx *ctx, struct coda_codec *codec,
564 struct v4l2_format *f)
520{ 565{
566 struct coda_dev *dev = ctx->dev;
567 struct coda_q_data *q_data;
521 unsigned int max_w, max_h; 568 unsigned int max_w, max_h;
522 enum v4l2_field field; 569 enum v4l2_field field;
523 570
@@ -531,32 +578,48 @@ static int vidioc_try_fmt(struct coda_codec *codec, struct v4l2_format *f)
531 * if any of the dimensions is unsupported */ 578 * if any of the dimensions is unsupported */
532 f->fmt.pix.field = field; 579 f->fmt.pix.field = field;
533 580
534 if (codec) { 581 coda_get_max_dimensions(dev, codec, &max_w, &max_h);
535 max_w = codec->max_w; 582 v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN,
536 max_h = codec->max_h; 583 &f->fmt.pix.height, MIN_H, max_h, H_ALIGN,
537 } else { 584 S_ALIGN);
538 max_w = MAX_W; 585
539 max_h = MAX_H; 586 switch (f->fmt.pix.pixelformat) {
587 case V4L2_PIX_FMT_YUV420:
588 case V4L2_PIX_FMT_YVU420:
589 case V4L2_PIX_FMT_H264:
590 case V4L2_PIX_FMT_MPEG4:
591 case V4L2_PIX_FMT_JPEG:
592 break;
593 default:
594 q_data = get_q_data(ctx, f->type);
595 f->fmt.pix.pixelformat = q_data->fourcc;
540 } 596 }
541 v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w,
542 W_ALIGN, &f->fmt.pix.height,
543 MIN_H, max_h, H_ALIGN, S_ALIGN);
544 597
545 if (coda_format_is_yuv(f->fmt.pix.pixelformat)) { 598 switch (f->fmt.pix.pixelformat) {
599 case V4L2_PIX_FMT_YUV420:
600 case V4L2_PIX_FMT_YVU420:
546 /* Frame stride must be multiple of 8 */ 601 /* Frame stride must be multiple of 8 */
547 f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 8); 602 f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 8);
548 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * 603 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
549 f->fmt.pix.height * 3 / 2; 604 f->fmt.pix.height * 3 / 2;
550 } else { /*encoded formats h.264/mpeg4 */ 605 break;
606 case V4L2_PIX_FMT_H264:
607 case V4L2_PIX_FMT_MPEG4:
608 case V4L2_PIX_FMT_JPEG:
551 f->fmt.pix.bytesperline = 0; 609 f->fmt.pix.bytesperline = 0;
552 f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE; 610 f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE;
611 break;
612 default:
613 BUG();
553 } 614 }
554 615
616 f->fmt.pix.priv = 0;
617
555 return 0; 618 return 0;
556} 619}
557 620
558static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, 621static int coda_try_fmt_vid_cap(struct file *file, void *priv,
559 struct v4l2_format *f) 622 struct v4l2_format *f)
560{ 623{
561 struct coda_ctx *ctx = fh_to_ctx(priv); 624 struct coda_ctx *ctx = fh_to_ctx(priv);
562 struct coda_codec *codec; 625 struct coda_codec *codec;
@@ -584,7 +647,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
584 647
585 f->fmt.pix.colorspace = ctx->colorspace; 648 f->fmt.pix.colorspace = ctx->colorspace;
586 649
587 ret = vidioc_try_fmt(codec, f); 650 ret = coda_try_fmt(ctx, codec, f);
588 if (ret < 0) 651 if (ret < 0)
589 return ret; 652 return ret;
590 653
@@ -600,8 +663,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
600 return 0; 663 return 0;
601} 664}
602 665
603static int vidioc_try_fmt_vid_out(struct file *file, void *priv, 666static int coda_try_fmt_vid_out(struct file *file, void *priv,
604 struct v4l2_format *f) 667 struct v4l2_format *f)
605{ 668{
606 struct coda_ctx *ctx = fh_to_ctx(priv); 669 struct coda_ctx *ctx = fh_to_ctx(priv);
607 struct coda_codec *codec; 670 struct coda_codec *codec;
@@ -613,10 +676,10 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
613 if (!f->fmt.pix.colorspace) 676 if (!f->fmt.pix.colorspace)
614 f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; 677 f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
615 678
616 return vidioc_try_fmt(codec, f); 679 return coda_try_fmt(ctx, codec, f);
617} 680}
618 681
619static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f) 682static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
620{ 683{
621 struct coda_q_data *q_data; 684 struct coda_q_data *q_data;
622 struct vb2_queue *vq; 685 struct vb2_queue *vq;
@@ -646,61 +709,62 @@ static int vidioc_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
646 return 0; 709 return 0;
647} 710}
648 711
649static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, 712static int coda_s_fmt_vid_cap(struct file *file, void *priv,
650 struct v4l2_format *f) 713 struct v4l2_format *f)
651{ 714{
652 struct coda_ctx *ctx = fh_to_ctx(priv); 715 struct coda_ctx *ctx = fh_to_ctx(priv);
653 int ret; 716 int ret;
654 717
655 ret = vidioc_try_fmt_vid_cap(file, priv, f); 718 ret = coda_try_fmt_vid_cap(file, priv, f);
656 if (ret) 719 if (ret)
657 return ret; 720 return ret;
658 721
659 return vidioc_s_fmt(ctx, f); 722 return coda_s_fmt(ctx, f);
660} 723}
661 724
662static int vidioc_s_fmt_vid_out(struct file *file, void *priv, 725static int coda_s_fmt_vid_out(struct file *file, void *priv,
663 struct v4l2_format *f) 726 struct v4l2_format *f)
664{ 727{
665 struct coda_ctx *ctx = fh_to_ctx(priv); 728 struct coda_ctx *ctx = fh_to_ctx(priv);
666 int ret; 729 int ret;
667 730
668 ret = vidioc_try_fmt_vid_out(file, priv, f); 731 ret = coda_try_fmt_vid_out(file, priv, f);
669 if (ret) 732 if (ret)
670 return ret; 733 return ret;
671 734
672 ret = vidioc_s_fmt(ctx, f); 735 ret = coda_s_fmt(ctx, f);
673 if (ret) 736 if (ret)
674 ctx->colorspace = f->fmt.pix.colorspace; 737 ctx->colorspace = f->fmt.pix.colorspace;
675 738
676 return ret; 739 return ret;
677} 740}
678 741
679static int vidioc_reqbufs(struct file *file, void *priv, 742static int coda_reqbufs(struct file *file, void *priv,
680 struct v4l2_requestbuffers *reqbufs) 743 struct v4l2_requestbuffers *reqbufs)
681{ 744{
682 struct coda_ctx *ctx = fh_to_ctx(priv); 745 struct coda_ctx *ctx = fh_to_ctx(priv);
683 746
684 return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); 747 return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
685} 748}
686 749
687static int vidioc_querybuf(struct file *file, void *priv, 750static int coda_querybuf(struct file *file, void *priv,
688 struct v4l2_buffer *buf) 751 struct v4l2_buffer *buf)
689{ 752{
690 struct coda_ctx *ctx = fh_to_ctx(priv); 753 struct coda_ctx *ctx = fh_to_ctx(priv);
691 754
692 return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); 755 return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
693} 756}
694 757
695static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) 758static int coda_qbuf(struct file *file, void *priv,
759 struct v4l2_buffer *buf)
696{ 760{
697 struct coda_ctx *ctx = fh_to_ctx(priv); 761 struct coda_ctx *ctx = fh_to_ctx(priv);
698 762
699 return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); 763 return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
700} 764}
701 765
702static int vidioc_expbuf(struct file *file, void *priv, 766static int coda_expbuf(struct file *file, void *priv,
703 struct v4l2_exportbuffer *eb) 767 struct v4l2_exportbuffer *eb)
704{ 768{
705 struct coda_ctx *ctx = fh_to_ctx(priv); 769 struct coda_ctx *ctx = fh_to_ctx(priv);
706 770
@@ -718,7 +782,8 @@ static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
718 (buf->sequence == (ctx->qsequence - 1))); 782 (buf->sequence == (ctx->qsequence - 1)));
719} 783}
720 784
721static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) 785static int coda_dqbuf(struct file *file, void *priv,
786 struct v4l2_buffer *buf)
722{ 787{
723 struct coda_ctx *ctx = fh_to_ctx(priv); 788 struct coda_ctx *ctx = fh_to_ctx(priv);
724 int ret; 789 int ret;
@@ -738,24 +803,24 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
738 return ret; 803 return ret;
739} 804}
740 805
741static int vidioc_create_bufs(struct file *file, void *priv, 806static int coda_create_bufs(struct file *file, void *priv,
742 struct v4l2_create_buffers *create) 807 struct v4l2_create_buffers *create)
743{ 808{
744 struct coda_ctx *ctx = fh_to_ctx(priv); 809 struct coda_ctx *ctx = fh_to_ctx(priv);
745 810
746 return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create); 811 return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
747} 812}
748 813
749static int vidioc_streamon(struct file *file, void *priv, 814static int coda_streamon(struct file *file, void *priv,
750 enum v4l2_buf_type type) 815 enum v4l2_buf_type type)
751{ 816{
752 struct coda_ctx *ctx = fh_to_ctx(priv); 817 struct coda_ctx *ctx = fh_to_ctx(priv);
753 818
754 return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); 819 return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
755} 820}
756 821
757static int vidioc_streamoff(struct file *file, void *priv, 822static int coda_streamoff(struct file *file, void *priv,
758 enum v4l2_buf_type type) 823 enum v4l2_buf_type type)
759{ 824{
760 struct coda_ctx *ctx = fh_to_ctx(priv); 825 struct coda_ctx *ctx = fh_to_ctx(priv);
761 int ret; 826 int ret;
@@ -772,23 +837,34 @@ static int vidioc_streamoff(struct file *file, void *priv,
772 return ret; 837 return ret;
773} 838}
774 839
775static int vidioc_decoder_cmd(struct file *file, void *fh, 840static int coda_try_decoder_cmd(struct file *file, void *fh,
776 struct v4l2_decoder_cmd *dc) 841 struct v4l2_decoder_cmd *dc)
777{ 842{
778 struct coda_ctx *ctx = fh_to_ctx(fh);
779
780 if (dc->cmd != V4L2_DEC_CMD_STOP) 843 if (dc->cmd != V4L2_DEC_CMD_STOP)
781 return -EINVAL; 844 return -EINVAL;
782 845
783 if ((dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK) || 846 if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
784 (dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY))
785 return -EINVAL; 847 return -EINVAL;
786 848
787 if (dc->stop.pts != 0) 849 if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
788 return -EINVAL; 850 return -EINVAL;
789 851
852 return 0;
853}
854
855static int coda_decoder_cmd(struct file *file, void *fh,
856 struct v4l2_decoder_cmd *dc)
857{
858 struct coda_ctx *ctx = fh_to_ctx(fh);
859 int ret;
860
861 ret = coda_try_decoder_cmd(file, fh, dc);
862 if (ret < 0)
863 return ret;
864
865 /* Ignore decoder stop command silently in encoder context */
790 if (ctx->inst_type != CODA_INST_DECODER) 866 if (ctx->inst_type != CODA_INST_DECODER)
791 return -EINVAL; 867 return 0;
792 868
793 /* Set the strem-end flag on this context */ 869 /* Set the strem-end flag on this context */
794 ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG; 870 ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
@@ -796,8 +872,8 @@ static int vidioc_decoder_cmd(struct file *file, void *fh,
796 return 0; 872 return 0;
797} 873}
798 874
799static int vidioc_subscribe_event(struct v4l2_fh *fh, 875static int coda_subscribe_event(struct v4l2_fh *fh,
800 const struct v4l2_event_subscription *sub) 876 const struct v4l2_event_subscription *sub)
801{ 877{
802 switch (sub->type) { 878 switch (sub->type) {
803 case V4L2_EVENT_EOS: 879 case V4L2_EVENT_EOS:
@@ -808,32 +884,33 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh,
808} 884}
809 885
810static const struct v4l2_ioctl_ops coda_ioctl_ops = { 886static const struct v4l2_ioctl_ops coda_ioctl_ops = {
811 .vidioc_querycap = vidioc_querycap, 887 .vidioc_querycap = coda_querycap,
812 888
813 .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, 889 .vidioc_enum_fmt_vid_cap = coda_enum_fmt_vid_cap,
814 .vidioc_g_fmt_vid_cap = vidioc_g_fmt, 890 .vidioc_g_fmt_vid_cap = coda_g_fmt,
815 .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, 891 .vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap,
816 .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, 892 .vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap,
817 893
818 .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out, 894 .vidioc_enum_fmt_vid_out = coda_enum_fmt_vid_out,
819 .vidioc_g_fmt_vid_out = vidioc_g_fmt, 895 .vidioc_g_fmt_vid_out = coda_g_fmt,
820 .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out, 896 .vidioc_try_fmt_vid_out = coda_try_fmt_vid_out,
821 .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out, 897 .vidioc_s_fmt_vid_out = coda_s_fmt_vid_out,
822 898
823 .vidioc_reqbufs = vidioc_reqbufs, 899 .vidioc_reqbufs = coda_reqbufs,
824 .vidioc_querybuf = vidioc_querybuf, 900 .vidioc_querybuf = coda_querybuf,
825 901
826 .vidioc_qbuf = vidioc_qbuf, 902 .vidioc_qbuf = coda_qbuf,
827 .vidioc_expbuf = vidioc_expbuf, 903 .vidioc_expbuf = coda_expbuf,
828 .vidioc_dqbuf = vidioc_dqbuf, 904 .vidioc_dqbuf = coda_dqbuf,
829 .vidioc_create_bufs = vidioc_create_bufs, 905 .vidioc_create_bufs = coda_create_bufs,
830 906
831 .vidioc_streamon = vidioc_streamon, 907 .vidioc_streamon = coda_streamon,
832 .vidioc_streamoff = vidioc_streamoff, 908 .vidioc_streamoff = coda_streamoff,
833 909
834 .vidioc_decoder_cmd = vidioc_decoder_cmd, 910 .vidioc_try_decoder_cmd = coda_try_decoder_cmd,
911 .vidioc_decoder_cmd = coda_decoder_cmd,
835 912
836 .vidioc_subscribe_event = vidioc_subscribe_event, 913 .vidioc_subscribe_event = coda_subscribe_event,
837 .vidioc_unsubscribe_event = v4l2_event_unsubscribe, 914 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
838}; 915};
839 916
@@ -1928,8 +2005,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
1928 if (!(ctx->streamon_out & ctx->streamon_cap)) 2005 if (!(ctx->streamon_out & ctx->streamon_cap))
1929 return 0; 2006 return 0;
1930 2007
1931 /* Allow device_run with no buffers queued and after streamoff */ 2008 /* Allow decoder device_run with no new buffers queued */
1932 v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true); 2009 if (ctx->inst_type == CODA_INST_DECODER)
2010 v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true);
1933 2011
1934 ctx->gopcounter = ctx->params.gop_size - 1; 2012 ctx->gopcounter = ctx->params.gop_size - 1;
1935 buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); 2013 buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
@@ -2071,10 +2149,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
2071 coda_setup_iram(ctx); 2149 coda_setup_iram(ctx);
2072 2150
2073 if (dst_fourcc == V4L2_PIX_FMT_H264) { 2151 if (dst_fourcc == V4L2_PIX_FMT_H264) {
2074 value = (FMO_SLICE_SAVE_BUF_SIZE << 7);
2075 value |= (0 & CODA_FMOPARAM_TYPE_MASK) << CODA_FMOPARAM_TYPE_OFFSET;
2076 value |= 0 & CODA_FMOPARAM_SLICENUM_MASK;
2077 if (dev->devtype->product == CODA_DX6) { 2152 if (dev->devtype->product == CODA_DX6) {
2153 value = FMO_SLICE_SAVE_BUF_SIZE << 7;
2078 coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO); 2154 coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
2079 } else { 2155 } else {
2080 coda_write(dev, ctx->iram_info.search_ram_paddr, 2156 coda_write(dev, ctx->iram_info.search_ram_paddr,
@@ -2371,7 +2447,13 @@ static int coda_queue_init(void *priv, struct vb2_queue *src_vq,
2371 2447
2372static int coda_next_free_instance(struct coda_dev *dev) 2448static int coda_next_free_instance(struct coda_dev *dev)
2373{ 2449{
2374 return ffz(dev->instance_mask); 2450 int idx = ffz(dev->instance_mask);
2451
2452 if ((idx < 0) ||
2453 (dev->devtype->product == CODA_DX6 && idx > CODADX6_MAX_INSTANCES))
2454 return -EBUSY;
2455
2456 return idx;
2375} 2457}
2376 2458
2377static int coda_open(struct file *file) 2459static int coda_open(struct file *file)
@@ -2386,8 +2468,8 @@ static int coda_open(struct file *file)
2386 return -ENOMEM; 2468 return -ENOMEM;
2387 2469
2388 idx = coda_next_free_instance(dev); 2470 idx = coda_next_free_instance(dev);
2389 if (idx >= CODA_MAX_INSTANCES) { 2471 if (idx < 0) {
2390 ret = -EBUSY; 2472 ret = idx;
2391 goto err_coda_max; 2473 goto err_coda_max;
2392 } 2474 }
2393 set_bit(idx, &dev->instance_mask); 2475 set_bit(idx, &dev->instance_mask);
@@ -2719,7 +2801,6 @@ static void coda_finish_encode(struct coda_ctx *ctx)
2719 dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); 2801 dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
2720 2802
2721 /* Get results from the coda */ 2803 /* Get results from the coda */
2722 coda_read(dev, CODA_RET_ENC_PIC_TYPE);
2723 start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START); 2804 start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
2724 wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)); 2805 wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
2725 2806
@@ -2739,7 +2820,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
2739 coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM); 2820 coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
2740 coda_read(dev, CODA_RET_ENC_PIC_FLAG); 2821 coda_read(dev, CODA_RET_ENC_PIC_FLAG);
2741 2822
2742 if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) { 2823 if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
2743 dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; 2824 dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
2744 dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME; 2825 dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
2745 } else { 2826 } else {
@@ -2861,21 +2942,6 @@ static bool coda_firmware_supported(u32 vernum)
2861 return false; 2942 return false;
2862} 2943}
2863 2944
2864static char *coda_product_name(int product)
2865{
2866 static char buf[9];
2867
2868 switch (product) {
2869 case CODA_DX6:
2870 return "CodaDx6";
2871 case CODA_7541:
2872 return "CODA7541";
2873 default:
2874 snprintf(buf, sizeof(buf), "(0x%04x)", product);
2875 return buf;
2876 }
2877}
2878
2879static int coda_hw_init(struct coda_dev *dev) 2945static int coda_hw_init(struct coda_dev *dev)
2880{ 2946{
2881 u16 product, major, minor, release; 2947 u16 product, major, minor, release;
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 04609cc6eba7..eac472b5ae83 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -1785,7 +1785,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
1785 } 1785 }
1786 1786
1787 irq = res->start; 1787 irq = res->start;
1788 err = devm_request_irq(&pdev->dev, irq, venc_isr, IRQF_DISABLED, 1788 err = devm_request_irq(&pdev->dev, irq, venc_isr, 0,
1789 VPBE_DISPLAY_DRIVER, disp_dev); 1789 VPBE_DISPLAY_DRIVER, disp_dev);
1790 if (err) { 1790 if (err) {
1791 v4l2_err(&disp_dev->vpbe_dev->v4l2_dev, 1791 v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 93609091cb23..d762246eabf5 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -688,7 +688,7 @@ static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
688 frame_format = ccdc_dev->hw_ops.get_frame_format(); 688 frame_format = ccdc_dev->hw_ops.get_frame_format();
689 if (frame_format == CCDC_FRMFMT_PROGRESSIVE) { 689 if (frame_format == CCDC_FRMFMT_PROGRESSIVE) {
690 return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr, 690 return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr,
691 IRQF_DISABLED, "vpfe_capture1", 691 0, "vpfe_capture1",
692 vpfe_dev); 692 vpfe_dev);
693 } 693 }
694 return 0; 694 return 0;
@@ -1863,7 +1863,7 @@ static int vpfe_probe(struct platform_device *pdev)
1863 } 1863 }
1864 vpfe_dev->ccdc_irq1 = res1->start; 1864 vpfe_dev->ccdc_irq1 = res1->start;
1865 1865
1866 ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, IRQF_DISABLED, 1866 ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0,
1867 "vpfe_capture0", vpfe_dev); 1867 "vpfe_capture0", vpfe_dev);
1868 1868
1869 if (0 != ret) { 1869 if (0 != ret) {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 1089834a4efe..52ac5e6c8625 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -2154,7 +2154,7 @@ static __init int vpif_probe(struct platform_device *pdev)
2154 2154
2155 if (!vpif_obj.sd[i]) { 2155 if (!vpif_obj.sd[i]) {
2156 vpif_err("Error registering v4l2 subdevice\n"); 2156 vpif_err("Error registering v4l2 subdevice\n");
2157 err = -ENOMEM; 2157 err = -ENODEV;
2158 goto probe_subdev_out; 2158 goto probe_subdev_out;
2159 } 2159 }
2160 v4l2_info(&vpif_obj.v4l2_dev, 2160 v4l2_info(&vpif_obj.v4l2_dev,
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 76435d3bf62d..ef0a6564cef9 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -45,6 +45,7 @@
45#define GSC_DST_FMT (1 << 2) 45#define GSC_DST_FMT (1 << 2)
46#define GSC_CTX_M2M (1 << 3) 46#define GSC_CTX_M2M (1 << 3)
47#define GSC_CTX_STOP_REQ (1 << 6) 47#define GSC_CTX_STOP_REQ (1 << 6)
48#define GSC_CTX_ABORT (1 << 7)
48 49
49enum gsc_dev_flags { 50enum gsc_dev_flags {
50 /* for global */ 51 /* for global */
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index e576ff2de3de..810c3e13970c 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -46,6 +46,17 @@ static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
46 return ret == 0 ? -ETIMEDOUT : ret; 46 return ret == 0 ? -ETIMEDOUT : ret;
47} 47}
48 48
49static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
50{
51 int ret;
52
53 ret = gsc_m2m_ctx_stop_req(ctx);
54 if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) {
55 gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx);
56 gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
57 }
58}
59
49static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count) 60static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
50{ 61{
51 struct gsc_ctx *ctx = q->drv_priv; 62 struct gsc_ctx *ctx = q->drv_priv;
@@ -58,11 +69,8 @@ static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
58static int gsc_m2m_stop_streaming(struct vb2_queue *q) 69static int gsc_m2m_stop_streaming(struct vb2_queue *q)
59{ 70{
60 struct gsc_ctx *ctx = q->drv_priv; 71 struct gsc_ctx *ctx = q->drv_priv;
61 int ret;
62 72
63 ret = gsc_m2m_ctx_stop_req(ctx); 73 __gsc_m2m_job_abort(ctx);
64 if (ret == -ETIMEDOUT)
65 gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
66 74
67 pm_runtime_put(&ctx->gsc_dev->pdev->dev); 75 pm_runtime_put(&ctx->gsc_dev->pdev->dev);
68 76
@@ -91,15 +99,9 @@ void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
91 } 99 }
92} 100}
93 101
94
95static void gsc_m2m_job_abort(void *priv) 102static void gsc_m2m_job_abort(void *priv)
96{ 103{
97 struct gsc_ctx *ctx = priv; 104 __gsc_m2m_job_abort((struct gsc_ctx *)priv);
98 int ret;
99
100 ret = gsc_m2m_ctx_stop_req(ctx);
101 if (ret == -ETIMEDOUT)
102 gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
103} 105}
104 106
105static int gsc_get_bufs(struct gsc_ctx *ctx) 107static int gsc_get_bufs(struct gsc_ctx *ctx)
@@ -150,9 +152,10 @@ static void gsc_m2m_device_run(void *priv)
150 gsc->m2m.ctx = ctx; 152 gsc->m2m.ctx = ctx;
151 } 153 }
152 154
153 is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0; 155 is_set = ctx->state & GSC_CTX_STOP_REQ;
154 ctx->state &= ~GSC_CTX_STOP_REQ;
155 if (is_set) { 156 if (is_set) {
157 ctx->state &= ~GSC_CTX_STOP_REQ;
158 ctx->state |= GSC_CTX_ABORT;
156 wake_up(&gsc->irq_queue); 159 wake_up(&gsc->irq_queue);
157 goto put_device; 160 goto put_device;
158 } 161 }
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index d2e6cba3566d..f3c6136aa5b4 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -511,7 +511,7 @@ static int __ctrl_set_metering(struct fimc_is *is, unsigned int value)
511 break; 511 break;
512 default: 512 default:
513 return -EINVAL; 513 return -EINVAL;
514 }; 514 }
515 515
516 __is_set_isp_metering(is, IS_METERING_CONFIG_CMD, val); 516 __is_set_isp_metering(is, IS_METERING_CONFIG_CMD, val);
517 return 0; 517 return 0;
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 540516ca872c..36513e896413 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -1084,8 +1084,7 @@ free_dev:
1084 1084
1085static int deinterlace_remove(struct platform_device *pdev) 1085static int deinterlace_remove(struct platform_device *pdev)
1086{ 1086{
1087 struct deinterlace_dev *pcdev = 1087 struct deinterlace_dev *pcdev = platform_get_drvdata(pdev);
1088 (struct deinterlace_dev *)platform_get_drvdata(pdev);
1089 1088
1090 v4l2_info(&pcdev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME); 1089 v4l2_info(&pcdev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
1091 v4l2_m2m_release(pcdev->m2m_dev); 1090 v4l2_m2m_release(pcdev->m2m_dev);
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 5184887b155c..32fab30a9105 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1221,16 +1221,16 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
1221{ 1221{
1222 struct mcam_vb_buffer *mvb = vb_to_mvb(vb); 1222 struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
1223 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); 1223 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1224 struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0); 1224 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
1225 struct mcam_dma_desc *desc = mvb->dma_desc; 1225 struct mcam_dma_desc *desc = mvb->dma_desc;
1226 struct scatterlist *sg; 1226 struct scatterlist *sg;
1227 int i; 1227 int i;
1228 1228
1229 mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages, 1229 mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl,
1230 DMA_FROM_DEVICE); 1230 sg_table->nents, DMA_FROM_DEVICE);
1231 if (mvb->dma_desc_nent <= 0) 1231 if (mvb->dma_desc_nent <= 0)
1232 return -EIO; /* Not sure what's right here */ 1232 return -EIO; /* Not sure what's right here */
1233 for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) { 1233 for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
1234 desc->dma_addr = sg_dma_address(sg); 1234 desc->dma_addr = sg_dma_address(sg);
1235 desc->segment_len = sg_dma_len(sg); 1235 desc->segment_len = sg_dma_len(sg);
1236 desc++; 1236 desc++;
@@ -1241,9 +1241,11 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
1241static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb) 1241static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
1242{ 1242{
1243 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); 1243 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1244 struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0); 1244 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
1245 1245
1246 dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE); 1246 if (sg_table)
1247 dma_unmap_sg(cam->dev, sg_table->sgl,
1248 sg_table->nents, DMA_FROM_DEVICE);
1247 return 0; 1249 return 0;
1248} 1250}
1249 1251
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index b5a19af5c587..3458fa0e2fd5 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -481,7 +481,6 @@ static int mmpcam_remove(struct mmp_camera *cam)
481 struct mmp_camera_platform_data *pdata; 481 struct mmp_camera_platform_data *pdata;
482 482
483 mmpcam_remove_device(cam); 483 mmpcam_remove_device(cam);
484 free_irq(cam->irq, mcam);
485 mccic_shutdown(mcam); 484 mccic_shutdown(mcam);
486 mmpcam_power_down(mcam); 485 mmpcam_power_down(mcam);
487 pdata = cam->pdev->dev.platform_data; 486 pdata = cam->pdev->dev.platform_data;
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index 6a17676f9d72..8df5975b700a 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -1090,8 +1090,7 @@ unreg_dev:
1090 1090
1091static int m2mtest_remove(struct platform_device *pdev) 1091static int m2mtest_remove(struct platform_device *pdev)
1092{ 1092{
1093 struct m2mtest_dev *dev = 1093 struct m2mtest_dev *dev = platform_get_drvdata(pdev);
1094 (struct m2mtest_dev *)platform_get_drvdata(pdev);
1095 1094
1096 v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME); 1095 v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
1097 v4l2_m2m_release(dev->m2m_dev); 1096 v4l2_m2m_release(dev->m2m_dev);
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index fd6289d60cde..0b2948376aee 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -840,7 +840,7 @@ put_clk:
840 840
841static int g2d_remove(struct platform_device *pdev) 841static int g2d_remove(struct platform_device *pdev)
842{ 842{
843 struct g2d_dev *dev = (struct g2d_dev *)platform_get_drvdata(pdev); 843 struct g2d_dev *dev = platform_get_drvdata(pdev);
844 844
845 v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME); 845 v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME);
846 v4l2_m2m_release(dev->m2m_dev); 846 v4l2_m2m_release(dev->m2m_dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 084263dd126f..5f2c4ad6c2cb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -404,7 +404,11 @@ leave_handle_frame:
404 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 404 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
405 BUG(); 405 BUG();
406 s5p_mfc_clock_off(); 406 s5p_mfc_clock_off();
 407 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); 407 /* if suspending, wake up device and do not try_run again */
408 if (test_bit(0, &dev->enter_suspend))
409 wake_up_dev(dev, reason, err);
410 else
411 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
408} 412}
409 413
410/* Error handling for interrupt */ 414/* Error handling for interrupt */
@@ -1101,7 +1105,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
1101 } 1105 }
1102 dev->irq = res->start; 1106 dev->irq = res->start;
1103 ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq, 1107 ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
1104 IRQF_DISABLED, pdev->name, dev); 1108 0, pdev->name, dev);
1105 if (ret) { 1109 if (ret) {
1106 dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret); 1110 dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
1107 goto err_res; 1111 goto err_res;
@@ -1286,9 +1290,7 @@ static int s5p_mfc_suspend(struct device *dev)
1286 /* Try and lock the HW */ 1290 /* Try and lock the HW */
1287 /* Wait on the interrupt waitqueue */ 1291 /* Wait on the interrupt waitqueue */
1288 ret = wait_event_interruptible_timeout(m_dev->queue, 1292 ret = wait_event_interruptible_timeout(m_dev->queue,
1289 m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond, 1293 m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT));
1290 msecs_to_jiffies(MFC_INT_TIMEOUT));
1291
1292 if (ret == 0) { 1294 if (ret == 0) {
1293 mfc_err("Waiting for hardware to finish timed out\n"); 1295 mfc_err("Waiting for hardware to finish timed out\n");
1294 return -EIO; 1296 return -EIO;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
index ad4f1df0a18e..9a6efd6c1329 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
@@ -111,7 +111,7 @@ static int s5p_mfc_open_inst_cmd_v5(struct s5p_mfc_ctx *ctx)
111 break; 111 break;
112 default: 112 default:
113 h2r_args.arg[0] = S5P_FIMV_CODEC_NONE; 113 h2r_args.arg[0] = S5P_FIMV_CODEC_NONE;
114 }; 114 }
115 h2r_args.arg[1] = 0; /* no crc & no pixelcache */ 115 h2r_args.arg[1] = 0; /* no crc & no pixelcache */
116 h2r_args.arg[2] = ctx->ctx.ofs; 116 h2r_args.arg[2] = ctx->ctx.ofs;
117 h2r_args.arg[3] = ctx->ctx.size; 117 h2r_args.arg[3] = ctx->ctx.size;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
index db796c8e7874..ec1a5947ed7d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -113,7 +113,7 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
113 break; 113 break;
114 default: 114 default:
115 codec_type = S5P_FIMV_CODEC_NONE_V6; 115 codec_type = S5P_FIMV_CODEC_NONE_V6;
116 }; 116 }
117 mfc_write(dev, codec_type, S5P_FIMV_CODEC_TYPE_V6); 117 mfc_write(dev, codec_type, S5P_FIMV_CODEC_TYPE_V6);
118 mfc_write(dev, ctx->ctx.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6); 118 mfc_write(dev, ctx->ctx.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
119 mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6); 119 mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 41f5a3c10dbd..4ff3b6cd6842 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -113,7 +113,7 @@ static struct mfc_control controls[] = {
113 .minimum = 0, 113 .minimum = 0,
114 .maximum = (1 << 16) - 1, 114 .maximum = (1 << 16) - 1,
115 .step = 1, 115 .step = 1,
116 .default_value = 0, 116 .default_value = 12,
117 }, 117 },
118 { 118 {
119 .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE, 119 .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
@@ -356,7 +356,7 @@ static struct mfc_control controls[] = {
356 .minimum = 0, 356 .minimum = 0,
357 .maximum = 51, 357 .maximum = 51,
358 .step = 1, 358 .step = 1,
359 .default_value = 1, 359 .default_value = 51,
360 }, 360 },
361 { 361 {
362 .id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 362 .id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
@@ -399,7 +399,7 @@ static struct mfc_control controls[] = {
399 .minimum = 1, 399 .minimum = 1,
400 .maximum = 31, 400 .maximum = 31,
401 .step = 1, 401 .step = 1,
402 .default_value = 1, 402 .default_value = 31,
403 }, 403 },
404 { 404 {
405 .id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP, 405 .id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP,
@@ -444,7 +444,7 @@ static struct mfc_control controls[] = {
444 .minimum = 0, 444 .minimum = 0,
445 .maximum = 51, 445 .maximum = 51,
446 .step = 1, 446 .step = 1,
447 .default_value = 1, 447 .default_value = 51,
448 }, 448 },
449 { 449 {
450 .id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, 450 .id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 368582b091bf..58ec7bb26ebc 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1582,7 +1582,7 @@ static int s5p_mfc_get_int_reason_v5(struct s5p_mfc_dev *dev)
1582 break; 1582 break;
1583 default: 1583 default:
1584 reason = S5P_MFC_R2H_CMD_EMPTY; 1584 reason = S5P_MFC_R2H_CMD_EMPTY;
1585 }; 1585 }
1586 return reason; 1586 return reason;
1587} 1587}
1588 1588
diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
index b93a21f5aa13..74344c764daa 100644
--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
@@ -226,7 +226,7 @@ static void mxr_graph_fix_geometry(struct mxr_layer *layer,
226 src->width + src->x_offset, 32767); 226 src->width + src->x_offset, 32767);
227 src->full_height = clamp_val(src->full_height, 227 src->full_height = clamp_val(src->full_height,
228 src->height + src->y_offset, 2047); 228 src->height + src->y_offset, 2047);
229 }; 229 }
230} 230}
231 231
232/* PUBLIC API */ 232/* PUBLIC API */
diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
index 3d13a636877b..c9388c45ad75 100644
--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
@@ -197,7 +197,7 @@ static void mxr_vp_fix_geometry(struct mxr_layer *layer,
197 ALIGN(src->width + src->x_offset, 8), 8192U); 197 ALIGN(src->width + src->x_offset, 8), 8192U);
198 src->full_height = clamp(src->full_height, 198 src->full_height = clamp(src->full_height,
199 src->height + src->y_offset, 8192U); 199 src->height + src->y_offset, 8192U);
200 }; 200 }
201} 201}
202 202
203/* PUBLIC API */ 203/* PUBLIC API */
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index d02a7e0b773f..b21f777f55e7 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -105,6 +105,7 @@
105#define VIN_MAX_HEIGHT 2048 105#define VIN_MAX_HEIGHT 2048
106 106
107enum chip_id { 107enum chip_id {
108 RCAR_H2,
108 RCAR_H1, 109 RCAR_H1,
109 RCAR_M1, 110 RCAR_M1,
110 RCAR_E1, 111 RCAR_E1,
@@ -300,7 +301,8 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
300 dmr = 0; 301 dmr = 0;
301 break; 302 break;
302 case V4L2_PIX_FMT_RGB32: 303 case V4L2_PIX_FMT_RGB32:
303 if (priv->chip == RCAR_H1 || priv->chip == RCAR_E1) { 304 if (priv->chip == RCAR_H2 || priv->chip == RCAR_H1 ||
305 priv->chip == RCAR_E1) {
304 dmr = VNDMR_EXRGB; 306 dmr = VNDMR_EXRGB;
305 break; 307 break;
306 } 308 }
@@ -1381,6 +1383,7 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
1381}; 1383};
1382 1384
1383static struct platform_device_id rcar_vin_id_table[] = { 1385static struct platform_device_id rcar_vin_id_table[] = {
1386 { "r8a7790-vin", RCAR_H2 },
1384 { "r8a7779-vin", RCAR_H1 }, 1387 { "r8a7779-vin", RCAR_H1 },
1385 { "r8a7778-vin", RCAR_M1 }, 1388 { "r8a7778-vin", RCAR_M1 },
1386 { "uPD35004-vin", RCAR_E1 }, 1389 { "uPD35004-vin", RCAR_E1 },
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 8df22f779175..150bd4df413c 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1800,7 +1800,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
1800 1800
1801 /* request irq */ 1801 /* request irq */
1802 err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq, 1802 err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq,
1803 IRQF_DISABLED, dev_name(&pdev->dev), pcdev); 1803 0, dev_name(&pdev->dev), pcdev);
1804 if (err) { 1804 if (err) {
1805 dev_err(&pdev->dev, "Unable to register CEU interrupt.\n"); 1805 dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
1806 goto exit_release_mem; 1806 goto exit_release_mem;
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 387a232d95a4..4b8c024fc487 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -71,13 +71,23 @@ static int video_dev_create(struct soc_camera_device *icd);
71int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd, 71int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd,
72 struct v4l2_clk *clk) 72 struct v4l2_clk *clk)
73{ 73{
74 int ret = clk ? v4l2_clk_enable(clk) : 0; 74 int ret;
75 if (ret < 0) { 75 bool clock_toggle;
76 dev_err(dev, "Cannot enable clock: %d\n", ret); 76
77 return ret; 77 if (clk && (!ssdd->unbalanced_power ||
78 !test_and_set_bit(0, &ssdd->clock_state))) {
79 ret = v4l2_clk_enable(clk);
80 if (ret < 0) {
81 dev_err(dev, "Cannot enable clock: %d\n", ret);
82 return ret;
83 }
84 clock_toggle = true;
85 } else {
86 clock_toggle = false;
78 } 87 }
79 ret = regulator_bulk_enable(ssdd->num_regulators, 88
80 ssdd->regulators); 89 ret = regulator_bulk_enable(ssdd->sd_pdata.num_regulators,
90 ssdd->sd_pdata.regulators);
81 if (ret < 0) { 91 if (ret < 0) {
82 dev_err(dev, "Cannot enable regulators\n"); 92 dev_err(dev, "Cannot enable regulators\n");
83 goto eregenable; 93 goto eregenable;
@@ -95,10 +105,10 @@ int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd,
95 return 0; 105 return 0;
96 106
97epwron: 107epwron:
98 regulator_bulk_disable(ssdd->num_regulators, 108 regulator_bulk_disable(ssdd->sd_pdata.num_regulators,
99 ssdd->regulators); 109 ssdd->sd_pdata.regulators);
100eregenable: 110eregenable:
101 if (clk) 111 if (clock_toggle)
102 v4l2_clk_disable(clk); 112 v4l2_clk_disable(clk);
103 113
104 return ret; 114 return ret;
@@ -120,14 +130,14 @@ int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd
120 } 130 }
121 } 131 }
122 132
123 err = regulator_bulk_disable(ssdd->num_regulators, 133 err = regulator_bulk_disable(ssdd->sd_pdata.num_regulators,
124 ssdd->regulators); 134 ssdd->sd_pdata.regulators);
125 if (err < 0) { 135 if (err < 0) {
126 dev_err(dev, "Cannot disable regulators\n"); 136 dev_err(dev, "Cannot disable regulators\n");
127 ret = ret ? : err; 137 ret = ret ? : err;
128 } 138 }
129 139
130 if (clk) 140 if (clk && (!ssdd->unbalanced_power || test_and_clear_bit(0, &ssdd->clock_state)))
131 v4l2_clk_disable(clk); 141 v4l2_clk_disable(clk);
132 142
133 return ret; 143 return ret;
@@ -137,8 +147,8 @@ EXPORT_SYMBOL(soc_camera_power_off);
137int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd) 147int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd)
138{ 148{
139 /* Should not have any effect in synchronous case */ 149 /* Should not have any effect in synchronous case */
140 return devm_regulator_bulk_get(dev, ssdd->num_regulators, 150 return devm_regulator_bulk_get(dev, ssdd->sd_pdata.num_regulators,
141 ssdd->regulators); 151 ssdd->sd_pdata.regulators);
142} 152}
143EXPORT_SYMBOL(soc_camera_power_init); 153EXPORT_SYMBOL(soc_camera_power_init);
144 154
@@ -1346,8 +1356,8 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd,
1346 * soc_camera_pdrv_probe(), make sure the subdevice driver doesn't try 1356 * soc_camera_pdrv_probe(), make sure the subdevice driver doesn't try
1347 * to allocate them again. 1357 * to allocate them again.
1348 */ 1358 */
1349 ssdd->num_regulators = 0; 1359 ssdd->sd_pdata.num_regulators = 0;
1350 ssdd->regulators = NULL; 1360 ssdd->sd_pdata.regulators = NULL;
1351 shd->board_info->platform_data = ssdd; 1361 shd->board_info->platform_data = ssdd;
1352 1362
1353 snprintf(clk_name, sizeof(clk_name), "%d-%04x", 1363 snprintf(clk_name, sizeof(clk_name), "%d-%04x",
@@ -2020,8 +2030,8 @@ static int soc_camera_pdrv_probe(struct platform_device *pdev)
2020 * that case regulators are attached to the I2C device and not to the 2030 * that case regulators are attached to the I2C device and not to the
2021 * camera platform device. 2031 * camera platform device.
2022 */ 2032 */
2023 ret = devm_regulator_bulk_get(&pdev->dev, ssdd->num_regulators, 2033 ret = devm_regulator_bulk_get(&pdev->dev, ssdd->sd_pdata.num_regulators,
2024 ssdd->regulators); 2034 ssdd->sd_pdata.regulators);
2025 if (ret < 0) 2035 if (ret < 0)
2026 return ret; 2036 return ret;
2027 2037
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
new file mode 100644
index 000000000000..cbf0a806ba1d
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
2
3ti-vpe-y := vpe.o vpdma.o
4
5ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
new file mode 100644
index 000000000000..af0a5ffcaa98
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -0,0 +1,846 @@
1/*
2 * VPDMA helper library
3 *
4 * Copyright (c) 2013 Texas Instruments Inc.
5 *
6 * David Griego, <dagriego@biglakesoftware.com>
7 * Dale Farnsworth, <dale@farnsworth.org>
8 * Archit Taneja, <archit@ti.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 */
14
15#include <linux/delay.h>
16#include <linux/dma-mapping.h>
17#include <linux/err.h>
18#include <linux/firmware.h>
19#include <linux/io.h>
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/sched.h>
23#include <linux/slab.h>
24#include <linux/videodev2.h>
25
26#include "vpdma.h"
27#include "vpdma_priv.h"
28
29#define VPDMA_FIRMWARE "vpdma-1b8.bin"
30
31const struct vpdma_data_format vpdma_yuv_fmts[] = {
32 [VPDMA_DATA_FMT_Y444] = {
33 .data_type = DATA_TYPE_Y444,
34 .depth = 8,
35 },
36 [VPDMA_DATA_FMT_Y422] = {
37 .data_type = DATA_TYPE_Y422,
38 .depth = 8,
39 },
40 [VPDMA_DATA_FMT_Y420] = {
41 .data_type = DATA_TYPE_Y420,
42 .depth = 8,
43 },
44 [VPDMA_DATA_FMT_C444] = {
45 .data_type = DATA_TYPE_C444,
46 .depth = 8,
47 },
48 [VPDMA_DATA_FMT_C422] = {
49 .data_type = DATA_TYPE_C422,
50 .depth = 8,
51 },
52 [VPDMA_DATA_FMT_C420] = {
53 .data_type = DATA_TYPE_C420,
54 .depth = 4,
55 },
56 [VPDMA_DATA_FMT_YC422] = {
57 .data_type = DATA_TYPE_YC422,
58 .depth = 16,
59 },
60 [VPDMA_DATA_FMT_YC444] = {
61 .data_type = DATA_TYPE_YC444,
62 .depth = 24,
63 },
64 [VPDMA_DATA_FMT_CY422] = {
65 .data_type = DATA_TYPE_CY422,
66 .depth = 16,
67 },
68};
69
70const struct vpdma_data_format vpdma_rgb_fmts[] = {
71 [VPDMA_DATA_FMT_RGB565] = {
72 .data_type = DATA_TYPE_RGB16_565,
73 .depth = 16,
74 },
75 [VPDMA_DATA_FMT_ARGB16_1555] = {
76 .data_type = DATA_TYPE_ARGB_1555,
77 .depth = 16,
78 },
79 [VPDMA_DATA_FMT_ARGB16] = {
80 .data_type = DATA_TYPE_ARGB_4444,
81 .depth = 16,
82 },
83 [VPDMA_DATA_FMT_RGBA16_5551] = {
84 .data_type = DATA_TYPE_RGBA_5551,
85 .depth = 16,
86 },
87 [VPDMA_DATA_FMT_RGBA16] = {
88 .data_type = DATA_TYPE_RGBA_4444,
89 .depth = 16,
90 },
91 [VPDMA_DATA_FMT_ARGB24] = {
92 .data_type = DATA_TYPE_ARGB24_6666,
93 .depth = 24,
94 },
95 [VPDMA_DATA_FMT_RGB24] = {
96 .data_type = DATA_TYPE_RGB24_888,
97 .depth = 24,
98 },
99 [VPDMA_DATA_FMT_ARGB32] = {
100 .data_type = DATA_TYPE_ARGB32_8888,
101 .depth = 32,
102 },
103 [VPDMA_DATA_FMT_RGBA24] = {
104 .data_type = DATA_TYPE_RGBA24_6666,
105 .depth = 24,
106 },
107 [VPDMA_DATA_FMT_RGBA32] = {
108 .data_type = DATA_TYPE_RGBA32_8888,
109 .depth = 32,
110 },
111 [VPDMA_DATA_FMT_BGR565] = {
112 .data_type = DATA_TYPE_BGR16_565,
113 .depth = 16,
114 },
115 [VPDMA_DATA_FMT_ABGR16_1555] = {
116 .data_type = DATA_TYPE_ABGR_1555,
117 .depth = 16,
118 },
119 [VPDMA_DATA_FMT_ABGR16] = {
120 .data_type = DATA_TYPE_ABGR_4444,
121 .depth = 16,
122 },
123 [VPDMA_DATA_FMT_BGRA16_5551] = {
124 .data_type = DATA_TYPE_BGRA_5551,
125 .depth = 16,
126 },
127 [VPDMA_DATA_FMT_BGRA16] = {
128 .data_type = DATA_TYPE_BGRA_4444,
129 .depth = 16,
130 },
131 [VPDMA_DATA_FMT_ABGR24] = {
132 .data_type = DATA_TYPE_ABGR24_6666,
133 .depth = 24,
134 },
135 [VPDMA_DATA_FMT_BGR24] = {
136 .data_type = DATA_TYPE_BGR24_888,
137 .depth = 24,
138 },
139 [VPDMA_DATA_FMT_ABGR32] = {
140 .data_type = DATA_TYPE_ABGR32_8888,
141 .depth = 32,
142 },
143 [VPDMA_DATA_FMT_BGRA24] = {
144 .data_type = DATA_TYPE_BGRA24_6666,
145 .depth = 24,
146 },
147 [VPDMA_DATA_FMT_BGRA32] = {
148 .data_type = DATA_TYPE_BGRA32_8888,
149 .depth = 32,
150 },
151};
152
153const struct vpdma_data_format vpdma_misc_fmts[] = {
154 [VPDMA_DATA_FMT_MV] = {
155 .data_type = DATA_TYPE_MV,
156 .depth = 4,
157 },
158};
159
160struct vpdma_channel_info {
161 int num; /* VPDMA channel number */
162 int cstat_offset; /* client CSTAT register offset */
163};
164
165static const struct vpdma_channel_info chan_info[] = {
166 [VPE_CHAN_LUMA1_IN] = {
167 .num = VPE_CHAN_NUM_LUMA1_IN,
168 .cstat_offset = VPDMA_DEI_LUMA1_CSTAT,
169 },
170 [VPE_CHAN_CHROMA1_IN] = {
171 .num = VPE_CHAN_NUM_CHROMA1_IN,
172 .cstat_offset = VPDMA_DEI_CHROMA1_CSTAT,
173 },
174 [VPE_CHAN_LUMA2_IN] = {
175 .num = VPE_CHAN_NUM_LUMA2_IN,
176 .cstat_offset = VPDMA_DEI_LUMA2_CSTAT,
177 },
178 [VPE_CHAN_CHROMA2_IN] = {
179 .num = VPE_CHAN_NUM_CHROMA2_IN,
180 .cstat_offset = VPDMA_DEI_CHROMA2_CSTAT,
181 },
182 [VPE_CHAN_LUMA3_IN] = {
183 .num = VPE_CHAN_NUM_LUMA3_IN,
184 .cstat_offset = VPDMA_DEI_LUMA3_CSTAT,
185 },
186 [VPE_CHAN_CHROMA3_IN] = {
187 .num = VPE_CHAN_NUM_CHROMA3_IN,
188 .cstat_offset = VPDMA_DEI_CHROMA3_CSTAT,
189 },
190 [VPE_CHAN_MV_IN] = {
191 .num = VPE_CHAN_NUM_MV_IN,
192 .cstat_offset = VPDMA_DEI_MV_IN_CSTAT,
193 },
194 [VPE_CHAN_MV_OUT] = {
195 .num = VPE_CHAN_NUM_MV_OUT,
196 .cstat_offset = VPDMA_DEI_MV_OUT_CSTAT,
197 },
198 [VPE_CHAN_LUMA_OUT] = {
199 .num = VPE_CHAN_NUM_LUMA_OUT,
200 .cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
201 },
202 [VPE_CHAN_CHROMA_OUT] = {
203 .num = VPE_CHAN_NUM_CHROMA_OUT,
204 .cstat_offset = VPDMA_VIP_UP_UV_CSTAT,
205 },
206 [VPE_CHAN_RGB_OUT] = {
207 .num = VPE_CHAN_NUM_RGB_OUT,
208 .cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
209 },
210};
211
212static u32 read_reg(struct vpdma_data *vpdma, int offset)
213{
214 return ioread32(vpdma->base + offset);
215}
216
217static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
218{
219 iowrite32(value, vpdma->base + offset);
220}
221
222static int read_field_reg(struct vpdma_data *vpdma, int offset,
223 u32 mask, int shift)
224{
225 return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
226}
227
228static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
229 u32 mask, int shift)
230{
231 u32 val = read_reg(vpdma, offset);
232
233 val &= ~(mask << shift);
234 val |= (field & mask) << shift;
235
236 write_reg(vpdma, offset, val);
237}
238
239void vpdma_dump_regs(struct vpdma_data *vpdma)
240{
241 struct device *dev = &vpdma->pdev->dev;
242
243#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))
244
245 dev_dbg(dev, "VPDMA Registers:\n");
246
247 DUMPREG(PID);
248 DUMPREG(LIST_ADDR);
249 DUMPREG(LIST_ATTR);
250 DUMPREG(LIST_STAT_SYNC);
251 DUMPREG(BG_RGB);
252 DUMPREG(BG_YUV);
253 DUMPREG(SETUP);
254 DUMPREG(MAX_SIZE1);
255 DUMPREG(MAX_SIZE2);
256 DUMPREG(MAX_SIZE3);
257
258 /*
259 * dumping registers of only group0 and group3, because VPE channels
260 * lie within group0 and group3 registers
261 */
262 DUMPREG(INT_CHAN_STAT(0));
263 DUMPREG(INT_CHAN_MASK(0));
264 DUMPREG(INT_CHAN_STAT(3));
265 DUMPREG(INT_CHAN_MASK(3));
266 DUMPREG(INT_CLIENT0_STAT);
267 DUMPREG(INT_CLIENT0_MASK);
268 DUMPREG(INT_CLIENT1_STAT);
269 DUMPREG(INT_CLIENT1_MASK);
270 DUMPREG(INT_LIST0_STAT);
271 DUMPREG(INT_LIST0_MASK);
272
273 /*
274 * these are registers specific to VPE clients, we can make this
275 * function dump client registers specific to VPE or VIP based on
276 * who is using it
277 */
278 DUMPREG(DEI_CHROMA1_CSTAT);
279 DUMPREG(DEI_LUMA1_CSTAT);
280 DUMPREG(DEI_CHROMA2_CSTAT);
281 DUMPREG(DEI_LUMA2_CSTAT);
282 DUMPREG(DEI_CHROMA3_CSTAT);
283 DUMPREG(DEI_LUMA3_CSTAT);
284 DUMPREG(DEI_MV_IN_CSTAT);
285 DUMPREG(DEI_MV_OUT_CSTAT);
286 DUMPREG(VIP_UP_Y_CSTAT);
287 DUMPREG(VIP_UP_UV_CSTAT);
288 DUMPREG(VPI_CTL_CSTAT);
289}
290
291/*
292 * Allocate a DMA buffer
293 */
294int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
295{
296 buf->size = size;
297 buf->mapped = false;
298 buf->addr = kzalloc(size, GFP_KERNEL);
299 if (!buf->addr)
300 return -ENOMEM;
301
302 WARN_ON((u32) buf->addr & VPDMA_DESC_ALIGN);
303
304 return 0;
305}
306
307void vpdma_free_desc_buf(struct vpdma_buf *buf)
308{
309 WARN_ON(buf->mapped);
310 kfree(buf->addr);
311 buf->addr = NULL;
312 buf->size = 0;
313}
314
315/*
316 * map descriptor/payload DMA buffer, enabling DMA access
317 */
318int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
319{
320 struct device *dev = &vpdma->pdev->dev;
321
322 WARN_ON(buf->mapped);
323 buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
324 DMA_TO_DEVICE);
325 if (dma_mapping_error(dev, buf->dma_addr)) {
326 dev_err(dev, "failed to map buffer\n");
327 return -EINVAL;
328 }
329
330 buf->mapped = true;
331
332 return 0;
333}
334
335/*
336 * unmap descriptor/payload DMA buffer, disabling DMA access and
 337 * allowing the main processor to access the data
338 */
339void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
340{
341 struct device *dev = &vpdma->pdev->dev;
342
343 if (buf->mapped)
344 dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);
345
346 buf->mapped = false;
347}
348
349/*
350 * create a descriptor list, the user of this list will append configuration,
351 * control and data descriptors to this list, this list will be submitted to
352 * VPDMA. VPDMA's list parser will go through each descriptor and perform the
353 * required DMA operations
354 */
355int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
356{
357 int r;
358
359 r = vpdma_alloc_desc_buf(&list->buf, size);
360 if (r)
361 return r;
362
363 list->next = list->buf.addr;
364
365 list->type = type;
366
367 return 0;
368}
369
370/*
371 * once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
372 * to allow new descriptors to be added to the list.
373 */
374void vpdma_reset_desc_list(struct vpdma_desc_list *list)
375{
376 list->next = list->buf.addr;
377}
378
379/*
 380 * free the buffer allocated for the VPDMA descriptor list, this should be
381 * called when the user doesn't want to use VPDMA any more.
382 */
383void vpdma_free_desc_list(struct vpdma_desc_list *list)
384{
385 vpdma_free_desc_buf(&list->buf);
386
387 list->next = NULL;
388}
389
390static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
391{
392 return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
393}
394
395/*
396 * submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion
397 */
398int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
399{
400 /* we always use the first list */
401 int list_num = 0;
402 int list_size;
403
404 if (vpdma_list_busy(vpdma, list_num))
405 return -EBUSY;
406
407 /* 16-byte granularity */
408 list_size = (list->next - list->buf.addr) >> 4;
409
410 write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);
411
412 write_reg(vpdma, VPDMA_LIST_ATTR,
413 (list_num << VPDMA_LIST_NUM_SHFT) |
414 (list->type << VPDMA_LIST_TYPE_SHFT) |
415 list_size);
416
417 return 0;
418}
419
420static void dump_cfd(struct vpdma_cfd *cfd)
421{
422 int class;
423
424 class = cfd_get_class(cfd);
425
426 pr_debug("config descriptor of payload class: %s\n",
427 class == CFD_CLS_BLOCK ? "simple block" :
428 "address data block");
429
430 if (class == CFD_CLS_BLOCK)
431 pr_debug("word0: dst_addr_offset = 0x%08x\n",
432 cfd->dest_addr_offset);
433
434 if (class == CFD_CLS_BLOCK)
435 pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);
436
437 pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);
438
439 pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, "
440 "payload_len = %d\n", cfd_get_pkt_type(cfd),
441 cfd_get_direct(cfd), class, cfd_get_dest(cfd),
442 cfd_get_payload_len(cfd));
443}
444
445/*
446 * append a configuration descriptor to the given descriptor list, where the
447 * payload is in the form of a simple data block specified in the descriptor
448 * header, this is used to upload scaler coefficients to the scaler module
449 */
450void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
451 struct vpdma_buf *blk, u32 dest_offset)
452{
453 struct vpdma_cfd *cfd;
454 int len = blk->size;
455
456 WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);
457
458 cfd = list->next;
459 WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
460
461 cfd->dest_addr_offset = dest_offset;
462 cfd->block_len = len;
463 cfd->payload_addr = (u32) blk->dma_addr;
464 cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
465 client, len >> 4);
466
467 list->next = cfd + 1;
468
469 dump_cfd(cfd);
470}
471
472/*
473 * append a configuration descriptor to the given descriptor list, where the
474 * payload is in the address data block format, this is used to a configure a
475 * discontiguous set of MMRs
476 */
477void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
478 struct vpdma_buf *adb)
479{
480 struct vpdma_cfd *cfd;
481 unsigned int len = adb->size;
482
483 WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
484 WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);
485
486 cfd = list->next;
487 BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
488
489 cfd->w0 = 0;
490 cfd->w1 = 0;
491 cfd->payload_addr = (u32) adb->dma_addr;
492 cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
493 client, len >> 4);
494
495 list->next = cfd + 1;
496
497 dump_cfd(cfd);
498};
499
500/*
501 * control descriptor format change based on what type of control descriptor it
502 * is, we only use 'sync on channel' control descriptors for now, so assume it's
503 * that
504 */
/*
 * Dump a control descriptor to the debug log. Only word 3 is decoded,
 * since this driver only creates 'sync on channel' control descriptors.
 */
static void dump_ctd(struct vpdma_ctd *ctd)
{
	pr_debug("control descriptor\n");

	pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
		ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
}
512
513/*
514 * append a 'sync on channel' type control descriptor to the given descriptor
515 * list, this descriptor stalls the VPDMA list till the time DMA is completed
516 * on the specified channel
517 */
518void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
519 enum vpdma_channel chan)
520{
521 struct vpdma_ctd *ctd;
522
523 ctd = list->next;
524 WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
525
526 ctd->w0 = 0;
527 ctd->w1 = 0;
528 ctd->w2 = 0;
529 ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
530 CTD_TYPE_SYNC_ON_CHANNEL);
531
532 list->next = ctd + 1;
533
534 dump_ctd(ctd);
535}
536
537static void dump_dtd(struct vpdma_dtd *dtd)
538{
539 int dir, chan;
540
541 dir = dtd_get_dir(dtd);
542 chan = dtd_get_chan(dtd);
543
544 pr_debug("%s data transfer descriptor for channel %d\n",
545 dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);
546
547 pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, "
548 "even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
549 dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
550 dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
551 dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));
552
553 if (dir == DTD_DIR_IN)
554 pr_debug("word1: line_length = %d, xfer_height = %d\n",
555 dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));
556
557 pr_debug("word2: start_addr = 0x%08x\n", dtd->start_addr);
558
559 pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, "
560 "pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd),
561 dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
562 dtd_get_next_chan(dtd));
563
564 if (dir == DTD_DIR_IN)
565 pr_debug("word4: frame_width = %d, frame_height = %d\n",
566 dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
567 else
568 pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, "
569 "drp_data = %d, use_desc_reg = %d\n",
570 dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
571 dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));
572
573 if (dir == DTD_DIR_IN)
574 pr_debug("word5: hor_start = %d, ver_start = %d\n",
575 dtd_get_h_start(dtd), dtd_get_v_start(dtd));
576 else
577 pr_debug("word5: max_width %d, max_height %d\n",
578 dtd_get_max_width(dtd), dtd_get_max_height(dtd));
579
580 pr_debug("word6: client specfic attr0 = 0x%08x\n", dtd->client_attr0);
581 pr_debug("word7: client specfic attr1 = 0x%08x\n", dtd->client_attr1);
582}
583
584/*
585 * append an outbound data transfer descriptor to the given descriptor list,
586 * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
587 */
void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, u32 flags)
{
	int priority = 0;
	int field = 0;
	int notify = 1;
	int channel, next_chan;
	int depth = fmt->depth;
	int stride;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	/* C420 chroma lines carry 8 bits per pixel, not the format depth */
	if (fmt->data_type == DATA_TYPE_C420)
		depth = 8;

	/* line stride in bytes; advance the DMA address to the crop's left edge */
	stride = (depth * c_rect->width) >> 3;
	dma_addr += (c_rect->left * depth) >> 3;

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);
	/* word1 is unused for outbound transfers */
	dtd->w1 = 0;
	/* NOTE(review): truncates dma_addr to 32 bits — assumes no >4GiB DMA
	 * addresses on this platform; confirm for LPAE configurations */
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_OUT, channel, priority, next_chan);
	dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
	/* clamp the client to 1920x1080 maximum output size */
	dtd->max_width_height = dtd_max_width_height(MAX_OUT_WIDTH_1920,
					MAX_OUT_HEIGHT_1080);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
632
633/*
634 * append an inbound data transfer descriptor to the given descriptor list,
635 * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
636 */
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width,
		int frame_height, struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, int field, u32 flags)
{
	int priority = 0;
	int notify = 1;
	int depth = fmt->depth;
	int channel, next_chan;
	int stride;
	int height = c_rect->height;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	/* the C420 chroma plane is vertically subsampled and 8bpp per line */
	if (fmt->data_type == DATA_TYPE_C420) {
		height >>= 1;
		frame_height >>= 1;
		depth = 8;
	}

	/* line stride in bytes; advance the DMA address to the crop's left edge */
	stride = (depth * c_rect->width) >> 3;
	dma_addr += (c_rect->left * depth) >> 3;

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);

	dtd->xfer_length_height = dtd_xfer_length_height(c_rect->width, height);
	/* NOTE(review): truncates dma_addr to 32 bits — confirm for LPAE */
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_IN, channel, priority, next_chan);
	dtd->frame_width_height = dtd_frame_width_height(frame_width,
					frame_height);
	/* position of the crop window within the full frame */
	dtd->start_h_v = dtd_start_h_v(c_rect->left, c_rect->top);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
686
687/* set or clear the mask for list complete interrupt */
688void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
689 bool enable)
690{
691 u32 val;
692
693 val = read_reg(vpdma, VPDMA_INT_LIST0_MASK);
694 if (enable)
695 val |= (1 << (list_num * 2));
696 else
697 val &= ~(1 << (list_num * 2));
698 write_reg(vpdma, VPDMA_INT_LIST0_MASK, val);
699}
700
/* clear previously occurred list interrupts in the LIST_STAT register */
void vpdma_clear_list_stat(struct vpdma_data *vpdma)
{
	/* the status register is write-1-to-clear: writing back the current
	 * value acknowledges every pending list interrupt at once */
	write_reg(vpdma, VPDMA_INT_LIST0_STAT,
		read_reg(vpdma, VPDMA_INT_LIST0_STAT));
}
707
708/*
709 * configures the output mode of the line buffer for the given client, the
710 * line buffer content can either be mirrored(each line repeated twice) or
711 * passed to the client as is
712 */
/*
 * Program the line-buffer output mode in the client's CSTAT register:
 * lines can be passed through as-is or mirrored (each line repeated twice).
 */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, line_mode,
		VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}
721
722/*
723 * configures the event which should trigger VPDMA transfer for the given
724 * client
725 */
/*
 * Program which hardware event triggers a VPDMA transfer for the client,
 * via the frame-start field of its CSTAT register.
 */
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
		enum vpdma_frame_start_event fs_event,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, fs_event,
		VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
735
/*
 * Firmware load completion callback: upload the VPDMA microcode to the
 * hardware (unless it is already initialized) and mark the instance ready.
 * Runs asynchronously after request_firmware_nowait() completes.
 */
static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			VPDMA_LIST_RDY_SHFT)) {
		vpdma->ready = true;
		return;
	}

	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	/* copy the microcode into a DMA-able buffer and hand it to the HW */
	memcpy(fw_dma_buf.addr, f->data, f->size);

	vpdma_map_desc_buf(vpdma, &fw_dma_buf);

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	/* poll the list-ready bit while the hardware consumes the firmware */
	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				VPDMA_LIST_RDY_SHFT))
			break;
	}

	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto free_buf;
	}

	vpdma->ready = true;

	/* success falls through: the DMA buffer is no longer needed either way */
free_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);

	vpdma_free_desc_buf(&fw_dma_buf);
rel_fw:
	release_firmware(f);
}
791
792static int vpdma_load_firmware(struct vpdma_data *vpdma)
793{
794 int r;
795 struct device *dev = &vpdma->pdev->dev;
796
797 r = request_firmware_nowait(THIS_MODULE, 1,
798 (const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
799 vpdma_firmware_cb);
800 if (r) {
801 dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
802 return r;
803 } else {
804 dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
805 }
806
807 return 0;
808}
809
810struct vpdma_data *vpdma_create(struct platform_device *pdev)
811{
812 struct resource *res;
813 struct vpdma_data *vpdma;
814 int r;
815
816 dev_dbg(&pdev->dev, "vpdma_create\n");
817
818 vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL);
819 if (!vpdma) {
820 dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n");
821 return ERR_PTR(-ENOMEM);
822 }
823
824 vpdma->pdev = pdev;
825
826 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
827 if (res == NULL) {
828 dev_err(&pdev->dev, "missing platform resources data\n");
829 return ERR_PTR(-ENODEV);
830 }
831
832 vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
833 if (!vpdma->base) {
834 dev_err(&pdev->dev, "failed to ioremap\n");
835 return ERR_PTR(-ENOMEM);
836 }
837
838 r = vpdma_load_firmware(vpdma);
839 if (r) {
840 pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
841 return ERR_PTR(r);
842 }
843
844 return vpdma;
845}
846MODULE_FIRMWARE(VPDMA_FIRMWARE);
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
new file mode 100644
index 000000000000..eaa2a71a5db9
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -0,0 +1,203 @@
1/*
2 * Copyright (c) 2013 Texas Instruments Inc.
3 *
4 * David Griego, <dagriego@biglakesoftware.com>
5 * Dale Farnsworth, <dale@farnsworth.org>
6 * Archit Taneja, <archit@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 */
12
13#ifndef __TI_VPDMA_H_
14#define __TI_VPDMA_H_
15
16/*
17 * A vpdma_buf tracks the size, DMA address and mapping status of each
18 * driver DMA area.
19 */
struct vpdma_buf {
	void			*addr;		/* kernel virtual address */
	dma_addr_t		dma_addr;	/* device-visible DMA address */
	size_t			size;		/* allocated size in bytes */
	bool			mapped;		/* currently DMA-mapped? */
};
26
/* A descriptor list: a DMA buffer filled sequentially with descriptors */
struct vpdma_desc_list {
	struct vpdma_buf buf;	/* backing DMA buffer */
	void *next;		/* write pointer for the next descriptor */
	int type;		/* one of VPDMA_LIST_TYPE_* */
};
32
33struct vpdma_data {
34 void __iomem *base;
35
36 struct platform_device *pdev;
37
38 /* tells whether vpdma firmware is loaded or not */
39 bool ready;
40};
41
42struct vpdma_data_format {
43 int data_type;
44 u8 depth;
45};
46
47#define VPDMA_DESC_ALIGN 16 /* 16-byte descriptor alignment */
48
49#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
50#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
51
52#define VPDMA_LIST_TYPE_NORMAL 0
53#define VPDMA_LIST_TYPE_SELF_MODIFYING 1
54#define VPDMA_LIST_TYPE_DOORBELL 2
55
56enum vpdma_yuv_formats {
57 VPDMA_DATA_FMT_Y444 = 0,
58 VPDMA_DATA_FMT_Y422,
59 VPDMA_DATA_FMT_Y420,
60 VPDMA_DATA_FMT_C444,
61 VPDMA_DATA_FMT_C422,
62 VPDMA_DATA_FMT_C420,
63 VPDMA_DATA_FMT_YC422,
64 VPDMA_DATA_FMT_YC444,
65 VPDMA_DATA_FMT_CY422,
66};
67
68enum vpdma_rgb_formats {
69 VPDMA_DATA_FMT_RGB565 = 0,
70 VPDMA_DATA_FMT_ARGB16_1555,
71 VPDMA_DATA_FMT_ARGB16,
72 VPDMA_DATA_FMT_RGBA16_5551,
73 VPDMA_DATA_FMT_RGBA16,
74 VPDMA_DATA_FMT_ARGB24,
75 VPDMA_DATA_FMT_RGB24,
76 VPDMA_DATA_FMT_ARGB32,
77 VPDMA_DATA_FMT_RGBA24,
78 VPDMA_DATA_FMT_RGBA32,
79 VPDMA_DATA_FMT_BGR565,
80 VPDMA_DATA_FMT_ABGR16_1555,
81 VPDMA_DATA_FMT_ABGR16,
82 VPDMA_DATA_FMT_BGRA16_5551,
83 VPDMA_DATA_FMT_BGRA16,
84 VPDMA_DATA_FMT_ABGR24,
85 VPDMA_DATA_FMT_BGR24,
86 VPDMA_DATA_FMT_ABGR32,
87 VPDMA_DATA_FMT_BGRA24,
88 VPDMA_DATA_FMT_BGRA32,
89};
90
91enum vpdma_misc_formats {
92 VPDMA_DATA_FMT_MV = 0,
93};
94
95extern const struct vpdma_data_format vpdma_yuv_fmts[];
96extern const struct vpdma_data_format vpdma_rgb_fmts[];
97extern const struct vpdma_data_format vpdma_misc_fmts[];
98
99enum vpdma_frame_start_event {
100 VPDMA_FSEVENT_HDMI_FID = 0,
101 VPDMA_FSEVENT_DVO2_FID,
102 VPDMA_FSEVENT_HDCOMP_FID,
103 VPDMA_FSEVENT_SD_FID,
104 VPDMA_FSEVENT_LM_FID0,
105 VPDMA_FSEVENT_LM_FID1,
106 VPDMA_FSEVENT_LM_FID2,
107 VPDMA_FSEVENT_CHANNEL_ACTIVE,
108};
109
110/*
111 * VPDMA channel numbers
112 */
113enum vpdma_channel {
114 VPE_CHAN_LUMA1_IN,
115 VPE_CHAN_CHROMA1_IN,
116 VPE_CHAN_LUMA2_IN,
117 VPE_CHAN_CHROMA2_IN,
118 VPE_CHAN_LUMA3_IN,
119 VPE_CHAN_CHROMA3_IN,
120 VPE_CHAN_MV_IN,
121 VPE_CHAN_MV_OUT,
122 VPE_CHAN_LUMA_OUT,
123 VPE_CHAN_CHROMA_OUT,
124 VPE_CHAN_RGB_OUT,
125};
126
127/* flags for VPDMA data descriptors */
128#define VPDMA_DATA_ODD_LINE_SKIP (1 << 0)
129#define VPDMA_DATA_EVEN_LINE_SKIP (1 << 1)
130#define VPDMA_DATA_FRAME_1D (1 << 2)
131#define VPDMA_DATA_MODE_TILED (1 << 3)
132
133/*
134 * client identifiers used for configuration descriptors
135 */
136#define CFD_MMR_CLIENT 0
137#define CFD_SC_CLIENT 4
138
139/* Address data block header format */
140struct vpdma_adb_hdr {
141 u32 offset;
142 u32 nwords;
143 u32 reserved0;
144 u32 reserved1;
145};
146
147/* helpers for creating ADB headers for config descriptors MMRs as client */
148#define ADB_ADDR(dma_buf, str, fld) ((dma_buf)->addr + offsetof(str, fld))
149#define MMR_ADB_ADDR(buf, str, fld) ADB_ADDR(&(buf), struct str, fld)
150
/*
 * Initialize the ADB header 'hdr' inside the shadow-register struct 'str'
 * held in buffer 'buf': point it at register offset 'offset_a' and size it
 * to cover the 'regs' member (nwords is in 32-bit words). The NULL 'adb'
 * pointer is never dereferenced — it only feeds sizeof().
 */
#define VPDMA_SET_MMR_ADB_HDR(buf, str, hdr, regs, offset_a)	\
	do {							\
		struct vpdma_adb_hdr *h;			\
		struct str *adb = NULL;				\
		h = MMR_ADB_ADDR(buf, str, hdr);		\
		h->offset = (offset_a);				\
		h->nwords = sizeof(adb->regs) >> 2;		\
	} while (0)
159
160/* vpdma descriptor buffer allocation and management */
161int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size);
162void vpdma_free_desc_buf(struct vpdma_buf *buf);
163int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
164void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
165
166/* vpdma descriptor list funcs */
167int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type);
168void vpdma_reset_desc_list(struct vpdma_desc_list *list);
169void vpdma_free_desc_list(struct vpdma_desc_list *list);
170int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list);
171
172/* helpers for creating vpdma descriptors */
173void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
174 struct vpdma_buf *blk, u32 dest_offset);
175void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
176 struct vpdma_buf *adb);
177void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
178 enum vpdma_channel chan);
179void vpdma_add_out_dtd(struct vpdma_desc_list *list, struct v4l2_rect *c_rect,
180 const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
181 enum vpdma_channel chan, u32 flags);
182void vpdma_add_in_dtd(struct vpdma_desc_list *list, int frame_width,
183 int frame_height, struct v4l2_rect *c_rect,
184 const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
185 enum vpdma_channel chan, int field, u32 flags);
186
187/* vpdma list interrupt management */
188void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
189 bool enable);
190void vpdma_clear_list_stat(struct vpdma_data *vpdma);
191
192/* vpdma client configuration */
193void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
194 enum vpdma_channel chan);
195void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
196 enum vpdma_frame_start_event fs_event, enum vpdma_channel chan);
197
198void vpdma_dump_regs(struct vpdma_data *vpdma);
199
200/* initialize vpdma, passed with VPE's platform device pointer */
201struct vpdma_data *vpdma_create(struct platform_device *pdev);
202
203#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
new file mode 100644
index 000000000000..f0e9a8038c1b
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -0,0 +1,641 @@
1/*
2 * Copyright (c) 2013 Texas Instruments Inc.
3 *
4 * David Griego, <dagriego@biglakesoftware.com>
5 * Dale Farnsworth, <dale@farnsworth.org>
6 * Archit Taneja, <archit@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 */
12
13#ifndef _TI_VPDMA_PRIV_H_
14#define _TI_VPDMA_PRIV_H_
15
16/*
17 * VPDMA Register offsets
18 */
19
20/* Top level */
21#define VPDMA_PID 0x00
22#define VPDMA_LIST_ADDR 0x04
23#define VPDMA_LIST_ATTR 0x08
24#define VPDMA_LIST_STAT_SYNC 0x0c
25#define VPDMA_BG_RGB 0x18
26#define VPDMA_BG_YUV 0x1c
27#define VPDMA_SETUP 0x30
28#define VPDMA_MAX_SIZE1 0x34
29#define VPDMA_MAX_SIZE2 0x38
30#define VPDMA_MAX_SIZE3 0x3c
31
32/* Interrupts */
33#define VPDMA_INT_CHAN_STAT(grp) (0x40 + grp * 8)
34#define VPDMA_INT_CHAN_MASK(grp) (VPDMA_INT_CHAN_STAT(grp) + 4)
35#define VPDMA_INT_CLIENT0_STAT 0x78
36#define VPDMA_INT_CLIENT0_MASK 0x7c
37#define VPDMA_INT_CLIENT1_STAT 0x80
38#define VPDMA_INT_CLIENT1_MASK 0x84
39#define VPDMA_INT_LIST0_STAT 0x88
40#define VPDMA_INT_LIST0_MASK 0x8c
41
42#define VPDMA_PERFMON(i) (0x200 + i * 4)
43
44/* VPE specific client registers */
45#define VPDMA_DEI_CHROMA1_CSTAT 0x0300
46#define VPDMA_DEI_LUMA1_CSTAT 0x0304
47#define VPDMA_DEI_LUMA2_CSTAT 0x0308
48#define VPDMA_DEI_CHROMA2_CSTAT 0x030c
49#define VPDMA_DEI_LUMA3_CSTAT 0x0310
50#define VPDMA_DEI_CHROMA3_CSTAT 0x0314
51#define VPDMA_DEI_MV_IN_CSTAT 0x0330
52#define VPDMA_DEI_MV_OUT_CSTAT 0x033c
53#define VPDMA_VIP_UP_Y_CSTAT 0x0390
54#define VPDMA_VIP_UP_UV_CSTAT 0x0394
55#define VPDMA_VPI_CTL_CSTAT 0x03d0
56
57/* Reg field info for VPDMA_CLIENT_CSTAT registers */
58#define VPDMA_CSTAT_LINE_MODE_MASK 0x03
59#define VPDMA_CSTAT_LINE_MODE_SHIFT 8
60#define VPDMA_CSTAT_FRAME_START_MASK 0xf
61#define VPDMA_CSTAT_FRAME_START_SHIFT 10
62
63#define VPDMA_LIST_NUM_MASK 0x07
64#define VPDMA_LIST_NUM_SHFT 24
65#define VPDMA_LIST_STOP_SHFT 20
66#define VPDMA_LIST_RDY_MASK 0x01
67#define VPDMA_LIST_RDY_SHFT 19
68#define VPDMA_LIST_TYPE_MASK 0x03
69#define VPDMA_LIST_TYPE_SHFT 16
70#define VPDMA_LIST_SIZE_MASK 0xffff
71
72/* VPDMA data type values for data formats */
73#define DATA_TYPE_Y444 0x0
74#define DATA_TYPE_Y422 0x1
75#define DATA_TYPE_Y420 0x2
76#define DATA_TYPE_C444 0x4
77#define DATA_TYPE_C422 0x5
78#define DATA_TYPE_C420 0x6
79#define DATA_TYPE_YC422 0x7
80#define DATA_TYPE_YC444 0x8
81#define DATA_TYPE_CY422 0x23
82
83#define DATA_TYPE_RGB16_565 0x0
84#define DATA_TYPE_ARGB_1555 0x1
85#define DATA_TYPE_ARGB_4444 0x2
86#define DATA_TYPE_RGBA_5551 0x3
87#define DATA_TYPE_RGBA_4444 0x4
88#define DATA_TYPE_ARGB24_6666 0x5
89#define DATA_TYPE_RGB24_888 0x6
90#define DATA_TYPE_ARGB32_8888 0x7
91#define DATA_TYPE_RGBA24_6666 0x8
92#define DATA_TYPE_RGBA32_8888 0x9
93#define DATA_TYPE_BGR16_565 0x10
94#define DATA_TYPE_ABGR_1555 0x11
95#define DATA_TYPE_ABGR_4444 0x12
96#define DATA_TYPE_BGRA_5551 0x13
97#define DATA_TYPE_BGRA_4444 0x14
98#define DATA_TYPE_ABGR24_6666 0x15
99#define DATA_TYPE_BGR24_888 0x16
100#define DATA_TYPE_ABGR32_8888 0x17
101#define DATA_TYPE_BGRA24_6666 0x18
102#define DATA_TYPE_BGRA32_8888 0x19
103
104#define DATA_TYPE_MV 0x3
105
106/* VPDMA channel numbers(only VPE channels for now) */
107#define VPE_CHAN_NUM_LUMA1_IN 0
108#define VPE_CHAN_NUM_CHROMA1_IN 1
109#define VPE_CHAN_NUM_LUMA2_IN 2
110#define VPE_CHAN_NUM_CHROMA2_IN 3
111#define VPE_CHAN_NUM_LUMA3_IN 4
112#define VPE_CHAN_NUM_CHROMA3_IN 5
113#define VPE_CHAN_NUM_MV_IN 12
114#define VPE_CHAN_NUM_MV_OUT 15
115#define VPE_CHAN_NUM_LUMA_OUT 102
116#define VPE_CHAN_NUM_CHROMA_OUT 103
117#define VPE_CHAN_NUM_RGB_OUT 106
118
119/*
120 * a VPDMA address data block payload for a configuration descriptor needs to
121 * have each sub block length as a multiple of 16 bytes. Therefore, the overall
122 * size of the payload also needs to be a multiple of 16 bytes. The sub block
123 * lengths should be ensured to be aligned by the VPDMA user.
124 */
125#define VPDMA_ADB_SIZE_ALIGN 0x0f
126
127/*
128 * data transfer descriptor
129 */
/* hardware layout of a data transfer descriptor: eight 32-bit words */
struct vpdma_dtd {
	u32			type_ctl_stride;
	union {
		u32		xfer_length_height;	/* inbound */
		u32		w1;			/* outbound: unused */
	};
	/* NOTE(review): declared dma_addr_t but the driver stores it with a
	 * u32 cast — assumes 32-bit DMA addresses; confirm for LPAE */
	dma_addr_t		start_addr;
	u32			pkt_ctl;
	union {
		u32		frame_width_height;	/* inbound */
		dma_addr_t	desc_write_addr;	/* outbound */
	};
	union {
		u32		start_h_v;		/* inbound */
		u32		max_width_height;	/* outbound */
	};
	u32			client_attr0;
	u32			client_attr1;
};
149
150/* Data Transfer Descriptor specifics */
151#define DTD_NO_NOTIFY 0
152#define DTD_NOTIFY 1
153
154#define DTD_PKT_TYPE 0xa
155#define DTD_DIR_IN 0
156#define DTD_DIR_OUT 1
157
158/* type_ctl_stride */
159#define DTD_DATA_TYPE_MASK 0x3f
160#define DTD_DATA_TYPE_SHFT 26
161#define DTD_NOTIFY_MASK 0x01
162#define DTD_NOTIFY_SHFT 25
163#define DTD_FIELD_MASK 0x01
164#define DTD_FIELD_SHFT 24
165#define DTD_1D_MASK 0x01
166#define DTD_1D_SHFT 23
167#define DTD_EVEN_LINE_SKIP_MASK 0x01
168#define DTD_EVEN_LINE_SKIP_SHFT 20
169#define DTD_ODD_LINE_SKIP_MASK 0x01
170#define DTD_ODD_LINE_SKIP_SHFT 16
171#define DTD_LINE_STRIDE_MASK 0xffff
172#define DTD_LINE_STRIDE_SHFT 0
173
174/* xfer_length_height */
175#define DTD_LINE_LENGTH_MASK 0xffff
176#define DTD_LINE_LENGTH_SHFT 16
177#define DTD_XFER_HEIGHT_MASK 0xffff
178#define DTD_XFER_HEIGHT_SHFT 0
179
180/* pkt_ctl */
181#define DTD_PKT_TYPE_MASK 0x1f
182#define DTD_PKT_TYPE_SHFT 27
183#define DTD_MODE_MASK 0x01
184#define DTD_MODE_SHFT 26
185#define DTD_DIR_MASK 0x01
186#define DTD_DIR_SHFT 25
187#define DTD_CHAN_MASK 0x01ff
188#define DTD_CHAN_SHFT 16
189#define DTD_PRI_MASK 0x0f
190#define DTD_PRI_SHFT 9
191#define DTD_NEXT_CHAN_MASK 0x01ff
192#define DTD_NEXT_CHAN_SHFT 0
193
194/* frame_width_height */
195#define DTD_FRAME_WIDTH_MASK 0xffff
196#define DTD_FRAME_WIDTH_SHFT 16
197#define DTD_FRAME_HEIGHT_MASK 0xffff
198#define DTD_FRAME_HEIGHT_SHFT 0
199
200/* start_h_v */
201#define DTD_H_START_MASK 0xffff
202#define DTD_H_START_SHFT 16
203#define DTD_V_START_MASK 0xffff
204#define DTD_V_START_SHFT 0
205
206#define DTD_DESC_START_SHIFT 5
207#define DTD_WRITE_DESC_MASK 0x01
208#define DTD_WRITE_DESC_SHIFT 2
209#define DTD_DROP_DATA_MASK 0x01
210#define DTD_DROP_DATA_SHIFT 1
211#define DTD_USE_DESC_MASK 0x01
212#define DTD_USE_DESC_SHIFT 0
213
214/* max_width_height */
215#define DTD_MAX_WIDTH_MASK 0x07
216#define DTD_MAX_WIDTH_SHFT 4
217#define DTD_MAX_HEIGHT_MASK 0x07
218#define DTD_MAX_HEIGHT_SHFT 0
219
220/* max width configurations */
221 /* unlimited width */
222#define MAX_OUT_WIDTH_UNLIMITED 0
223/* as specified in max_size1 reg */
224#define MAX_OUT_WIDTH_REG1 1
225/* as specified in max_size2 reg */
226#define MAX_OUT_WIDTH_REG2 2
227/* as specified in max_size3 reg */
228#define MAX_OUT_WIDTH_REG3 3
229/* maximum of 352 pixels as width */
230#define MAX_OUT_WIDTH_352 4
231/* maximum of 768 pixels as width */
232#define MAX_OUT_WIDTH_768 5
233/* maximum of 1280 pixels width */
234#define MAX_OUT_WIDTH_1280 6
235/* maximum of 1920 pixels as width */
236#define MAX_OUT_WIDTH_1920 7
237
238/* max height configurations */
239 /* unlimited height */
240#define MAX_OUT_HEIGHT_UNLIMITED 0
241/* as specified in max_size1 reg */
242#define MAX_OUT_HEIGHT_REG1 1
243/* as specified in max_size2 reg */
244#define MAX_OUT_HEIGHT_REG2 2
245/* as specified in max_size3 reg */
246#define MAX_OUT_HEIGHT_REG3 3
247/* maximum of 288 lines as height */
248#define MAX_OUT_HEIGHT_288 4
249/* maximum of 576 lines as height */
250#define MAX_OUT_HEIGHT_576 5
251/* maximum of 720 lines as height */
252#define MAX_OUT_HEIGHT_720 6
253/* maximum of 1080 lines as height */
254#define MAX_OUT_HEIGHT_1080 7
255
/* pack word 0 (type_ctl_stride) of a data transfer descriptor */
static inline u32 dtd_type_ctl_stride(int type, bool notify, int field,
			bool one_d, bool even_line_skip, bool odd_line_skip,
			int line_stride)
{
	return (type << DTD_DATA_TYPE_SHFT) | (notify << DTD_NOTIFY_SHFT) |
		(field << DTD_FIELD_SHFT) | (one_d << DTD_1D_SHFT) |
		(even_line_skip << DTD_EVEN_LINE_SKIP_SHFT) |
		(odd_line_skip << DTD_ODD_LINE_SKIP_SHFT) |
		line_stride;
}
266
267static inline u32 dtd_xfer_length_height(int line_length, int xfer_height)
268{
269 return (line_length << DTD_LINE_LENGTH_SHFT) | xfer_height;
270}
271
272static inline u32 dtd_pkt_ctl(bool mode, bool dir, int chan, int pri,
273 int next_chan)
274{
275 return (DTD_PKT_TYPE << DTD_PKT_TYPE_SHFT) | (mode << DTD_MODE_SHFT) |
276 (dir << DTD_DIR_SHFT) | (chan << DTD_CHAN_SHFT) |
277 (pri << DTD_PRI_SHFT) | next_chan;
278}
279
280static inline u32 dtd_frame_width_height(int width, int height)
281{
282 return (width << DTD_FRAME_WIDTH_SHFT) | height;
283}
284
285static inline u32 dtd_desc_write_addr(unsigned int addr, bool write_desc,
286 bool drop_data, bool use_desc)
287{
288 return (addr << DTD_DESC_START_SHIFT) |
289 (write_desc << DTD_WRITE_DESC_SHIFT) |
290 (drop_data << DTD_DROP_DATA_SHIFT) |
291 use_desc;
292}
293
294static inline u32 dtd_start_h_v(int h_start, int v_start)
295{
296 return (h_start << DTD_H_START_SHFT) | v_start;
297}
298
299static inline u32 dtd_max_width_height(int max_width, int max_height)
300{
301 return (max_width << DTD_MAX_WIDTH_SHFT) | max_height;
302}
303
304static inline int dtd_get_data_type(struct vpdma_dtd *dtd)
305{
306 return dtd->type_ctl_stride >> DTD_DATA_TYPE_SHFT;
307}
308
309static inline bool dtd_get_notify(struct vpdma_dtd *dtd)
310{
311 return (dtd->type_ctl_stride >> DTD_NOTIFY_SHFT) & DTD_NOTIFY_MASK;
312}
313
314static inline int dtd_get_field(struct vpdma_dtd *dtd)
315{
316 return (dtd->type_ctl_stride >> DTD_FIELD_SHFT) & DTD_FIELD_MASK;
317}
318
319static inline bool dtd_get_1d(struct vpdma_dtd *dtd)
320{
321 return (dtd->type_ctl_stride >> DTD_1D_SHFT) & DTD_1D_MASK;
322}
323
324static inline bool dtd_get_even_line_skip(struct vpdma_dtd *dtd)
325{
326 return (dtd->type_ctl_stride >> DTD_EVEN_LINE_SKIP_SHFT)
327 & DTD_EVEN_LINE_SKIP_MASK;
328}
329
330static inline bool dtd_get_odd_line_skip(struct vpdma_dtd *dtd)
331{
332 return (dtd->type_ctl_stride >> DTD_ODD_LINE_SKIP_SHFT)
333 & DTD_ODD_LINE_SKIP_MASK;
334}
335
336static inline int dtd_get_line_stride(struct vpdma_dtd *dtd)
337{
338 return dtd->type_ctl_stride & DTD_LINE_STRIDE_MASK;
339}
340
341static inline int dtd_get_line_length(struct vpdma_dtd *dtd)
342{
343 return dtd->xfer_length_height >> DTD_LINE_LENGTH_SHFT;
344}
345
346static inline int dtd_get_xfer_height(struct vpdma_dtd *dtd)
347{
348 return dtd->xfer_length_height & DTD_XFER_HEIGHT_MASK;
349}
350
351static inline int dtd_get_pkt_type(struct vpdma_dtd *dtd)
352{
353 return dtd->pkt_ctl >> DTD_PKT_TYPE_SHFT;
354}
355
356static inline bool dtd_get_mode(struct vpdma_dtd *dtd)
357{
358 return (dtd->pkt_ctl >> DTD_MODE_SHFT) & DTD_MODE_MASK;
359}
360
361static inline bool dtd_get_dir(struct vpdma_dtd *dtd)
362{
363 return (dtd->pkt_ctl >> DTD_DIR_SHFT) & DTD_DIR_MASK;
364}
365
366static inline int dtd_get_chan(struct vpdma_dtd *dtd)
367{
368 return (dtd->pkt_ctl >> DTD_CHAN_SHFT) & DTD_CHAN_MASK;
369}
370
371static inline int dtd_get_priority(struct vpdma_dtd *dtd)
372{
373 return (dtd->pkt_ctl >> DTD_PRI_SHFT) & DTD_PRI_MASK;
374}
375
376static inline int dtd_get_next_chan(struct vpdma_dtd *dtd)
377{
378 return (dtd->pkt_ctl >> DTD_NEXT_CHAN_SHFT) & DTD_NEXT_CHAN_MASK;
379}
380
381static inline int dtd_get_frame_width(struct vpdma_dtd *dtd)
382{
383 return dtd->frame_width_height >> DTD_FRAME_WIDTH_SHFT;
384}
385
386static inline int dtd_get_frame_height(struct vpdma_dtd *dtd)
387{
388 return dtd->frame_width_height & DTD_FRAME_HEIGHT_MASK;
389}
390
391static inline int dtd_get_desc_write_addr(struct vpdma_dtd *dtd)
392{
393 return dtd->desc_write_addr >> DTD_DESC_START_SHIFT;
394}
395
396static inline bool dtd_get_write_desc(struct vpdma_dtd *dtd)
397{
398 return (dtd->desc_write_addr >> DTD_WRITE_DESC_SHIFT) &
399 DTD_WRITE_DESC_MASK;
400}
401
402static inline bool dtd_get_drop_data(struct vpdma_dtd *dtd)
403{
404 return (dtd->desc_write_addr >> DTD_DROP_DATA_SHIFT) &
405 DTD_DROP_DATA_MASK;
406}
407
408static inline bool dtd_get_use_desc(struct vpdma_dtd *dtd)
409{
410 return dtd->desc_write_addr & DTD_USE_DESC_MASK;
411}
412
413static inline int dtd_get_h_start(struct vpdma_dtd *dtd)
414{
415 return dtd->start_h_v >> DTD_H_START_SHFT;
416}
417
418static inline int dtd_get_v_start(struct vpdma_dtd *dtd)
419{
420 return dtd->start_h_v & DTD_V_START_MASK;
421}
422
423static inline int dtd_get_max_width(struct vpdma_dtd *dtd)
424{
425 return (dtd->max_width_height >> DTD_MAX_WIDTH_SHFT) &
426 DTD_MAX_WIDTH_MASK;
427}
428
429static inline int dtd_get_max_height(struct vpdma_dtd *dtd)
430{
431 return (dtd->max_width_height >> DTD_MAX_HEIGHT_SHFT) &
432 DTD_MAX_HEIGHT_MASK;
433}
434
435/*
436 * configuration descriptor
437 */
438struct vpdma_cfd {
439 union {
440 u32 dest_addr_offset;
441 u32 w0;
442 };
443 union {
444 u32 block_len; /* in words */
445 u32 w1;
446 };
447 u32 payload_addr;
448 u32 ctl_payload_len; /* in words */
449};
450
451/* Configuration descriptor specifics */
452
453#define CFD_PKT_TYPE 0xb
454
455#define CFD_DIRECT 1
456#define CFD_INDIRECT 0
457#define CFD_CLS_ADB 0
458#define CFD_CLS_BLOCK 1
459
460/* block_len */
461#define CFD__BLOCK_LEN_MASK 0xffff
462#define CFD__BLOCK_LEN_SHFT 0
463
464/* ctl_payload_len */
465#define CFD_PKT_TYPE_MASK 0x1f
466#define CFD_PKT_TYPE_SHFT 27
467#define CFD_DIRECT_MASK 0x01
468#define CFD_DIRECT_SHFT 26
469#define CFD_CLASS_MASK 0x03
470#define CFD_CLASS_SHFT 24
471#define CFD_DEST_MASK 0xff
472#define CFD_DEST_SHFT 16
473#define CFD_PAYLOAD_LEN_MASK 0xffff
474#define CFD_PAYLOAD_LEN_SHFT 0
475
476static inline u32 cfd_pkt_payload_len(bool direct, int cls, int dest,
477 int payload_len)
478{
479 return (CFD_PKT_TYPE << CFD_PKT_TYPE_SHFT) |
480 (direct << CFD_DIRECT_SHFT) |
481 (cls << CFD_CLASS_SHFT) |
482 (dest << CFD_DEST_SHFT) |
483 payload_len;
484}
485
486static inline int cfd_get_pkt_type(struct vpdma_cfd *cfd)
487{
488 return cfd->ctl_payload_len >> CFD_PKT_TYPE_SHFT;
489}
490
491static inline bool cfd_get_direct(struct vpdma_cfd *cfd)
492{
493 return (cfd->ctl_payload_len >> CFD_DIRECT_SHFT) & CFD_DIRECT_MASK;
494}
495
496static inline bool cfd_get_class(struct vpdma_cfd *cfd)
497{
498 return (cfd->ctl_payload_len >> CFD_CLASS_SHFT) & CFD_CLASS_MASK;
499}
500
501static inline int cfd_get_dest(struct vpdma_cfd *cfd)
502{
503 return (cfd->ctl_payload_len >> CFD_DEST_SHFT) & CFD_DEST_MASK;
504}
505
506static inline int cfd_get_payload_len(struct vpdma_cfd *cfd)
507{
508 return cfd->ctl_payload_len & CFD_PAYLOAD_LEN_MASK;
509}
510
/*
 * control descriptor: the first three words are interpreted according to
 * the control type held in type_source_ctl (see CTD_TYPE_* below)
 */
struct vpdma_ctd {
	union {
		u32 timer_value;	/* SYNC_ON_LM_TIMER */
		u32 list_addr;		/* RELOAD_LIST */
		u32 w0;			/* raw word 0 */
	};
	union {
		u32 pixel_line_count;	/* pixel/line counts, packed */
		u32 list_size;		/* RELOAD_LIST */
		u32 w1;			/* raw word 1 */
	};
	union {
		u32 event;		/* external event number */
		u32 fid_ctl;		/* FID0/1/2 control values */
		u32 w2;			/* raw word 2 */
	};
	u32 type_source_ctl;		/* pkt type, source, control code */
};
532
/* control descriptor types */
#define CTD_TYPE_SYNC_ON_CLIENT		0
#define CTD_TYPE_SYNC_ON_LIST		1
#define CTD_TYPE_SYNC_ON_EXT		2
#define CTD_TYPE_SYNC_ON_LM_TIMER	3
#define CTD_TYPE_SYNC_ON_CHANNEL	4
#define CTD_TYPE_CHNG_CLIENT_IRQ	5
#define CTD_TYPE_SEND_IRQ		6
#define CTD_TYPE_RELOAD_LIST		7
#define CTD_TYPE_ABORT_CHANNEL		8

#define CTD_PKT_TYPE		0xc

/* timer_value: 16-bit list manager timer count */
#define CTD_TIMER_VALUE_MASK	0xffff
#define CTD_TIMER_VALUE_SHFT	0

/* pixel_line_count: [31:16] pixel count, [15:0] line count */
#define CTD_PIXEL_COUNT_MASK	0xffff
#define CTD_PIXEL_COUNT_SHFT	16
#define CTD_LINE_COUNT_MASK	0xffff
#define CTD_LINE_COUNT_SHFT	0

/* list_size */
#define CTD_LIST_SIZE_MASK	0xffff
#define CTD_LIST_SIZE_SHFT	0

/* event: 4-bit external event number */
#define CTD_EVENT_MASK		0x0f
#define CTD_EVENT_SHFT		0

/* fid_ctl: three 2-bit FID control fields */
#define CTD_FID2_MASK		0x03
#define CTD_FID2_SHFT		4
#define CTD_FID1_MASK		0x03
#define CTD_FID1_SHFT		2
#define CTD_FID0_MASK		0x03
#define CTD_FID0_SHFT		0

/* type_source_ctl: [31:27] pkt type, [23:16] source, [3:0] control */
#define CTD_PKT_TYPE_MASK	0x1f
#define CTD_PKT_TYPE_SHFT	27
#define CTD_SOURCE_MASK		0xff
#define CTD_SOURCE_SHFT		16
#define CTD_CONTROL_MASK	0x0f
#define CTD_CONTROL_SHFT	0
579
580static inline u32 ctd_pixel_line_count(int pixel_count, int line_count)
581{
582 return (pixel_count << CTD_PIXEL_COUNT_SHFT) | line_count;
583}
584
585static inline u32 ctd_set_fid_ctl(int fid0, int fid1, int fid2)
586{
587 return (fid2 << CTD_FID2_SHFT) | (fid1 << CTD_FID1_SHFT) | fid0;
588}
589
590static inline u32 ctd_type_source_ctl(int source, int control)
591{
592 return (CTD_PKT_TYPE << CTD_PKT_TYPE_SHFT) |
593 (source << CTD_SOURCE_SHFT) | control;
594}
595
596static inline u32 ctd_get_pixel_count(struct vpdma_ctd *ctd)
597{
598 return ctd->pixel_line_count >> CTD_PIXEL_COUNT_SHFT;
599}
600
601static inline int ctd_get_line_count(struct vpdma_ctd *ctd)
602{
603 return ctd->pixel_line_count & CTD_LINE_COUNT_MASK;
604}
605
606static inline int ctd_get_event(struct vpdma_ctd *ctd)
607{
608 return ctd->event & CTD_EVENT_MASK;
609}
610
611static inline int ctd_get_fid2_ctl(struct vpdma_ctd *ctd)
612{
613 return (ctd->fid_ctl >> CTD_FID2_SHFT) & CTD_FID2_MASK;
614}
615
616static inline int ctd_get_fid1_ctl(struct vpdma_ctd *ctd)
617{
618 return (ctd->fid_ctl >> CTD_FID1_SHFT) & CTD_FID1_MASK;
619}
620
621static inline int ctd_get_fid0_ctl(struct vpdma_ctd *ctd)
622{
623 return ctd->fid_ctl & CTD_FID2_MASK;
624}
625
626static inline int ctd_get_pkt_type(struct vpdma_ctd *ctd)
627{
628 return ctd->type_source_ctl >> CTD_PKT_TYPE_SHFT;
629}
630
631static inline int ctd_get_source(struct vpdma_ctd *ctd)
632{
633 return (ctd->type_source_ctl >> CTD_SOURCE_SHFT) & CTD_SOURCE_MASK;
634}
635
636static inline int ctd_get_ctl(struct vpdma_ctd *ctd)
637{
638 return ctd->type_source_ctl & CTD_CONTROL_MASK;
639}
640
641#endif
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
new file mode 100644
index 000000000000..4e58069e24ff
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -0,0 +1,2099 @@
1/*
2 * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
3 *
4 * Copyright (c) 2013 Texas Instruments Inc.
5 * David Griego, <dagriego@biglakesoftware.com>
6 * Dale Farnsworth, <dale@farnsworth.org>
7 * Archit Taneja, <archit@ti.com>
8 *
9 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
10 * Pawel Osciak, <pawel@osciak.com>
11 * Marek Szyprowski, <m.szyprowski@samsung.com>
12 *
13 * Based on the virtual v4l2-mem2mem example device
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License version 2 as published by
17 * the Free Software Foundation
18 */
19
20#include <linux/delay.h>
21#include <linux/dma-mapping.h>
22#include <linux/err.h>
23#include <linux/fs.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/ioctl.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/pm_runtime.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/videodev2.h>
33
34#include <media/v4l2-common.h>
35#include <media/v4l2-ctrls.h>
36#include <media/v4l2-device.h>
37#include <media/v4l2-event.h>
38#include <media/v4l2-ioctl.h>
39#include <media/v4l2-mem2mem.h>
40#include <media/videobuf2-core.h>
41#include <media/videobuf2-dma-contig.h>
42
43#include "vpdma.h"
44#include "vpe_regs.h"
45
#define VPE_MODULE_NAME "vpe"

/* minimum and maximum frame sizes */
#define MIN_W		128
#define MIN_H		128
#define MAX_W		1920
#define MAX_H		1080

/* required alignments (log2 of the alignment, in pixels/lines) */
#define S_ALIGN		0	/* multiple of 1 */
#define H_ALIGN		1	/* multiple of 2 */
#define W_ALIGN		1	/* multiple of 2 */

/* multiple of 128 bits, line stride, 16 bytes */
#define L_ALIGN		4

/* flags that indicate a format can be used for capture/output */
#define VPE_FMT_TYPE_CAPTURE	(1 << 0)
#define VPE_FMT_TYPE_OUTPUT	(1 << 1)

/* used as plane indices */
#define VPE_MAX_PLANES	2
#define VPE_LUMA	0
#define VPE_CHROMA	1

/* per m2m context info */
#define VPE_MAX_SRC_BUFS	3	/* need 3 src fields to de-interlace */

#define VPE_DEF_BUFS_PER_JOB	1	/* default one buffer per batch job */

/*
 * each VPE context can need up to 3 config descriptors, 7 input descriptors,
 * 3 output descriptors, and 10 control descriptors
 */
#define VPE_DESC_LIST_SIZE	(10 * VPDMA_DTD_DESC_SIZE +	\
					13 * VPDMA_CFD_CTD_DESC_SIZE)

#define vpe_dbg(vpedev, fmt, arg...)	\
		dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
#define vpe_err(vpedev, fmt, arg...)	\
		dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
87
/* one set of upsampler coefficients: anchor and interpolated taps for
 * field 0 and field 1 */
struct vpe_us_coeffs {
	unsigned short	anchor_fid0_c0;
	unsigned short	anchor_fid0_c1;
	unsigned short	anchor_fid0_c2;
	unsigned short	anchor_fid0_c3;
	unsigned short	interp_fid0_c0;
	unsigned short	interp_fid0_c1;
	unsigned short	interp_fid0_c2;
	unsigned short	interp_fid0_c3;
	unsigned short	anchor_fid1_c0;
	unsigned short	anchor_fid1_c1;
	unsigned short	anchor_fid1_c2;
	unsigned short	anchor_fid1_c3;
	unsigned short	interp_fid1_c0;
	unsigned short	interp_fid1_c1;
	unsigned short	interp_fid1_c2;
	unsigned short	interp_fid1_c3;
};

/*
 * Default upsampler coefficients: entry [0] is used for progressive
 * input, entry [1] for interlaced input (see set_us_coefficients()).
 */
static const struct vpe_us_coeffs us_coeffs[] = {
	{
		/* Coefficients for progressive input */
		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
	},
	{
		/* Coefficients for Top Field Interlaced input */
		0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
		/* Coefficients for Bottom Field Interlaced input */
		0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
	},
};
123
/*
 * the following registers are for configuring some of the parameters of the
 * motion and edge detection blocks inside DEI, these generally remain the same,
 * these could be passed later via userspace if some one needs to tweak these.
 */
struct vpe_dei_regs {
	unsigned long mdt_spacial_freq_thr_reg;		/* VPE_DEI_REG2 */
	unsigned long edi_config_reg;			/* VPE_DEI_REG3 */
	unsigned long edi_lut_reg0;			/* VPE_DEI_REG4 */
	unsigned long edi_lut_reg1;			/* VPE_DEI_REG5 */
	unsigned long edi_lut_reg2;			/* VPE_DEI_REG6 */
	unsigned long edi_lut_reg3;			/* VPE_DEI_REG7 */
};

/*
 * default expert DEI register values, unlikely to be modified.
 */
static const struct vpe_dei_regs dei_regs = {
	.mdt_spacial_freq_thr_reg = 0x020C0804u,
	.edi_config_reg = 0x0118100Fu,
	.edi_lut_reg0 = 0x08040200u,
	.edi_lut_reg1 = 0x1010100Cu,
	.edi_lut_reg2 = 0x10101010u,
	.edi_lut_reg3 = 0x10101010u,
};
149
/*
 * The port_data structure contains per-port data.
 */
struct vpe_port_data {
	enum vpdma_channel channel;	/* VPDMA channel */
	u8	vb_index;		/* input frame f, f-1, f-2 index */
	u8	vb_part;		/* plane index for co-planar formats */
};

/*
 * Define indices into the port_data tables
 */
#define VPE_PORT_LUMA1_IN	0
#define VPE_PORT_CHROMA1_IN	1
#define VPE_PORT_LUMA2_IN	2
#define VPE_PORT_CHROMA2_IN	3
#define VPE_PORT_LUMA3_IN	4
#define VPE_PORT_CHROMA3_IN	5
#define VPE_PORT_MV_IN		6
#define VPE_PORT_MV_OUT		7
#define VPE_PORT_LUMA_OUT	8
#define VPE_PORT_CHROMA_OUT	9
#define VPE_PORT_RGB_OUT	10

/* per-port channel and buffer-indexing info, indexed by VPE_PORT_* */
static const struct vpe_port_data port_data[11] = {
	[VPE_PORT_LUMA1_IN] = {
		.channel	= VPE_CHAN_LUMA1_IN,
		.vb_index	= 0,
		.vb_part	= VPE_LUMA,
	},
	[VPE_PORT_CHROMA1_IN] = {
		.channel	= VPE_CHAN_CHROMA1_IN,
		.vb_index	= 0,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_LUMA2_IN] = {
		.channel	= VPE_CHAN_LUMA2_IN,
		.vb_index	= 1,
		.vb_part	= VPE_LUMA,
	},
	[VPE_PORT_CHROMA2_IN] = {
		.channel	= VPE_CHAN_CHROMA2_IN,
		.vb_index	= 1,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_LUMA3_IN] = {
		.channel	= VPE_CHAN_LUMA3_IN,
		.vb_index	= 2,
		.vb_part	= VPE_LUMA,
	},
	[VPE_PORT_CHROMA3_IN] = {
		.channel	= VPE_CHAN_CHROMA3_IN,
		.vb_index	= 2,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_MV_IN] = {
		.channel	= VPE_CHAN_MV_IN,
	},
	[VPE_PORT_MV_OUT] = {
		.channel	= VPE_CHAN_MV_OUT,
	},
	[VPE_PORT_LUMA_OUT] = {
		.channel	= VPE_CHAN_LUMA_OUT,
		.vb_part	= VPE_LUMA,
	},
	[VPE_PORT_CHROMA_OUT] = {
		.channel	= VPE_CHAN_CHROMA_OUT,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_RGB_OUT] = {
		.channel	= VPE_CHAN_RGB_OUT,
		.vb_part	= VPE_LUMA,
	},
};
224
225
/* driver info for each of the supported video formats */
struct vpe_fmt {
	char	*name;			/* human-readable name */
	u32	fourcc;			/* standard format identifier */
	u8	types;			/* CAPTURE and/or OUTPUT */
	u8	coplanar;		/* set for unpacked Luma and Chroma */
	/* vpdma format info for each plane */
	struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
};

/* table of supported formats; co-planar entries carry one vpdma format
 * per plane, packed entries only one */
static struct vpe_fmt vpe_formats[] = {
	{
		.name		= "YUV 422 co-planar",
		.fourcc		= V4L2_PIX_FMT_NV16,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.coplanar	= 1,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
				  },
	},
	{
		.name		= "YUV 420 co-planar",
		.fourcc		= V4L2_PIX_FMT_NV12,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.coplanar	= 1,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
				  },
	},
	{
		.name		= "YUYV 422 packed",
		.fourcc		= V4L2_PIX_FMT_YUYV,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
				  },
	},
	{
		.name		= "UYVY 422 packed",
		.fourcc		= V4L2_PIX_FMT_UYVY,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
				  },
	},
};
272
/*
 * per-queue, driver-specific private data.
 * there is one source queue and one destination queue for each m2m context.
 */
struct vpe_q_data {
	unsigned int		width;				/* frame width */
	unsigned int		height;				/* frame height */
	unsigned int		bytesperline[VPE_MAX_PLANES];	/* bytes per line in memory */
	enum v4l2_colorspace	colorspace;
	enum v4l2_field		field;				/* supported field value */
	unsigned int		flags;				/* Q_DATA_* bits below */
	unsigned int		sizeimage[VPE_MAX_PLANES];	/* image size in memory */
	struct v4l2_rect	c_rect;				/* crop/compose rectangle */
	struct vpe_fmt		*fmt;				/* format info */
};

/* vpe_q_data flag bits */
#define	Q_DATA_FRAME_1D		(1 << 0)	/* 1-D (non-tiled) frame layout */
#define	Q_DATA_MODE_TILED	(1 << 1)	/* tiled memory layout */
#define	Q_DATA_INTERLACED	(1 << 2)	/* queue carries interlaced content */

/* indices into vpe_ctx->q_data[] */
enum {
	Q_DATA_SRC = 0,
	Q_DATA_DST = 1,
};
298
299/* find our format description corresponding to the passed v4l2_format */
300static struct vpe_fmt *find_format(struct v4l2_format *f)
301{
302 struct vpe_fmt *fmt;
303 unsigned int k;
304
305 for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
306 fmt = &vpe_formats[k];
307 if (fmt->fourcc == f->fmt.pix.pixelformat)
308 return fmt;
309 }
310
311 return NULL;
312}
313
/*
 * there is one vpe_dev structure in the driver, it is shared by
 * all instances.
 */
struct vpe_dev {
	struct v4l2_device	v4l2_dev;
	struct video_device	vfd;
	struct v4l2_m2m_dev	*m2m_dev;

	atomic_t		num_instances;	/* count of driver instances */
	dma_addr_t		loaded_mmrs;	/* shadow mmrs in device */
	struct mutex		dev_mutex;	/* serializes device access */
	spinlock_t		lock;		/* protects buffer bookkeeping */

	int			irq;
	void __iomem		*base;		/* mapped VPE registers */

	struct vb2_alloc_ctx	*alloc_ctx;
	struct vpdma_data	*vpdma;		/* vpdma data handle */
};

/*
 * There is one vpe_ctx structure for each m2m context.
 */
struct vpe_ctx {
	struct v4l2_fh		fh;
	struct vpe_dev		*dev;
	struct v4l2_m2m_ctx	*m2m_ctx;
	struct v4l2_ctrl_handler hdl;

	unsigned int		field;			/* current field */
	unsigned int		sequence;		/* current frame/field seq */
	unsigned int		aborting;		/* abort after next irq */

	unsigned int		bufs_per_job;		/* input buffers per batch */
	unsigned int		bufs_completed;		/* bufs done in this batch */

	struct vpe_q_data	q_data[2];		/* src & dst queue data */
	struct vb2_buffer	*src_vbs[VPE_MAX_SRC_BUFS];
	struct vb2_buffer	*dst_vb;

	dma_addr_t		mv_buf_dma[2];		/* dma addrs of motion vector in/out bufs */
	void			*mv_buf[2];		/* virtual addrs of motion vector bufs */
	size_t			mv_buf_size;		/* current motion vector buffer size */
	struct vpdma_buf	mmr_adb;		/* shadow reg addr/data block */
	struct vpdma_desc_list	desc_list;		/* DMA descriptor list */

	bool			deinterlacing;		/* using de-interlacer */
	bool			load_mmrs;		/* have new shadow reg values */

	/* index of the motion vector buffer currently used as input */
	unsigned int		src_mv_buf_selector;
};
366
367
368/*
369 * M2M devices get 2 queues.
370 * Return the queue given the type.
371 */
372static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
373 enum v4l2_buf_type type)
374{
375 switch (type) {
376 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
377 return &ctx->q_data[Q_DATA_SRC];
378 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
379 return &ctx->q_data[Q_DATA_DST];
380 default:
381 BUG();
382 }
383 return NULL;
384}
385
386static u32 read_reg(struct vpe_dev *dev, int offset)
387{
388 return ioread32(dev->base + offset);
389}
390
391static void write_reg(struct vpe_dev *dev, int offset, u32 value)
392{
393 iowrite32(value, dev->base + offset);
394}
395
396/* register field read/write helpers */
397static int get_field(u32 value, u32 mask, int shift)
398{
399 return (value & (mask << shift)) >> shift;
400}
401
402static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
403{
404 return get_field(read_reg(dev, offset), mask, shift);
405}
406
407static void write_field(u32 *valp, u32 field, u32 mask, int shift)
408{
409 u32 val = *valp;
410
411 val &= ~(mask << shift);
412 val |= (field & mask) << shift;
413 *valp = val;
414}
415
416static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
417 u32 mask, int shift)
418{
419 u32 val = read_reg(dev, offset);
420
421 write_field(&val, field, mask, shift);
422
423 write_reg(dev, offset, val);
424}
425
/*
 * DMA address/data block for the shadow registers.
 * Each register group is preceded by a vpdma_adb_hdr and padded so the
 * hardware sees properly aligned payloads.
 */
struct vpe_mmr_adb {
	struct vpdma_adb_hdr	out_fmt_hdr;
	u32			out_fmt_reg[1];
	u32			out_fmt_pad[3];
	struct vpdma_adb_hdr	us1_hdr;
	u32			us1_regs[8];
	struct vpdma_adb_hdr	us2_hdr;
	u32			us2_regs[8];
	struct vpdma_adb_hdr	us3_hdr;
	u32			us3_regs[8];
	struct vpdma_adb_hdr	dei_hdr;
	u32			dei_regs[8];
	struct vpdma_adb_hdr	sc_hdr;
	u32			sc_regs[1];
	u32			sc_pad[3];
	struct vpdma_adb_hdr	csc_hdr;
	u32			csc_regs[6];
	u32			csc_pad[2];
};
448
449#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
450 VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
451/*
452 * Set the headers for all of the address/data block structures.
453 */
454static void init_adb_hdrs(struct vpe_ctx *ctx)
455{
456 VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
457 VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
458 VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
459 VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
460 VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
461 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr, sc_regs, VPE_SC_MP_SC0);
462 VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00);
463};
464
465/*
466 * Allocate or re-allocate the motion vector DMA buffers
467 * There are two buffers, one for input and one for output.
468 * However, the roles are reversed after each field is processed.
469 * In other words, after each field is processed, the previous
470 * output (dst) MV buffer becomes the new input (src) MV buffer.
471 */
472static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
473{
474 struct device *dev = ctx->dev->v4l2_dev.dev;
475
476 if (ctx->mv_buf_size == size)
477 return 0;
478
479 if (ctx->mv_buf[0])
480 dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
481 ctx->mv_buf_dma[0]);
482
483 if (ctx->mv_buf[1])
484 dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
485 ctx->mv_buf_dma[1]);
486
487 if (size == 0)
488 return 0;
489
490 ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
491 GFP_KERNEL);
492 if (!ctx->mv_buf[0]) {
493 vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
494 return -ENOMEM;
495 }
496
497 ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
498 GFP_KERNEL);
499 if (!ctx->mv_buf[1]) {
500 vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
501 dma_free_coherent(dev, size, ctx->mv_buf[0],
502 ctx->mv_buf_dma[0]);
503
504 return -ENOMEM;
505 }
506
507 ctx->mv_buf_size = size;
508 ctx->src_mv_buf_selector = 0;
509
510 return 0;
511}
512
/* release both motion vector buffers (implemented as a zero-size realloc) */
static void free_mv_buffers(struct vpe_ctx *ctx)
{
	realloc_mv_buffers(ctx, 0);
}
517
518/*
519 * While de-interlacing, we keep the two most recent input buffers
520 * around. This function frees those two buffers when we have
521 * finished processing the current stream.
522 */
523static void free_vbs(struct vpe_ctx *ctx)
524{
525 struct vpe_dev *dev = ctx->dev;
526 unsigned long flags;
527
528 if (ctx->src_vbs[2] == NULL)
529 return;
530
531 spin_lock_irqsave(&dev->lock, flags);
532 if (ctx->src_vbs[2]) {
533 v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
534 v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
535 }
536 spin_unlock_irqrestore(&dev->lock, flags);
537}
538
539/*
540 * Enable or disable the VPE clocks
541 */
542static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
543{
544 u32 val = 0;
545
546 if (on)
547 val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
548 write_reg(dev, VPE_CLK_ENABLE, val);
549}
550
551static void vpe_top_reset(struct vpe_dev *dev)
552{
553
554 write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
555 VPE_DATA_PATH_CLK_RESET_SHIFT);
556
557 usleep_range(100, 150);
558
559 write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
560 VPE_DATA_PATH_CLK_RESET_SHIFT);
561}
562
563static void vpe_top_vpdma_reset(struct vpe_dev *dev)
564{
565 write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
566 VPE_VPDMA_CLK_RESET_SHIFT);
567
568 usleep_range(100, 150);
569
570 write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
571 VPE_VPDMA_CLK_RESET_SHIFT);
572}
573
/*
 * Load the correct set of upsampler coefficients into the shadow MMRs
 */
static void set_us_coefficients(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	u32 *us1_reg = &mmr_adb->us1_regs[0];
	u32 *us2_reg = &mmr_adb->us2_regs[0];
	u32 *us3_reg = &mmr_adb->us3_regs[0];
	const unsigned short *cp, *end_cp;

	cp = &us_coeffs[0].anchor_fid0_c0;

	/* interlaced input: skip ahead to the us_coeffs[1] entry */
	if (s_q_data->flags & Q_DATA_INTERLACED)	/* interlaced */
		cp += sizeof(us_coeffs[0]) / sizeof(*cp);

	end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);

	/* pack two coefficients per register; US2 and US3 mirror US1 */
	while (cp < end_cp) {
		write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
		write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
		*us2_reg++ = *us1_reg;
		*us3_reg++ = *us1_reg++;
	}
	ctx->load_mmrs = true;
}
601
/*
 * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
 */
static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
{
	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *us1_reg0 = &mmr_adb->us1_regs[0];
	u32 *us2_reg0 = &mmr_adb->us2_regs[0];
	u32 *us3_reg0 = &mmr_adb->us3_regs[0];
	int line_mode = 1;
	int cfg_mode = 1;

	/*
	 * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
	 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
	 */

	if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
		cfg_mode = 0;
		line_mode = 0;		/* double lines to line buffer */
	}

	/* same config mode for all three upsampler instances */
	write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
	write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
	write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);

	/* line mode for the three chroma input channels */
	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);

	/* frame start for input luma */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_LUMA1_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_LUMA2_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_LUMA3_IN);

	/* frame start for input chroma */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_CHROMA1_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_CHROMA2_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_CHROMA3_IN);

	/* frame start for MV in client */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_MV_IN);

	ctx->load_mmrs = true;
}
656
657/*
658 * Set the shadow registers that are modified when the source
659 * format changes.
660 */
661static void set_src_registers(struct vpe_ctx *ctx)
662{
663 set_us_coefficients(ctx);
664}
665
666/*
667 * Set the shadow registers that are modified when the destination
668 * format changes.
669 */
670static void set_dst_registers(struct vpe_ctx *ctx)
671{
672 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
673 struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
674 u32 val = 0;
675
676 /* select RGB path when color space conversion is supported in future */
677 if (fmt->fourcc == V4L2_PIX_FMT_RGB24)
678 val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER;
679 else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
680 val |= VPE_COLOR_SEPARATE_422;
681
682 /* The source of CHR_DS is always the scaler, whether it's used or not */
683 val |= VPE_DS_SRC_DEI_SCALER;
684
685 if (fmt->fourcc != V4L2_PIX_FMT_NV12)
686 val |= VPE_DS_BYPASS;
687
688 mmr_adb->out_fmt_reg[0] = val;
689
690 ctx->load_mmrs = true;
691}
692
693/*
694 * Set the de-interlacer shadow register values
695 */
696static void set_dei_regs(struct vpe_ctx *ctx)
697{
698 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
699 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
700 unsigned int src_h = s_q_data->c_rect.height;
701 unsigned int src_w = s_q_data->c_rect.width;
702 u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
703 bool deinterlace = true;
704 u32 val = 0;
705
706 /*
707 * according to TRM, we should set DEI in progressive bypass mode when
708 * the input content is progressive, however, DEI is bypassed correctly
709 * for both progressive and interlace content in interlace bypass mode.
710 * It has been recommended not to use progressive bypass mode.
711 */
712 if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) ||
713 !(s_q_data->flags & Q_DATA_INTERLACED)) {
714 deinterlace = false;
715 val = VPE_DEI_INTERLACE_BYPASS;
716 }
717
718 src_h = deinterlace ? src_h * 2 : src_h;
719
720 val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
721 (src_w << VPE_DEI_WIDTH_SHIFT) |
722 VPE_DEI_FIELD_FLUSH;
723
724 *dei_mmr0 = val;
725
726 ctx->load_mmrs = true;
727}
728
729static void set_dei_shadow_registers(struct vpe_ctx *ctx)
730{
731 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
732 u32 *dei_mmr = &mmr_adb->dei_regs[0];
733 const struct vpe_dei_regs *cur = &dei_regs;
734
735 dei_mmr[2] = cur->mdt_spacial_freq_thr_reg;
736 dei_mmr[3] = cur->edi_config_reg;
737 dei_mmr[4] = cur->edi_lut_reg0;
738 dei_mmr[5] = cur->edi_lut_reg1;
739 dei_mmr[6] = cur->edi_lut_reg2;
740 dei_mmr[7] = cur->edi_lut_reg3;
741
742 ctx->load_mmrs = true;
743}
744
745static void set_csc_coeff_bypass(struct vpe_ctx *ctx)
746{
747 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
748 u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5];
749
750 *shadow_csc_reg5 |= VPE_CSC_BYPASS;
751
752 ctx->load_mmrs = true;
753}
754
755static void set_sc_regs_bypass(struct vpe_ctx *ctx)
756{
757 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
758 u32 *sc_reg0 = &mmr_adb->sc_regs[0];
759 u32 val = 0;
760
761 val |= VPE_SC_BYPASS;
762 *sc_reg0 = val;
763
764 ctx->load_mmrs = true;
765}
766
/*
 * Set the shadow registers whose values are modified when either the
 * source or destination format is changed.
 * Returns 0 on success or a negative error from the motion vector
 * buffer reallocation.
 */
static int set_srcdst_params(struct vpe_ctx *ctx)
{
	struct vpe_q_data *s_q_data =  &ctx->q_data[Q_DATA_SRC];
	struct vpe_q_data *d_q_data =  &ctx->q_data[Q_DATA_DST];
	size_t mv_buf_size;
	int ret;

	ctx->sequence = 0;
	ctx->field = V4L2_FIELD_TOP;

	/* de-interlace only for interlaced source and progressive dest */
	if ((s_q_data->flags & Q_DATA_INTERLACED) &&
			!(d_q_data->flags & Q_DATA_INTERLACED)) {
		const struct vpdma_data_format *mv =
			&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];

		ctx->deinterlacing = 1;
		/* mv->depth is in bits per pixel; >> 3 converts to bytes */
		mv_buf_size =
			(s_q_data->width * s_q_data->height * mv->depth) >> 3;
	} else {
		ctx->deinterlacing = 0;
		mv_buf_size = 0;
	}

	free_vbs(ctx);

	ret = realloc_mv_buffers(ctx, mv_buf_size);
	if (ret)
		return ret;

	set_cfg_and_line_modes(ctx);
	set_dei_regs(ctx);
	set_csc_coeff_bypass(ctx);
	set_sc_regs_bypass(ctx);

	return 0;
}
807
808/*
809 * Return the vpe_ctx structure for a given struct file
810 */
811static struct vpe_ctx *file2ctx(struct file *file)
812{
813 return container_of(file->private_data, struct vpe_ctx, fh);
814}
815
816/*
817 * mem2mem callbacks
818 */
819
820/**
821 * job_ready() - check whether an instance is ready to be scheduled to run
822 */
823static int job_ready(void *priv)
824{
825 struct vpe_ctx *ctx = priv;
826 int needed = ctx->bufs_per_job;
827
828 if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
829 needed += 2; /* need additional two most recent fields */
830
831 if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
832 return 0;
833
834 return 1;
835}
836
837static void job_abort(void *priv)
838{
839 struct vpe_ctx *ctx = priv;
840
841 /* Will cancel the transaction in the next interrupt handler */
842 ctx->aborting = 1;
843}
844
845/*
846 * Lock access to the device
847 */
848static void vpe_lock(void *priv)
849{
850 struct vpe_ctx *ctx = priv;
851 struct vpe_dev *dev = ctx->dev;
852 mutex_lock(&dev->dev_mutex);
853}
854
855static void vpe_unlock(void *priv)
856{
857 struct vpe_ctx *ctx = priv;
858 struct vpe_dev *dev = ctx->dev;
859 mutex_unlock(&dev->dev_mutex);
860}
861
/* dump all VPE top-level registers to the debug log */
static void vpe_dump_regs(struct vpe_dev *dev)
{
#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))

	vpe_dbg(dev, "VPE Registers:\n");

	DUMPREG(PID);
	DUMPREG(SYSCONFIG);
	DUMPREG(INT0_STATUS0_RAW);
	DUMPREG(INT0_STATUS0);
	DUMPREG(INT0_ENABLE0);
	DUMPREG(INT0_STATUS1_RAW);
	DUMPREG(INT0_STATUS1);
	DUMPREG(INT0_ENABLE1);
	DUMPREG(CLK_ENABLE);
	DUMPREG(CLK_RESET);
	DUMPREG(CLK_FORMAT_SELECT);
	DUMPREG(CLK_RANGE_MAP);
	DUMPREG(US1_R0);
	DUMPREG(US1_R1);
	DUMPREG(US1_R2);
	DUMPREG(US1_R3);
	DUMPREG(US1_R4);
	DUMPREG(US1_R5);
	DUMPREG(US1_R6);
	DUMPREG(US1_R7);
	DUMPREG(US2_R0);
	DUMPREG(US2_R1);
	DUMPREG(US2_R2);
	DUMPREG(US2_R3);
	DUMPREG(US2_R4);
	DUMPREG(US2_R5);
	DUMPREG(US2_R6);
	DUMPREG(US2_R7);
	DUMPREG(US3_R0);
	DUMPREG(US3_R1);
	DUMPREG(US3_R2);
	DUMPREG(US3_R3);
	DUMPREG(US3_R4);
	DUMPREG(US3_R5);
	DUMPREG(US3_R6);
	DUMPREG(US3_R7);
	DUMPREG(DEI_FRAME_SIZE);
	DUMPREG(MDT_BYPASS);
	DUMPREG(MDT_SF_THRESHOLD);
	DUMPREG(EDI_CONFIG);
	DUMPREG(DEI_EDI_LUT_R0);
	DUMPREG(DEI_EDI_LUT_R1);
	DUMPREG(DEI_EDI_LUT_R2);
	DUMPREG(DEI_EDI_LUT_R3);
	DUMPREG(DEI_FMD_WINDOW_R0);
	DUMPREG(DEI_FMD_WINDOW_R1);
	DUMPREG(DEI_FMD_CONTROL_R0);
	DUMPREG(DEI_FMD_CONTROL_R1);
	DUMPREG(DEI_FMD_STATUS_R0);
	DUMPREG(DEI_FMD_STATUS_R1);
	DUMPREG(DEI_FMD_STATUS_R2);
	DUMPREG(SC_MP_SC0);
	DUMPREG(SC_MP_SC1);
	DUMPREG(SC_MP_SC2);
	DUMPREG(SC_MP_SC3);
	DUMPREG(SC_MP_SC4);
	DUMPREG(SC_MP_SC5);
	DUMPREG(SC_MP_SC6);
	DUMPREG(SC_MP_SC8);
	DUMPREG(SC_MP_SC9);
	DUMPREG(SC_MP_SC10);
	DUMPREG(SC_MP_SC11);
	DUMPREG(SC_MP_SC12);
	DUMPREG(SC_MP_SC13);
	DUMPREG(SC_MP_SC17);
	DUMPREG(SC_MP_SC18);
	DUMPREG(SC_MP_SC19);
	DUMPREG(SC_MP_SC20);
	DUMPREG(SC_MP_SC21);
	DUMPREG(SC_MP_SC22);
	DUMPREG(SC_MP_SC23);
	DUMPREG(SC_MP_SC24);
	DUMPREG(SC_MP_SC25);
	DUMPREG(CSC_CSC00);
	DUMPREG(CSC_CSC01);
	DUMPREG(CSC_CSC02);
	DUMPREG(CSC_CSC03);
	DUMPREG(CSC_CSC04);
	DUMPREG(CSC_CSC05);
#undef DUMPREG
}
949
/*
 * add_out_dtd() - append an outbound data transfer descriptor for @port to
 * the context's VPDMA descriptor list.
 *
 * VPE_PORT_MV_OUT writes the motion vector buffer; all other ports write a
 * plane of the destination vb2 buffer. If the plane dma address cannot be
 * obtained, the descriptor is skipped and only an error is logged.
 */
static void add_out_dtd(struct vpe_ctx *ctx, int port)
{
	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
	const struct vpe_port_data *p_data = &port_data[port];
	struct vb2_buffer *vb = ctx->dst_vb;
	struct v4l2_rect *c_rect = &q_data->c_rect;
	struct vpe_fmt *fmt = q_data->fmt;
	const struct vpdma_data_format *vpdma_fmt;
	/* write into the MV buffer NOT used as the current source MV buffer */
	int mv_buf_selector = !ctx->src_mv_buf_selector;
	dma_addr_t dma_addr;
	u32 flags = 0;

	if (port == VPE_PORT_MV_OUT) {
		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
	} else {
		/* to incorporate interleaved formats */
		int plane = fmt->coplanar ? p_data->vb_part : 0;

		vpdma_fmt = fmt->vpdma_fmt[plane];
		dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
		if (!dma_addr) {
			vpe_err(ctx->dev,
				"acquiring output buffer(%d) dma_addr failed\n",
				port);
			return;
		}
	}

	/* propagate the queue's memory layout into the descriptor flags */
	if (q_data->flags & Q_DATA_FRAME_1D)
		flags |= VPDMA_DATA_FRAME_1D;
	if (q_data->flags & Q_DATA_MODE_TILED)
		flags |= VPDMA_DATA_MODE_TILED;

	vpdma_add_out_dtd(&ctx->desc_list, c_rect, vpdma_fmt, dma_addr,
		p_data->channel, flags);
}
987
/*
 * add_in_dtd() - append an inbound data transfer descriptor for @port to
 * the context's VPDMA descriptor list.
 *
 * VPE_PORT_MV_IN reads the current source motion vector buffer; all other
 * ports read a plane of the source vb2 buffer selected by the port's
 * vb_index. If the plane dma address cannot be obtained, the descriptor is
 * skipped and only an error is logged.
 */
static void add_in_dtd(struct vpe_ctx *ctx, int port)
{
	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
	const struct vpe_port_data *p_data = &port_data[port];
	struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index];
	struct v4l2_rect *c_rect = &q_data->c_rect;
	struct vpe_fmt *fmt = q_data->fmt;
	const struct vpdma_data_format *vpdma_fmt;
	int mv_buf_selector = ctx->src_mv_buf_selector;
	/* field flag for the descriptor: 1 for the bottom field, else 0 */
	int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM;
	dma_addr_t dma_addr;
	u32 flags = 0;

	if (port == VPE_PORT_MV_IN) {
		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
	} else {
		/* to incorporate interleaved formats */
		int plane = fmt->coplanar ? p_data->vb_part : 0;

		vpdma_fmt = fmt->vpdma_fmt[plane];

		dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
		if (!dma_addr) {
			vpe_err(ctx->dev,
				"acquiring input buffer(%d) dma_addr failed\n",
				port);
			return;
		}
	}

	/* propagate the queue's memory layout into the descriptor flags */
	if (q_data->flags & Q_DATA_FRAME_1D)
		flags |= VPDMA_DATA_FRAME_1D;
	if (q_data->flags & Q_DATA_MODE_TILED)
		flags |= VPDMA_DATA_MODE_TILED;

	vpdma_add_in_dtd(&ctx->desc_list, q_data->width, q_data->height,
		c_rect, vpdma_fmt, dma_addr, p_data->channel, field, flags);
}
1027
1028/*
1029 * Enable the expected IRQ sources
1030 */
1031static void enable_irqs(struct vpe_ctx *ctx)
1032{
1033 write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
1034 write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
1035 VPE_DS1_UV_ERROR_INT);
1036
1037 vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
1038}
1039
/* Mask all INT0 sources and the VPDMA list 0 completion irq. */
static void disable_irqs(struct vpe_ctx *ctx)
{
	write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);

	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
}
1047
/* device_run() - prepares and starts the device
 *
 * This function is only called when both the source and destination
 * buffers are in place.
 */
static void device_run(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];

	/*
	 * Deinterlacing needs three source fields; src_vbs[1] and [2] are
	 * only dequeued once, afterwards they are recycled in vpe_irq().
	 */
	if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
		ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		WARN_ON(ctx->src_vbs[2] == NULL);
		ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		WARN_ON(ctx->src_vbs[1] == NULL);
	}

	ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	WARN_ON(ctx->src_vbs[0] == NULL);
	ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	WARN_ON(ctx->dst_vb == NULL);

	/* config descriptors */
	/* only reload the shadow MMRs when they changed or another context
	 * loaded its own set since we last ran */
	if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
		vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
		ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
		ctx->load_mmrs = false;
	}

	/* output data descriptors */
	if (ctx->deinterlacing)
		add_out_dtd(ctx, VPE_PORT_MV_OUT);

	add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
	if (d_q_data->fmt->coplanar)
		add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);

	/* input data descriptors */
	if (ctx->deinterlacing) {
		add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
		add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);

		add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
		add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
	}

	add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
	add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);

	if (ctx->deinterlacing)
		add_in_dtd(ctx, VPE_PORT_MV_IN);

	/* sync on channel control descriptors for input ports */
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);

	if (ctx->deinterlacing) {
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA2_IN);
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_CHROMA2_IN);

		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA3_IN);
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_CHROMA3_IN);

		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
	}

	/* sync on channel control descriptors for output ports */
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT);
	if (d_q_data->fmt->coplanar)
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT);

	if (ctx->deinterlacing)
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);

	/* arm the completion irq, then hand the finished list to VPDMA */
	enable_irqs(ctx);

	vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
	vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
}
1132
1133static void dei_error(struct vpe_ctx *ctx)
1134{
1135 dev_warn(ctx->dev->v4l2_dev.dev,
1136 "received DEI error interrupt\n");
1137}
1138
1139static void ds1_uv_error(struct vpe_ctx *ctx)
1140{
1141 dev_warn(ctx->dev->v4l2_dev.dev,
1142 "received downsampler error interrupt\n");
1143}
1144
/*
 * vpe_irq() - top half for the VPE interrupt.
 *
 * Acknowledges the INT0 status registers, handles DEI/downsampler error
 * irqs, and on list 0 completion finishes the current source/destination
 * buffer pair. If more buffers remain in the current job it immediately
 * kicks device_run() again; otherwise it finishes the m2m transaction.
 */
static irqreturn_t vpe_irq(int irq_vpe, void *data)
{
	struct vpe_dev *dev = (struct vpe_dev *)data;
	struct vpe_ctx *ctx;
	struct vpe_q_data *d_q_data;
	struct vb2_buffer *s_vb, *d_vb;
	struct v4l2_buffer *s_buf, *d_buf;
	unsigned long flags;
	u32 irqst0, irqst1;

	/* read and immediately acknowledge both status registers */
	irqst0 = read_reg(dev, VPE_INT0_STATUS0);
	if (irqst0) {
		write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
		vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
	}

	irqst1 = read_reg(dev, VPE_INT0_STATUS1);
	if (irqst1) {
		write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
		vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
	}

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx) {
		vpe_err(dev, "instance released before end of transaction\n");
		goto handled;
	}

	/* error irqs: log and clear the handled bits */
	if (irqst1) {
		if (irqst1 & VPE_DEI_ERROR_INT) {
			irqst1 &= ~VPE_DEI_ERROR_INT;
			dei_error(ctx);
		}
		if (irqst1 & VPE_DS1_UV_ERROR_INT) {
			irqst1 &= ~VPE_DS1_UV_ERROR_INT;
			ds1_uv_error(ctx);
		}
	}

	if (irqst0) {
		if (irqst0 & VPE_INT0_LIST0_COMPLETE)
			vpdma_clear_list_stat(ctx->dev->vpdma);

		irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
	}

	/* any bits still set were not expected by this handler */
	if (irqst0 | irqst1) {
		dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
			"INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
			irqst0, irqst1);
	}

	disable_irqs(ctx);

	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);

	vpdma_reset_desc_list(&ctx->desc_list);

	/* the previous dst mv buffer becomes the next src mv buffer */
	ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;

	if (ctx->aborting)
		goto finished;

	s_vb = ctx->src_vbs[0];
	d_vb = ctx->dst_vb;
	s_buf = &s_vb->v4l2_buf;
	d_buf = &d_vb->v4l2_buf;

	/* copy source metadata (timestamp/timecode) to the destination */
	d_buf->timestamp = s_buf->timestamp;
	if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
		d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
		d_buf->timecode = s_buf->timecode;
	}
	d_buf->sequence = ctx->sequence;
	d_buf->field = ctx->field;

	/* interlaced output: advance sequence only after the bottom field */
	d_q_data = &ctx->q_data[Q_DATA_DST];
	if (d_q_data->flags & Q_DATA_INTERLACED) {
		if (ctx->field == V4L2_FIELD_BOTTOM) {
			ctx->sequence++;
			ctx->field = V4L2_FIELD_TOP;
		} else {
			WARN_ON(ctx->field != V4L2_FIELD_TOP);
			ctx->field = V4L2_FIELD_BOTTOM;
		}
	} else {
		ctx->sequence++;
	}

	/* when deinterlacing, the oldest source field is the one released */
	if (ctx->deinterlacing)
		s_vb = ctx->src_vbs[2];

	spin_lock_irqsave(&dev->lock, flags);
	v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* shift the field history window forward */
	if (ctx->deinterlacing) {
		ctx->src_vbs[2] = ctx->src_vbs[1];
		ctx->src_vbs[1] = ctx->src_vbs[0];
	}

	ctx->bufs_completed++;
	if (ctx->bufs_completed < ctx->bufs_per_job) {
		device_run(ctx);
		goto handled;
	}

finished:
	vpe_dbg(ctx->dev, "finishing transaction\n");
	ctx->bufs_completed = 0;
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
handled:
	return IRQ_HANDLED;
}
1262
1263/*
1264 * video ioctls
1265 */
1266static int vpe_querycap(struct file *file, void *priv,
1267 struct v4l2_capability *cap)
1268{
1269 strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
1270 strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
1271 strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info));
1272 cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
1273 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
1274 return 0;
1275}
1276
1277static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
1278{
1279 int i, index;
1280 struct vpe_fmt *fmt = NULL;
1281
1282 index = 0;
1283 for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
1284 if (vpe_formats[i].types & type) {
1285 if (index == f->index) {
1286 fmt = &vpe_formats[i];
1287 break;
1288 }
1289 index++;
1290 }
1291 }
1292
1293 if (!fmt)
1294 return -EINVAL;
1295
1296 strncpy(f->description, fmt->name, sizeof(f->description) - 1);
1297 f->pixelformat = fmt->fourcc;
1298 return 0;
1299}
1300
1301static int vpe_enum_fmt(struct file *file, void *priv,
1302 struct v4l2_fmtdesc *f)
1303{
1304 if (V4L2_TYPE_IS_OUTPUT(f->type))
1305 return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);
1306
1307 return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
1308}
1309
/*
 * vpe_g_fmt() - return the currently configured multiplanar format for the
 * queue selected by f->type.
 *
 * The capture queue reports the colorspace of the source (output) queue,
 * since VPE does not convert colorspace between them.
 */
static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct vpe_ctx *ctx = file2ctx(file);
	struct vb2_queue *vq;
	struct vpe_q_data *q_data;
	int i;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	q_data = get_q_data(ctx, f->type);

	pix->width = q_data->width;
	pix->height = q_data->height;
	pix->pixelformat = q_data->fmt->fourcc;
	pix->field = q_data->field;

	if (V4L2_TYPE_IS_OUTPUT(f->type)) {
		pix->colorspace = q_data->colorspace;
	} else {
		struct vpe_q_data *s_q_data;

		/* get colorspace from the source queue */
		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

		pix->colorspace = s_q_data->colorspace;
	}

	/* coplanar formats expose separate luma and chroma planes */
	pix->num_planes = q_data->fmt->coplanar ? 2 : 1;

	for (i = 0; i < pix->num_planes; i++) {
		pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
		pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
	}

	return 0;
}
1349
/*
 * __vpe_try_fmt() - clamp and adjust the requested multiplanar format to
 * what the hardware supports, without committing it.
 *
 * Rejects a fourcc whose format entry is missing or does not match the
 * queue direction (@type). Field is forced to NONE unless ALTERNATE was
 * requested; width/height are bounded and aligned; bytesperline/sizeimage
 * are recomputed per plane from the VPDMA depth of the format.
 */
static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
		       struct vpe_fmt *fmt, int type)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	int i;

	if (!fmt || !(fmt->types & type)) {
		vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
			pix->pixelformat);
		return -EINVAL;
	}

	if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
		pix->field = V4L2_FIELD_NONE;

	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, W_ALIGN,
			      &pix->height, MIN_H, MAX_H, H_ALIGN,
			      S_ALIGN);

	pix->num_planes = fmt->coplanar ? 2 : 1;
	pix->pixelformat = fmt->fourcc;

	if (type == VPE_FMT_TYPE_CAPTURE) {
		struct vpe_q_data *s_q_data;

		/* get colorspace from the source queue */
		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

		pix->colorspace = s_q_data->colorspace;
	} else {
		if (!pix->colorspace)
			pix->colorspace = V4L2_COLORSPACE_SMPTE240M;
	}

	for (i = 0; i < pix->num_planes; i++) {
		int depth;

		plane_fmt = &pix->plane_fmt[i];
		depth = fmt->vpdma_fmt[i]->depth;

		/* luma stride is aligned; chroma uses width bytes per line —
		 * NOTE(review): presumably correct only for the supported
		 * subsampled formats; confirm against vpe_formats[] */
		if (i == VPE_LUMA)
			plane_fmt->bytesperline =
					round_up((pix->width * depth) >> 3,
						1 << L_ALIGN);
		else
			plane_fmt->bytesperline = pix->width;

		/* depth is in bits per pixel, hence the >> 3 */
		plane_fmt->sizeimage =
				(pix->height * pix->width * depth) >> 3;
	}

	return 0;
}
1404
1405static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
1406{
1407 struct vpe_ctx *ctx = file2ctx(file);
1408 struct vpe_fmt *fmt = find_format(f);
1409
1410 if (V4L2_TYPE_IS_OUTPUT(f->type))
1411 return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);
1412 else
1413 return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
1414}
1415
/*
 * __vpe_s_fmt() - commit an already-validated format into the queue state.
 *
 * Must be called after __vpe_try_fmt() has adjusted @f. Fails with -EBUSY
 * while buffers are allocated on the queue. Also resets the crop rectangle
 * to the full frame and updates the interlaced flag from the field setting.
 */
static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	struct vpe_q_data *q_data;
	struct vb2_queue *vq;
	int i;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	if (vb2_is_busy(vq)) {
		vpe_err(ctx->dev, "queue busy\n");
		return -EBUSY;
	}

	q_data = get_q_data(ctx, f->type);
	if (!q_data)
		return -EINVAL;

	q_data->fmt = find_format(f);
	q_data->width = pix->width;
	q_data->height = pix->height;
	q_data->colorspace = pix->colorspace;
	q_data->field = pix->field;

	for (i = 0; i < pix->num_planes; i++) {
		plane_fmt = &pix->plane_fmt[i];

		q_data->bytesperline[i] = plane_fmt->bytesperline;
		q_data->sizeimage[i] = plane_fmt->sizeimage;
	}

	/* reset the crop rectangle to the full frame */
	q_data->c_rect.left = 0;
	q_data->c_rect.top = 0;
	q_data->c_rect.width = q_data->width;
	q_data->c_rect.height = q_data->height;

	if (q_data->field == V4L2_FIELD_ALTERNATE)
		q_data->flags |= Q_DATA_INTERLACED;
	else
		q_data->flags &= ~Q_DATA_INTERLACED;

	vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
		f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
		q_data->bytesperline[VPE_LUMA]);
	if (q_data->fmt->coplanar)
		vpe_dbg(ctx->dev, " bpl_uv %d\n",
			q_data->bytesperline[VPE_CHROMA]);

	return 0;
}
1469
1470static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
1471{
1472 int ret;
1473 struct vpe_ctx *ctx = file2ctx(file);
1474
1475 ret = vpe_try_fmt(file, priv, f);
1476 if (ret)
1477 return ret;
1478
1479 ret = __vpe_s_fmt(ctx, f);
1480 if (ret)
1481 return ret;
1482
1483 if (V4L2_TYPE_IS_OUTPUT(f->type))
1484 set_src_registers(ctx);
1485 else
1486 set_dst_registers(ctx);
1487
1488 return set_srcdst_params(ctx);
1489}
1490
1491static int vpe_reqbufs(struct file *file, void *priv,
1492 struct v4l2_requestbuffers *reqbufs)
1493{
1494 struct vpe_ctx *ctx = file2ctx(file);
1495
1496 return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
1497}
1498
1499static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1500{
1501 struct vpe_ctx *ctx = file2ctx(file);
1502
1503 return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
1504}
1505
1506static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1507{
1508 struct vpe_ctx *ctx = file2ctx(file);
1509
1510 return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
1511}
1512
1513static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1514{
1515 struct vpe_ctx *ctx = file2ctx(file);
1516
1517 return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
1518}
1519
1520static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
1521{
1522 struct vpe_ctx *ctx = file2ctx(file);
1523
1524 return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
1525}
1526
1527static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
1528{
1529 struct vpe_ctx *ctx = file2ctx(file);
1530
1531 vpe_dump_regs(ctx->dev);
1532 vpdma_dump_regs(ctx->dev->vpdma);
1533
1534 return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
1535}
1536
1537/*
1538 * defines number of buffers/frames a context can process with VPE before
1539 * switching to a different context. default value is 1 buffer per context
1540 */
1541#define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0)
1542
1543static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
1544{
1545 struct vpe_ctx *ctx =
1546 container_of(ctrl->handler, struct vpe_ctx, hdl);
1547
1548 switch (ctrl->id) {
1549 case V4L2_CID_VPE_BUFS_PER_JOB:
1550 ctx->bufs_per_job = ctrl->val;
1551 break;
1552
1553 default:
1554 vpe_err(ctx->dev, "Invalid control\n");
1555 return -EINVAL;
1556 }
1557
1558 return 0;
1559}
1560
/* Control framework ops: only setting controls is implemented. */
static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
	.s_ctrl = vpe_s_ctrl,
};
1564
/*
 * V4L2 ioctl dispatch table. Format handlers are shared between the
 * capture and output (multiplanar) queues; buffer ioctls delegate to the
 * mem2mem framework.
 */
static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
	.vidioc_querycap	= vpe_querycap,

	.vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane	= vpe_s_fmt,

	.vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
	.vidioc_g_fmt_vid_out_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_out_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_out_mplane	= vpe_s_fmt,

	.vidioc_reqbufs		= vpe_reqbufs,
	.vidioc_querybuf	= vpe_querybuf,

	.vidioc_qbuf		= vpe_qbuf,
	.vidioc_dqbuf		= vpe_dqbuf,

	.vidioc_streamon	= vpe_streamon,
	.vidioc_streamoff	= vpe_streamoff,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
1589
1590/*
1591 * Queue operations
1592 */
1593static int vpe_queue_setup(struct vb2_queue *vq,
1594 const struct v4l2_format *fmt,
1595 unsigned int *nbuffers, unsigned int *nplanes,
1596 unsigned int sizes[], void *alloc_ctxs[])
1597{
1598 int i;
1599 struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
1600 struct vpe_q_data *q_data;
1601
1602 q_data = get_q_data(ctx, vq->type);
1603
1604 *nplanes = q_data->fmt->coplanar ? 2 : 1;
1605
1606 for (i = 0; i < *nplanes; i++) {
1607 sizes[i] = q_data->sizeimage[i];
1608 alloc_ctxs[i] = ctx->dev->alloc_ctx;
1609 }
1610
1611 vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
1612 sizes[VPE_LUMA]);
1613 if (q_data->fmt->coplanar)
1614 vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
1615
1616 return 0;
1617}
1618
/*
 * vpe_buf_prepare() - vb2 callback: verify each plane of a queued buffer
 * is large enough for the committed format, then set its payload size.
 *
 * All planes are validated before any payload is set, so a failure leaves
 * the buffer untouched.
 */
static int vpe_buf_prepare(struct vb2_buffer *vb)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct vpe_q_data *q_data;
	int i, num_planes;

	vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);

	q_data = get_q_data(ctx, vb->vb2_queue->type);
	num_planes = q_data->fmt->coplanar ? 2 : 1;

	for (i = 0; i < num_planes; i++) {
		if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
			vpe_err(ctx->dev,
				"data will not fit into plane (%lu < %lu)\n",
				vb2_plane_size(vb, i),
				(long) q_data->sizeimage[i]);
			return -EINVAL;
		}
	}

	for (i = 0; i < num_planes; i++)
		vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);

	return 0;
}
1645
/* vb2 callback: hand a ready buffer to the mem2mem framework. */
static void vpe_buf_queue(struct vb2_buffer *vb)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
1651
/* vb2 callback: drop the driver lock before vb2 sleeps waiting for a buffer. */
static void vpe_wait_prepare(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);
	vpe_unlock(ctx);
}
1657
/* vb2 callback: re-take the driver lock after vb2 wakes from its wait. */
static void vpe_wait_finish(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);
	vpe_lock(ctx);
}
1663
/* vb2 queue operations shared by the source and destination queues. */
static struct vb2_ops vpe_qops = {
	.queue_setup	 = vpe_queue_setup,
	.buf_prepare	 = vpe_buf_prepare,
	.buf_queue	 = vpe_buf_queue,
	.wait_prepare	 = vpe_wait_prepare,
	.wait_finish	 = vpe_wait_finish,
};
1671
1672static int queue_init(void *priv, struct vb2_queue *src_vq,
1673 struct vb2_queue *dst_vq)
1674{
1675 struct vpe_ctx *ctx = priv;
1676 int ret;
1677
1678 memset(src_vq, 0, sizeof(*src_vq));
1679 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1680 src_vq->io_modes = VB2_MMAP;
1681 src_vq->drv_priv = ctx;
1682 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1683 src_vq->ops = &vpe_qops;
1684 src_vq->mem_ops = &vb2_dma_contig_memops;
1685 src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1686
1687 ret = vb2_queue_init(src_vq);
1688 if (ret)
1689 return ret;
1690
1691 memset(dst_vq, 0, sizeof(*dst_vq));
1692 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1693 dst_vq->io_modes = VB2_MMAP;
1694 dst_vq->drv_priv = ctx;
1695 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1696 dst_vq->ops = &vpe_qops;
1697 dst_vq->mem_ops = &vb2_dma_contig_memops;
1698 dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1699
1700 return vb2_queue_init(dst_vq);
1701}
1702
/* Custom control: how many buffers a context processes per m2m job. */
static const struct v4l2_ctrl_config vpe_bufs_per_job = {
	.ops = &vpe_ctrl_ops,
	.id = V4L2_CID_VPE_BUFS_PER_JOB,
	.name = "Buffers Per Transaction",
	.type = V4L2_CTRL_TYPE_INTEGER,
	.def = VPE_DEF_BUFS_PER_JOB,
	.min = 1,
	.max = VIDEO_MAX_FRAME,
	.step = 1,
};
1713
1714/*
1715 * File operations
1716 */
1717static int vpe_open(struct file *file)
1718{
1719 struct vpe_dev *dev = video_drvdata(file);
1720 struct vpe_ctx *ctx = NULL;
1721 struct vpe_q_data *s_q_data;
1722 struct v4l2_ctrl_handler *hdl;
1723 int ret;
1724
1725 vpe_dbg(dev, "vpe_open\n");
1726
1727 if (!dev->vpdma->ready) {
1728 vpe_err(dev, "vpdma firmware not loaded\n");
1729 return -ENODEV;
1730 }
1731
1732 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1733 if (!ctx)
1734 return -ENOMEM;
1735
1736 ctx->dev = dev;
1737
1738 if (mutex_lock_interruptible(&dev->dev_mutex)) {
1739 ret = -ERESTARTSYS;
1740 goto free_ctx;
1741 }
1742
1743 ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
1744 VPDMA_LIST_TYPE_NORMAL);
1745 if (ret != 0)
1746 goto unlock;
1747
1748 ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
1749 if (ret != 0)
1750 goto free_desc_list;
1751
1752 init_adb_hdrs(ctx);
1753
1754 v4l2_fh_init(&ctx->fh, video_devdata(file));
1755 file->private_data = &ctx->fh;
1756
1757 hdl = &ctx->hdl;
1758 v4l2_ctrl_handler_init(hdl, 1);
1759 v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
1760 if (hdl->error) {
1761 ret = hdl->error;
1762 goto exit_fh;
1763 }
1764 ctx->fh.ctrl_handler = hdl;
1765 v4l2_ctrl_handler_setup(hdl);
1766
1767 s_q_data = &ctx->q_data[Q_DATA_SRC];
1768 s_q_data->fmt = &vpe_formats[2];
1769 s_q_data->width = 1920;
1770 s_q_data->height = 1080;
1771 s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
1772 s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
1773 s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M;
1774 s_q_data->field = V4L2_FIELD_NONE;
1775 s_q_data->c_rect.left = 0;
1776 s_q_data->c_rect.top = 0;
1777 s_q_data->c_rect.width = s_q_data->width;
1778 s_q_data->c_rect.height = s_q_data->height;
1779 s_q_data->flags = 0;
1780
1781 ctx->q_data[Q_DATA_DST] = *s_q_data;
1782
1783 set_dei_shadow_registers(ctx);
1784 set_src_registers(ctx);
1785 set_dst_registers(ctx);
1786 ret = set_srcdst_params(ctx);
1787 if (ret)
1788 goto exit_fh;
1789
1790 ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
1791
1792 if (IS_ERR(ctx->m2m_ctx)) {
1793 ret = PTR_ERR(ctx->m2m_ctx);
1794 goto exit_fh;
1795 }
1796
1797 v4l2_fh_add(&ctx->fh);
1798
1799 /*
1800 * for now, just report the creation of the first instance, we can later
1801 * optimize the driver to enable or disable clocks when the first
1802 * instance is created or the last instance released
1803 */
1804 if (atomic_inc_return(&dev->num_instances) == 1)
1805 vpe_dbg(dev, "first instance created\n");
1806
1807 ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;
1808
1809 ctx->load_mmrs = true;
1810
1811 vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
1812 ctx, ctx->m2m_ctx);
1813
1814 mutex_unlock(&dev->dev_mutex);
1815
1816 return 0;
1817exit_fh:
1818 v4l2_ctrl_handler_free(hdl);
1819 v4l2_fh_exit(&ctx->fh);
1820 vpdma_free_desc_buf(&ctx->mmr_adb);
1821free_desc_list:
1822 vpdma_free_desc_list(&ctx->desc_list);
1823unlock:
1824 mutex_unlock(&dev->dev_mutex);
1825free_ctx:
1826 kfree(ctx);
1827 return ret;
1828}
1829
/*
 * vpe_release() - tear down a per-file context: release buffers, the
 * descriptor list, the shadow MMR buffer, the file handle, controls and
 * the mem2mem context.
 */
static int vpe_release(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = file2ctx(file);

	vpe_dbg(dev, "releasing instance %p\n", ctx);

	mutex_lock(&dev->dev_mutex);
	free_vbs(ctx);
	free_mv_buffers(ctx);
	vpdma_free_desc_list(&ctx->desc_list);
	vpdma_free_desc_buf(&ctx->mmr_adb);

	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->hdl);
	v4l2_m2m_ctx_release(ctx->m2m_ctx);

	kfree(ctx);

	/*
	 * for now, just report the release of the last instance, we can later
	 * optimize the driver to enable or disable clocks when the first
	 * instance is created or the last instance released
	 */
	if (atomic_dec_return(&dev->num_instances) == 0)
		vpe_dbg(dev, "last instance released\n");

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
1862
/* poll(): serialize with the driver mutex and delegate to mem2mem. */
static unsigned int vpe_poll(struct file *file,
			     struct poll_table_struct *wait)
{
	struct vpe_ctx *ctx = file2ctx(file);
	struct vpe_dev *dev = ctx->dev;
	int ret;

	mutex_lock(&dev->dev_mutex);
	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
	mutex_unlock(&dev->dev_mutex);
	return ret;
}
1875
/* mmap(): serialize with the driver mutex (interruptibly) and delegate. */
static int vpe_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vpe_ctx *ctx = file2ctx(file);
	struct vpe_dev *dev = ctx->dev;
	int ret;

	if (mutex_lock_interruptible(&dev->dev_mutex))
		return -ERESTARTSYS;
	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
	mutex_unlock(&dev->dev_mutex);
	return ret;
}
1888
/* File operations for the VPE video device node. */
static const struct v4l2_file_operations vpe_fops = {
	.owner		= THIS_MODULE,
	.open		= vpe_open,
	.release	= vpe_release,
	.poll		= vpe_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= vpe_mmap,
};
1897
/* Template video_device; copied into dev->vfd at probe time. */
static struct video_device vpe_videodev = {
	.name		= VPE_MODULE_NAME,
	.fops		= &vpe_fops,
	.ioctl_ops	= &vpe_ioctl_ops,
	.minor		= -1,
	.release	= video_device_release,
	.vfl_dir	= VFL_DIR_M2M,
};
1906
/* mem2mem framework callbacks for job scheduling and locking. */
static struct v4l2_m2m_ops m2m_ops = {
	.device_run	= device_run,
	.job_ready	= job_ready,
	.job_abort	= job_abort,
	.lock		= vpe_lock,
	.unlock		= vpe_unlock,
};
1914
1915static int vpe_runtime_get(struct platform_device *pdev)
1916{
1917 int r;
1918
1919 dev_dbg(&pdev->dev, "vpe_runtime_get\n");
1920
1921 r = pm_runtime_get_sync(&pdev->dev);
1922 WARN_ON(r < 0);
1923 return r < 0 ? r : 0;
1924}
1925
1926static void vpe_runtime_put(struct platform_device *pdev)
1927{
1928
1929 int r;
1930
1931 dev_dbg(&pdev->dev, "vpe_runtime_put\n");
1932
1933 r = pm_runtime_put_sync(&pdev->dev);
1934 WARN_ON(r < 0 && r != -ENOSYS);
1935}
1936
1937static int vpe_probe(struct platform_device *pdev)
1938{
1939 struct vpe_dev *dev;
1940 struct video_device *vfd;
1941 struct resource *res;
1942 int ret, irq, func;
1943
1944 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1945 if (IS_ERR(dev))
1946 return PTR_ERR(dev);
1947
1948 spin_lock_init(&dev->lock);
1949
1950 ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
1951 if (ret)
1952 return ret;
1953
1954 atomic_set(&dev->num_instances, 0);
1955 mutex_init(&dev->dev_mutex);
1956
1957 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top");
1958 /*
1959 * HACK: we get resource info from device tree in the form of a list of
1960 * VPE sub blocks, the driver currently uses only the base of vpe_top
1961 * for register access, the driver should be changed later to access
1962 * registers based on the sub block base addresses
1963 */
1964 dev->base = devm_ioremap(&pdev->dev, res->start, SZ_32K);
1965 if (IS_ERR(dev->base)) {
1966 ret = PTR_ERR(dev->base);
1967 goto v4l2_dev_unreg;
1968 }
1969
1970 irq = platform_get_irq(pdev, 0);
1971 ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
1972 dev);
1973 if (ret)
1974 goto v4l2_dev_unreg;
1975
1976 platform_set_drvdata(pdev, dev);
1977
1978 dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
1979 if (IS_ERR(dev->alloc_ctx)) {
1980 vpe_err(dev, "Failed to alloc vb2 context\n");
1981 ret = PTR_ERR(dev->alloc_ctx);
1982 goto v4l2_dev_unreg;
1983 }
1984
1985 dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
1986 if (IS_ERR(dev->m2m_dev)) {
1987 vpe_err(dev, "Failed to init mem2mem device\n");
1988 ret = PTR_ERR(dev->m2m_dev);
1989 goto rel_ctx;
1990 }
1991
1992 pm_runtime_enable(&pdev->dev);
1993
1994 ret = vpe_runtime_get(pdev);
1995 if (ret)
1996 goto rel_m2m;
1997
1998 /* Perform clk enable followed by reset */
1999 vpe_set_clock_enable(dev, 1);
2000
2001 vpe_top_reset(dev);
2002
2003 func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
2004 VPE_PID_FUNC_SHIFT);
2005 vpe_dbg(dev, "VPE PID function %x\n", func);
2006
2007 vpe_top_vpdma_reset(dev);
2008
2009 dev->vpdma = vpdma_create(pdev);
2010 if (IS_ERR(dev->vpdma))
2011 goto runtime_put;
2012
2013 vfd = &dev->vfd;
2014 *vfd = vpe_videodev;
2015 vfd->lock = &dev->dev_mutex;
2016 vfd->v4l2_dev = &dev->v4l2_dev;
2017
2018 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
2019 if (ret) {
2020 vpe_err(dev, "Failed to register video device\n");
2021 goto runtime_put;
2022 }
2023
2024 video_set_drvdata(vfd, dev);
2025 snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name);
2026 dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
2027 vfd->num);
2028
2029 return 0;
2030
2031runtime_put:
2032 vpe_runtime_put(pdev);
2033rel_m2m:
2034 pm_runtime_disable(&pdev->dev);
2035 v4l2_m2m_release(dev->m2m_dev);
2036rel_ctx:
2037 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
2038v4l2_dev_unreg:
2039 v4l2_device_unregister(&dev->v4l2_dev);
2040
2041 return ret;
2042}
2043
/*
 * vpe_remove() - unbind the device: release m2m/video/v4l2 resources,
 * then power the hardware back down (reverse of probe).
 */
static int vpe_remove(struct platform_device *pdev)
{
	struct vpe_dev *dev =
		(struct vpe_dev *) platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME);

	v4l2_m2m_release(dev->m2m_dev);
	video_unregister_device(&dev->vfd);
	v4l2_device_unregister(&dev->v4l2_dev);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);

	vpe_set_clock_enable(dev, 0);
	vpe_runtime_put(pdev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
2062
/* Device tree match table; NULL when the kernel is built without OF. */
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
	{
		.compatible = "ti,vpe",
	},
	{},
};
#else
#define vpe_of_match NULL
#endif
2073
/* Platform driver glue for the VPE device. */
static struct platform_driver vpe_pdrv = {
	.probe		= vpe_probe,
	.remove		= vpe_remove,
	.driver		= {
		.name	= VPE_MODULE_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= vpe_of_match,
	},
};
2083
/* Module exit: unregister the platform driver. */
static void __exit vpe_exit(void)
{
	platform_driver_unregister(&vpe_pdrv);
}
2088
/* Module init: register the platform driver. */
static int __init vpe_init(void)
{
	return platform_driver_register(&vpe_pdrv);
}
2093
/* Standard module entry points and metadata. */
module_init(vpe_init);
module_exit(vpe_exit);

MODULE_DESCRIPTION("TI VPE driver");
MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h
new file mode 100644
index 000000000000..ed214e828398
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpe_regs.h
@@ -0,0 +1,496 @@
/*
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#ifndef __TI_VPE_REGS_H
#define __TI_VPE_REGS_H

/*
 * VPE register offsets and field selectors.
 *
 * Convention used throughout: each register gets a byte offset macro,
 * and each field gets a _MASK (field width, unshifted) plus a _SHIFT
 * (bit position) pair.
 */

/* VPE top level regs */
#define VPE_PID				0x0000
#define VPE_PID_MINOR_MASK		0x3f
#define VPE_PID_MINOR_SHIFT		0
#define VPE_PID_CUSTOM_MASK		0x03
#define VPE_PID_CUSTOM_SHIFT		6
#define VPE_PID_MAJOR_MASK		0x07
#define VPE_PID_MAJOR_SHIFT		8
#define VPE_PID_RTL_MASK		0x1f
#define VPE_PID_RTL_SHIFT		11
#define VPE_PID_FUNC_MASK		0xfff
#define VPE_PID_FUNC_SHIFT		16
#define VPE_PID_SCHEME_MASK		0x03
#define VPE_PID_SCHEME_SHIFT		30

#define VPE_SYSCONFIG			0x0010
#define VPE_SYSCONFIG_IDLE_MASK		0x03
#define VPE_SYSCONFIG_IDLE_SHIFT	2
#define VPE_SYSCONFIG_STANDBY_MASK	0x03
#define VPE_SYSCONFIG_STANDBY_SHIFT	4
/* values for the SYSCONFIG IDLE and STANDBY fields above */
#define VPE_FORCE_IDLE_MODE		0
#define VPE_NO_IDLE_MODE		1
#define VPE_SMART_IDLE_MODE		2
#define VPE_SMART_IDLE_WAKEUP_MODE	3
#define VPE_FORCE_STANDBY_MODE		0
#define VPE_NO_STANDBY_MODE		1
#define VPE_SMART_STANDBY_MODE		2
#define VPE_SMART_STANDBY_WAKEUP_MODE	3

/*
 * Interrupt registers come in SET/CLR pairs; reading either returns the
 * same status, so the plain-name aliases below just pick one offset.
 */
#define VPE_INT0_STATUS0_RAW_SET	0x0020
#define VPE_INT0_STATUS0_RAW		VPE_INT0_STATUS0_RAW_SET
#define VPE_INT0_STATUS0_CLR		0x0028
#define VPE_INT0_STATUS0		VPE_INT0_STATUS0_CLR
#define VPE_INT0_ENABLE0_SET		0x0030
#define VPE_INT0_ENABLE0		VPE_INT0_ENABLE0_SET
#define VPE_INT0_ENABLE0_CLR		0x0038
#define VPE_INT0_LIST0_COMPLETE		(1 << 0)
#define VPE_INT0_LIST0_NOTIFY		(1 << 1)
#define VPE_INT0_LIST1_COMPLETE		(1 << 2)
#define VPE_INT0_LIST1_NOTIFY		(1 << 3)
#define VPE_INT0_LIST2_COMPLETE		(1 << 4)
#define VPE_INT0_LIST2_NOTIFY		(1 << 5)
#define VPE_INT0_LIST3_COMPLETE		(1 << 6)
#define VPE_INT0_LIST3_NOTIFY		(1 << 7)
#define VPE_INT0_LIST4_COMPLETE		(1 << 8)
#define VPE_INT0_LIST4_NOTIFY		(1 << 9)
#define VPE_INT0_LIST5_COMPLETE		(1 << 10)
#define VPE_INT0_LIST5_NOTIFY		(1 << 11)
#define VPE_INT0_LIST6_COMPLETE		(1 << 12)
#define VPE_INT0_LIST6_NOTIFY		(1 << 13)
#define VPE_INT0_LIST7_COMPLETE		(1 << 14)
#define VPE_INT0_LIST7_NOTIFY		(1 << 15)
#define VPE_INT0_DESCRIPTOR		(1 << 16)
#define VPE_DEI_FMD_INT			(1 << 18)

#define VPE_INT0_STATUS1_RAW_SET	0x0024
#define VPE_INT0_STATUS1_RAW		VPE_INT0_STATUS1_RAW_SET
#define VPE_INT0_STATUS1_CLR		0x002c
#define VPE_INT0_STATUS1		VPE_INT0_STATUS1_CLR
#define VPE_INT0_ENABLE1_SET		0x0034
#define VPE_INT0_ENABLE1		VPE_INT0_ENABLE1_SET
#define VPE_INT0_ENABLE1_CLR		0x003c
#define VPE_INT0_CHANNEL_GROUP0		(1 << 0)
#define VPE_INT0_CHANNEL_GROUP1		(1 << 1)
#define VPE_INT0_CHANNEL_GROUP2		(1 << 2)
#define VPE_INT0_CHANNEL_GROUP3		(1 << 3)
#define VPE_INT0_CHANNEL_GROUP4		(1 << 4)
#define VPE_INT0_CHANNEL_GROUP5		(1 << 5)
#define VPE_INT0_CLIENT			(1 << 7)
#define VPE_DEI_ERROR_INT		(1 << 16)
#define VPE_DS1_UV_ERROR_INT		(1 << 22)

#define VPE_INTC_EOI			0x00a0

#define VPE_CLK_ENABLE			0x0100
/*
 * NOTE(review): "VPEDMA" here vs "VPDMA" in the CLK_RESET macros below
 * looks like a naming inconsistency (the rest of the driver calls the
 * DMA engine VPDMA) — confirm intended spelling before renaming.
 */
#define VPE_VPEDMA_CLK_ENABLE		(1 << 0)
#define VPE_DATA_PATH_CLK_ENABLE	(1 << 1)

#define VPE_CLK_RESET			0x0104
#define VPE_VPDMA_CLK_RESET_MASK	0x1
#define VPE_VPDMA_CLK_RESET_SHIFT	0
#define VPE_DATA_PATH_CLK_RESET_MASK	0x1
#define VPE_DATA_PATH_CLK_RESET_SHIFT	1
#define VPE_MAIN_RESET_MASK		0x1
#define VPE_MAIN_RESET_SHIFT		31

#define VPE_CLK_FORMAT_SELECT		0x010c
#define VPE_CSC_SRC_SELECT_MASK		0x03
#define VPE_CSC_SRC_SELECT_SHIFT	0
#define VPE_RGB_OUT_SELECT		(1 << 8)
#define VPE_DS_SRC_SELECT_MASK		0x07
#define VPE_DS_SRC_SELECT_SHIFT		9
#define VPE_DS_BYPASS			(1 << 16)
#define VPE_COLOR_SEPARATE_422		(1 << 18)

/* pre-shifted source-select values for CLK_FORMAT_SELECT */
#define VPE_DS_SRC_DEI_SCALER		(5 << VPE_DS_SRC_SELECT_SHIFT)
#define VPE_CSC_SRC_DEI_SCALER		(3 << VPE_CSC_SRC_SELECT_SHIFT)

#define VPE_CLK_RANGE_MAP		0x011c
#define VPE_RANGE_RANGE_MAP_Y_MASK	0x07
#define VPE_RANGE_RANGE_MAP_Y_SHIFT	0
#define VPE_RANGE_RANGE_MAP_UV_MASK	0x07
#define VPE_RANGE_RANGE_MAP_UV_SHIFT	3
#define VPE_RANGE_MAP_ON		(1 << 6)
#define VPE_RANGE_REDUCTION_ON		(1 << 28)

/*
 * VPE chrominance upsampler regs.
 * Three upsampler instances (US1/US2/US3) share an identical register
 * layout at 0x0300/0x0400/0x0500, so each Rn row defines the three
 * instance offsets followed by one shared set of field macros.
 */
#define VPE_US1_R0			0x0304
#define VPE_US2_R0			0x0404
#define VPE_US3_R0			0x0504
#define VPE_US_C1_MASK			0x3fff
#define VPE_US_C1_SHIFT			2
#define VPE_US_C0_MASK			0x3fff
#define VPE_US_C0_SHIFT			18
#define VPE_US_MODE_MASK		0x03
#define VPE_US_MODE_SHIFT		16
#define VPE_ANCHOR_FID0_C1_MASK		0x3fff
#define VPE_ANCHOR_FID0_C1_SHIFT	2
#define VPE_ANCHOR_FID0_C0_MASK		0x3fff
#define VPE_ANCHOR_FID0_C0_SHIFT	18

#define VPE_US1_R1			0x0308
#define VPE_US2_R1			0x0408
#define VPE_US3_R1			0x0508
#define VPE_ANCHOR_FID0_C3_MASK		0x3fff
#define VPE_ANCHOR_FID0_C3_SHIFT	2
#define VPE_ANCHOR_FID0_C2_MASK		0x3fff
#define VPE_ANCHOR_FID0_C2_SHIFT	18

#define VPE_US1_R2			0x030c
#define VPE_US2_R2			0x040c
#define VPE_US3_R2			0x050c
#define VPE_INTERP_FID0_C1_MASK		0x3fff
#define VPE_INTERP_FID0_C1_SHIFT	2
#define VPE_INTERP_FID0_C0_MASK		0x3fff
#define VPE_INTERP_FID0_C0_SHIFT	18

#define VPE_US1_R3			0x0310
#define VPE_US2_R3			0x0410
#define VPE_US3_R3			0x0510
#define VPE_INTERP_FID0_C3_MASK		0x3fff
#define VPE_INTERP_FID0_C3_SHIFT	2
#define VPE_INTERP_FID0_C2_MASK		0x3fff
#define VPE_INTERP_FID0_C2_SHIFT	18

#define VPE_US1_R4			0x0314
#define VPE_US2_R4			0x0414
#define VPE_US3_R4			0x0514
#define VPE_ANCHOR_FID1_C1_MASK		0x3fff
#define VPE_ANCHOR_FID1_C1_SHIFT	2
#define VPE_ANCHOR_FID1_C0_MASK		0x3fff
#define VPE_ANCHOR_FID1_C0_SHIFT	18

#define VPE_US1_R5			0x0318
#define VPE_US2_R5			0x0418
#define VPE_US3_R5			0x0518
#define VPE_ANCHOR_FID1_C3_MASK		0x3fff
#define VPE_ANCHOR_FID1_C3_SHIFT	2
#define VPE_ANCHOR_FID1_C2_MASK		0x3fff
#define VPE_ANCHOR_FID1_C2_SHIFT	18

#define VPE_US1_R6			0x031c
#define VPE_US2_R6			0x041c
#define VPE_US3_R6			0x051c
#define VPE_INTERP_FID1_C1_MASK		0x3fff
#define VPE_INTERP_FID1_C1_SHIFT	2
#define VPE_INTERP_FID1_C0_MASK		0x3fff
#define VPE_INTERP_FID1_C0_SHIFT	18

#define VPE_US1_R7			0x0320
#define VPE_US2_R7			0x0420
#define VPE_US3_R7			0x0520
/*
 * NOTE(review): the four macros below are byte-identical redefinitions
 * of the R3 field macros above (legal in C, no effect). Following the
 * R0-R3 FID0 / R4-R7 FID1 pattern, R7 would be expected to carry the
 * FID1 C2/C3 interp coefficients — looks like a copy-paste slip; confirm
 * against the TRM before renaming.
 */
#define VPE_INTERP_FID0_C3_MASK		0x3fff
#define VPE_INTERP_FID0_C3_SHIFT	2
#define VPE_INTERP_FID0_C2_MASK		0x3fff
#define VPE_INTERP_FID0_C2_SHIFT	18

/* VPE de-interlacer regs */
#define VPE_DEI_FRAME_SIZE		0x0600
#define VPE_DEI_WIDTH_MASK		0x07ff
#define VPE_DEI_WIDTH_SHIFT		0
#define VPE_DEI_HEIGHT_MASK		0x07ff
#define VPE_DEI_HEIGHT_SHIFT		16
#define VPE_DEI_INTERLACE_BYPASS	(1 << 29)
#define VPE_DEI_FIELD_FLUSH		(1 << 30)
#define VPE_DEI_PROGRESSIVE		(1 << 31)

#define VPE_MDT_BYPASS			0x0604
#define VPE_MDT_TEMPMAX_BYPASS		(1 << 0)
#define VPE_MDT_SPATMAX_BYPASS		(1 << 1)

#define VPE_MDT_SF_THRESHOLD		0x0608
/*
 * NOTE(review): THR1/THR2/THR3 all use shift 0 — either the three
 * thresholds genuinely alias the same byte or the shifts for THR2/THR3
 * were left at 0 by mistake; confirm against the TRM.
 */
#define VPE_MDT_SF_SC_THR1_MASK		0xff
#define VPE_MDT_SF_SC_THR1_SHIFT	0
#define VPE_MDT_SF_SC_THR2_MASK		0xff
#define VPE_MDT_SF_SC_THR2_SHIFT	0
#define VPE_MDT_SF_SC_THR3_MASK		0xff
#define VPE_MDT_SF_SC_THR3_SHIFT	0

#define VPE_EDI_CONFIG			0x060c
#define VPE_EDI_INP_MODE_MASK		0x03
#define VPE_EDI_INP_MODE_SHIFT		0
#define VPE_EDI_ENABLE_3D		(1 << 2)
#define VPE_EDI_ENABLE_CHROMA_3D	(1 << 3)
#define VPE_EDI_CHROMA3D_COR_THR_MASK	0xff
#define VPE_EDI_CHROMA3D_COR_THR_SHIFT	8
#define VPE_EDI_DIR_COR_LOWER_THR_MASK	0xff
#define VPE_EDI_DIR_COR_LOWER_THR_SHIFT	16
#define VPE_EDI_COR_SCALE_FACTOR_MASK	0xff
#define VPE_EDI_COR_SCALE_FACTOR_SHIFT	23

#define VPE_DEI_EDI_LUT_R0		0x0610
#define VPE_EDI_LUT0_MASK		0x1f
#define VPE_EDI_LUT0_SHIFT		0
#define VPE_EDI_LUT1_MASK		0x1f
#define VPE_EDI_LUT1_SHIFT		8
#define VPE_EDI_LUT2_MASK		0x1f
#define VPE_EDI_LUT2_SHIFT		16
#define VPE_EDI_LUT3_MASK		0x1f
#define VPE_EDI_LUT3_SHIFT		24

#define VPE_DEI_EDI_LUT_R1		0x0614
/*
 * NOTE(review): LUT0..LUT3 below are identical redefinitions of the R0
 * field macros above — benign, but redundant; R1 presumably holds
 * entries 4..7 in vpe.c's usage of LUT words, verify against callers.
 */
#define VPE_EDI_LUT0_MASK		0x1f
#define VPE_EDI_LUT0_SHIFT		0
#define VPE_EDI_LUT1_MASK		0x1f
#define VPE_EDI_LUT1_SHIFT		8
#define VPE_EDI_LUT2_MASK		0x1f
#define VPE_EDI_LUT2_SHIFT		16
#define VPE_EDI_LUT3_MASK		0x1f
#define VPE_EDI_LUT3_SHIFT		24

#define VPE_DEI_EDI_LUT_R2		0x0618
#define VPE_EDI_LUT4_MASK		0x1f
#define VPE_EDI_LUT4_SHIFT		0
#define VPE_EDI_LUT5_MASK		0x1f
#define VPE_EDI_LUT5_SHIFT		8
#define VPE_EDI_LUT6_MASK		0x1f
#define VPE_EDI_LUT6_SHIFT		16
#define VPE_EDI_LUT7_MASK		0x1f
#define VPE_EDI_LUT7_SHIFT		24

#define VPE_DEI_EDI_LUT_R3		0x061c
#define VPE_EDI_LUT8_MASK		0x1f
#define VPE_EDI_LUT8_SHIFT		0
#define VPE_EDI_LUT9_MASK		0x1f
#define VPE_EDI_LUT9_SHIFT		8
#define VPE_EDI_LUT10_MASK		0x1f
#define VPE_EDI_LUT10_SHIFT		16
#define VPE_EDI_LUT11_MASK		0x1f
#define VPE_EDI_LUT11_SHIFT		24

/* FMD = film-mode detection window/control/status registers */
#define VPE_DEI_FMD_WINDOW_R0		0x0620
#define VPE_FMD_WINDOW_MINX_MASK	0x07ff
#define VPE_FMD_WINDOW_MINX_SHIFT	0
#define VPE_FMD_WINDOW_MAXX_MASK	0x07ff
#define VPE_FMD_WINDOW_MAXX_SHIFT	16
#define VPE_FMD_WINDOW_ENABLE		(1 << 31)

#define VPE_DEI_FMD_WINDOW_R1		0x0624
#define VPE_FMD_WINDOW_MINY_MASK	0x07ff
#define VPE_FMD_WINDOW_MINY_SHIFT	0
#define VPE_FMD_WINDOW_MAXY_MASK	0x07ff
#define VPE_FMD_WINDOW_MAXY_SHIFT	16

#define VPE_DEI_FMD_CONTROL_R0		0x0628
#define VPE_FMD_ENABLE			(1 << 0)
#define VPE_FMD_LOCK			(1 << 1)
#define VPE_FMD_JAM_DIR			(1 << 2)
#define VPE_FMD_BED_ENABLE		(1 << 3)
#define VPE_FMD_CAF_FIELD_THR_MASK	0xff
#define VPE_FMD_CAF_FIELD_THR_SHIFT	16
#define VPE_FMD_CAF_LINE_THR_MASK	0xff
#define VPE_FMD_CAF_LINE_THR_SHIFT	24

#define VPE_DEI_FMD_CONTROL_R1		0x062c
#define VPE_FMD_CAF_THR_MASK		0x000fffff
#define VPE_FMD_CAF_THR_SHIFT		0

#define VPE_DEI_FMD_STATUS_R0		0x0630
#define VPE_FMD_CAF_MASK		0x000fffff
#define VPE_FMD_CAF_SHIFT		0
#define VPE_FMD_RESET			(1 << 24)

#define VPE_DEI_FMD_STATUS_R1		0x0634
#define VPE_FMD_FIELD_DIFF_MASK		0x0fffffff
#define VPE_FMD_FIELD_DIFF_SHIFT	0

#define VPE_DEI_FMD_STATUS_R2		0x0638
#define VPE_FMD_FRAME_DIFF_MASK		0x000fffff
#define VPE_FMD_FRAME_DIFF_SHIFT	0

/* VPE scaler regs */
#define VPE_SC_MP_SC0			0x0700
#define VPE_INTERLACE_O			(1 << 0)
#define VPE_LINEAR			(1 << 1)
#define VPE_SC_BYPASS			(1 << 2)
#define VPE_INVT_FID			(1 << 3)
#define VPE_USE_RAV			(1 << 4)
#define VPE_ENABLE_EV			(1 << 5)
#define VPE_AUTO_HS			(1 << 6)
#define VPE_DCM_2X			(1 << 7)
#define VPE_DCM_4X			(1 << 8)
#define VPE_HP_BYPASS			(1 << 9)
#define VPE_INTERLACE_I			(1 << 10)
#define VPE_ENABLE_SIN2_VER_INTP	(1 << 11)
#define VPE_Y_PK_EN			(1 << 14)
#define VPE_TRIM			(1 << 15)
#define VPE_SELFGEN_FID			(1 << 16)

#define VPE_SC_MP_SC1			0x0704
#define VPE_ROW_ACC_INC_MASK		0x07ffffff
#define VPE_ROW_ACC_INC_SHIFT		0

#define VPE_SC_MP_SC2			0x0708
#define VPE_ROW_ACC_OFFSET_MASK		0x0fffffff
#define VPE_ROW_ACC_OFFSET_SHIFT	0

#define VPE_SC_MP_SC3			0x070c
#define VPE_ROW_ACC_OFFSET_B_MASK	0x0fffffff
#define VPE_ROW_ACC_OFFSET_B_SHIFT	0

#define VPE_SC_MP_SC4			0x0710
#define VPE_TAR_H_MASK			0x07ff
#define VPE_TAR_H_SHIFT			0
#define VPE_TAR_W_MASK			0x07ff
#define VPE_TAR_W_SHIFT			12
#define VPE_LIN_ACC_INC_U_MASK		0x07
#define VPE_LIN_ACC_INC_U_SHIFT		24
#define VPE_NLIN_ACC_INIT_U_MASK	0x07
#define VPE_NLIN_ACC_INIT_U_SHIFT	28

#define VPE_SC_MP_SC5			0x0714
#define VPE_SRC_H_MASK			0x07ff
#define VPE_SRC_H_SHIFT			0
#define VPE_SRC_W_MASK			0x07ff
#define VPE_SRC_W_SHIFT			12
#define VPE_NLIN_ACC_INC_U_MASK		0x07
#define VPE_NLIN_ACC_INC_U_SHIFT	24

#define VPE_SC_MP_SC6			0x0718
#define VPE_ROW_ACC_INIT_RAV_MASK	0x03ff
#define VPE_ROW_ACC_INIT_RAV_SHIFT	0
#define VPE_ROW_ACC_INIT_RAV_B_MASK	0x03ff
#define VPE_ROW_ACC_INIT_RAV_B_SHIFT	10

/* note: there is no SC7 register in this map; offsets jump 0x0718->0x0720 */
#define VPE_SC_MP_SC8			0x0720
#define VPE_NLIN_LEFT_MASK		0x07ff
#define VPE_NLIN_LEFT_SHIFT		0
#define VPE_NLIN_RIGHT_MASK		0x07ff
#define VPE_NLIN_RIGHT_SHIFT		12

#define VPE_SC_MP_SC9			0x0724
#define VPE_LIN_ACC_INC			VPE_SC_MP_SC9

#define VPE_SC_MP_SC10			0x0728
#define VPE_NLIN_ACC_INIT		VPE_SC_MP_SC10

#define VPE_SC_MP_SC11			0x072c
#define VPE_NLIN_ACC_INC		VPE_SC_MP_SC11

#define VPE_SC_MP_SC12			0x0730
#define VPE_COL_ACC_OFFSET_MASK		0x01ffffff
#define VPE_COL_ACC_OFFSET_SHIFT	0

#define VPE_SC_MP_SC13			0x0734
#define VPE_SC_FACTOR_RAV_MASK		0x03ff
#define VPE_SC_FACTOR_RAV_SHIFT		0
#define VPE_CHROMA_INTP_THR_MASK	0x03ff
#define VPE_CHROMA_INTP_THR_SHIFT	12
#define VPE_DELTA_CHROMA_THR_MASK	0x0f
#define VPE_DELTA_CHROMA_THR_SHIFT	24

#define VPE_SC_MP_SC17			0x0744
#define VPE_EV_THR_MASK			0x03ff
#define VPE_EV_THR_SHIFT		12
#define VPE_DELTA_LUMA_THR_MASK		0x0f
#define VPE_DELTA_LUMA_THR_SHIFT	24
#define VPE_DELTA_EV_THR_MASK		0x0f
#define VPE_DELTA_EV_THR_SHIFT		28

#define VPE_SC_MP_SC18			0x0748
#define VPE_HS_FACTOR_MASK		0x03ff
#define VPE_HS_FACTOR_SHIFT		0
#define VPE_CONF_DEFAULT_MASK		0x01ff
#define VPE_CONF_DEFAULT_SHIFT		16

#define VPE_SC_MP_SC19			0x074c
#define VPE_HPF_COEFF0_MASK		0xff
#define VPE_HPF_COEFF0_SHIFT		0
#define VPE_HPF_COEFF1_MASK		0xff
#define VPE_HPF_COEFF1_SHIFT		8
#define VPE_HPF_COEFF2_MASK		0xff
#define VPE_HPF_COEFF2_SHIFT		16
#define VPE_HPF_COEFF3_MASK		0xff
/* NOTE(review): shift 23 breaks the 0/8/16/24 byte pattern — verify (typo for 24?) */
#define VPE_HPF_COEFF3_SHIFT		23

#define VPE_SC_MP_SC20			0x0750
#define VPE_HPF_COEFF4_MASK		0xff
#define VPE_HPF_COEFF4_SHIFT		0
#define VPE_HPF_COEFF5_MASK		0xff
#define VPE_HPF_COEFF5_SHIFT		8
#define VPE_HPF_NORM_SHIFT_MASK		0x07
#define VPE_HPF_NORM_SHIFT_SHIFT	16
#define VPE_NL_LIMIT_MASK		0x1ff
#define VPE_NL_LIMIT_SHIFT		20

#define VPE_SC_MP_SC21			0x0754
#define VPE_NL_LO_THR_MASK		0x01ff
#define VPE_NL_LO_THR_SHIFT		0
#define VPE_NL_LO_SLOPE_MASK		0xff
#define VPE_NL_LO_SLOPE_SHIFT		16

#define VPE_SC_MP_SC22			0x0758
#define VPE_NL_HI_THR_MASK		0x01ff
#define VPE_NL_HI_THR_SHIFT		0
#define VPE_NL_HI_SLOPE_SH_MASK		0x07
#define VPE_NL_HI_SLOPE_SH_SHIFT	16

#define VPE_SC_MP_SC23			0x075c
#define VPE_GRADIENT_THR_MASK		0x07ff
#define VPE_GRADIENT_THR_SHIFT		0
#define VPE_GRADIENT_THR_RANGE_MASK	0x0f
#define VPE_GRADIENT_THR_RANGE_SHIFT	12
#define VPE_MIN_GY_THR_MASK		0xff
#define VPE_MIN_GY_THR_SHIFT		16
#define VPE_MIN_GY_THR_RANGE_MASK	0x0f
#define VPE_MIN_GY_THR_RANGE_SHIFT	28

#define VPE_SC_MP_SC24			0x0760
#define VPE_ORG_H_MASK			0x07ff
#define VPE_ORG_H_SHIFT			0
#define VPE_ORG_W_MASK			0x07ff
#define VPE_ORG_W_SHIFT			16

#define VPE_SC_MP_SC25			0x0764
#define VPE_OFF_H_MASK			0x07ff
#define VPE_OFF_H_SHIFT			0
#define VPE_OFF_W_MASK			0x07ff
#define VPE_OFF_W_SHIFT			16

/*
 * VPE color space converter regs.
 * 3x3 matrix coefficients A/B/C per row plus D offsets, packed two
 * fields per 32-bit register.
 */
#define VPE_CSC_CSC00			0x5700
#define VPE_CSC_A0_MASK			0x1fff
#define VPE_CSC_A0_SHIFT		0
#define VPE_CSC_B0_MASK			0x1fff
#define VPE_CSC_B0_SHIFT		16

#define VPE_CSC_CSC01			0x5704
#define VPE_CSC_C0_MASK			0x1fff
#define VPE_CSC_C0_SHIFT		0
#define VPE_CSC_A1_MASK			0x1fff
#define VPE_CSC_A1_SHIFT		16

#define VPE_CSC_CSC02			0x5708
#define VPE_CSC_B1_MASK			0x1fff
#define VPE_CSC_B1_SHIFT		0
#define VPE_CSC_C1_MASK			0x1fff
#define VPE_CSC_C1_SHIFT		16

#define VPE_CSC_CSC03			0x570c
#define VPE_CSC_A2_MASK			0x1fff
#define VPE_CSC_A2_SHIFT		0
#define VPE_CSC_B2_MASK			0x1fff
#define VPE_CSC_B2_SHIFT		16

#define VPE_CSC_CSC04			0x5710
#define VPE_CSC_C2_MASK			0x1fff
#define VPE_CSC_C2_SHIFT		0
#define VPE_CSC_D0_MASK			0x0fff
#define VPE_CSC_D0_SHIFT		16

#define VPE_CSC_CSC05			0x5714
#define VPE_CSC_D1_MASK			0x0fff
#define VPE_CSC_D1_SHIFT		0
#define VPE_CSC_D2_MASK			0x0fff
#define VPE_CSC_D2_SHIFT		16
#define VPE_CSC_BYPASS			(1 << 28)

#endif
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index b557caf5b1a4..6a74ce040d28 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -403,7 +403,7 @@ static int timblogiw_s_input(struct file *file, void *priv, unsigned int input)
403 return 0; 403 return 0;
404} 404}
405 405
406static int timblogiw_streamon(struct file *file, void *priv, unsigned int type) 406static int timblogiw_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
407{ 407{
408 struct video_device *vdev = video_devdata(file); 408 struct video_device *vdev = video_devdata(file);
409 struct timblogiw_fh *fh = priv; 409 struct timblogiw_fh *fh = priv;
@@ -420,7 +420,7 @@ static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
420} 420}
421 421
422static int timblogiw_streamoff(struct file *file, void *priv, 422static int timblogiw_streamoff(struct file *file, void *priv,
423 unsigned int type) 423 enum v4l2_buf_type type)
424{ 424{
425 struct video_device *vdev = video_devdata(file); 425 struct video_device *vdev = video_devdata(file);
426 struct timblogiw_fh *fh = priv; 426 struct timblogiw_fh *fh = priv;