aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorArchit Taneja <archit@ti.com>2013-10-16 01:36:47 -0400
committerMauro Carvalho Chehab <m.chehab@samsung.com>2013-10-28 13:17:30 -0400
commit4571912743ac6a04a6644e5a292bb9876bb5329b (patch)
tree6fb0b3c6ccd39e6c60336e7410d920eeeec593c8
parent213b8ee4001895dd60910c440f76682fb881b5cc (diff)
[media] v4l: ti-vpe: Add VPE mem to mem driver
VPE is a block which consists of a single memory to memory path which can perform chrominance up/down sampling, de-interlacing, scaling, and color space conversion of raster or tiled YUV420 coplanar, YUV422 coplanar or YUV422 interleaved video formats. We create a mem2mem driver based primarily on the mem2mem-testdev example. The de-interlacer, scaler and color space converter are all bypassed for now to keep the driver simple. Chroma up/down sampler blocks are implemented, so conversion between different YUV formats is possible. Each mem2mem context allocates a buffer for VPE MMR values which it will use when it gets access to the VPE HW via the mem2mem queue, it also allocates a VPDMA descriptor list to which configuration and data descriptors are added. Based on the information received via v4l2 ioctls for the source and destination queues, the driver configures the values for the MMRs, and stores them in the buffer. There are also some VPDMA parameters like frame start and line mode which need to be configured, these are configured by direct register writes via the VPDMA helper functions. The driver's device_run() mem2mem op will add each descriptor based on how the source and destination queues are set up for the given ctx, once the list is prepared, it's submitted to VPDMA, these descriptors when parsed by VPDMA will upload MMR registers, start DMA of video buffers on the various input and output clients/ports. When the list is parsed completely (and the DMAs on all the output ports done), an interrupt is generated which we use to notify that the source and destination buffers are done. The rest of the driver is quite similar to other mem2mem drivers, we use the multiplane v4l2 ioctls as the HW supports coplanar formats. Signed-off-by: Archit Taneja <archit@ti.com> Acked-by: Hans Verkuil <hans.verkuil@cisco.com> Signed-off-by: Kamil Debski <k.debski@samsung.com> Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
-rw-r--r--drivers/media/platform/Kconfig16
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/ti-vpe/Makefile5
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c1775
-rw-r--r--drivers/media/platform/ti-vpe/vpe_regs.h496
-rw-r--r--include/uapi/linux/v4l2-controls.h4
6 files changed, 2298 insertions, 0 deletions
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 29acc2d2aee6..2405ef7c6604 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -220,6 +220,22 @@ config VIDEO_RENESAS_VSP1
220 To compile this driver as a module, choose M here: the module 220 To compile this driver as a module, choose M here: the module
221 will be called vsp1. 221 will be called vsp1.
222 222
223config VIDEO_TI_VPE
224 tristate "TI VPE (Video Processing Engine) driver"
225 depends on VIDEO_DEV && VIDEO_V4L2 && SOC_DRA7XX
226 select VIDEOBUF2_DMA_CONTIG
227 select V4L2_MEM2MEM_DEV
228 default n
229 ---help---
230 Support for the TI VPE(Video Processing Engine) block
231 found on DRA7XX SoC.
232
233config VIDEO_TI_VPE_DEBUG
234 bool "VPE debug messages"
235 depends on VIDEO_TI_VPE
236 ---help---
237 Enable debug messages on VPE driver.
238
223endif # V4L_MEM2MEM_DRIVERS 239endif # V4L_MEM2MEM_DRIVERS
224 240
225menuconfig V4L_TEST_DRIVERS 241menuconfig V4L_TEST_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 4e4da482c522..1348ba1faf92 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_VIDEO_VIVI) += vivi.o
22 22
23obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o 23obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
24 24
25obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
26
25obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o 27obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
26obj-$(CONFIG_VIDEO_CODA) += coda.o 28obj-$(CONFIG_VIDEO_CODA) += coda.o
27 29
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
new file mode 100644
index 000000000000..cbf0a806ba1d
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
2
3ti-vpe-y := vpe.o vpdma.o
4
5ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
new file mode 100644
index 000000000000..3bd9ca658b54
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -0,0 +1,1775 @@
1/*
2 * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
3 *
4 * Copyright (c) 2013 Texas Instruments Inc.
5 * David Griego, <dagriego@biglakesoftware.com>
6 * Dale Farnsworth, <dale@farnsworth.org>
7 * Archit Taneja, <archit@ti.com>
8 *
9 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
10 * Pawel Osciak, <pawel@osciak.com>
11 * Marek Szyprowski, <m.szyprowski@samsung.com>
12 *
13 * Based on the virtual v4l2-mem2mem example device
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License version 2 as published by
17 * the Free Software Foundation
18 */
19
20#include <linux/delay.h>
21#include <linux/dma-mapping.h>
22#include <linux/err.h>
23#include <linux/fs.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/ioctl.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/pm_runtime.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/videodev2.h>
33
34#include <media/v4l2-common.h>
35#include <media/v4l2-ctrls.h>
36#include <media/v4l2-device.h>
37#include <media/v4l2-event.h>
38#include <media/v4l2-ioctl.h>
39#include <media/v4l2-mem2mem.h>
40#include <media/videobuf2-core.h>
41#include <media/videobuf2-dma-contig.h>
42
43#include "vpdma.h"
44#include "vpe_regs.h"
45
46#define VPE_MODULE_NAME "vpe"
47
48/* minimum and maximum frame sizes */
49#define MIN_W 128
50#define MIN_H 128
51#define MAX_W 1920
52#define MAX_H 1080
53
54/* required alignments */
55#define S_ALIGN 0 /* multiple of 1 */
56#define H_ALIGN 1 /* multiple of 2 */
57#define W_ALIGN 1 /* multiple of 2 */
58
59/* multiple of 128 bits, line stride, 16 bytes */
60#define L_ALIGN 4
61
62/* flags that indicate a format can be used for capture/output */
63#define VPE_FMT_TYPE_CAPTURE (1 << 0)
64#define VPE_FMT_TYPE_OUTPUT (1 << 1)
65
66/* used as plane indices */
67#define VPE_MAX_PLANES 2
68#define VPE_LUMA 0
69#define VPE_CHROMA 1
70
71/* per m2m context info */
72#define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */
73
74/*
 * each VPE context can need up to 3 config descriptors, 7 input descriptors,
76 * 3 output descriptors, and 10 control descriptors
77 */
78#define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \
79 13 * VPDMA_CFD_CTD_DESC_SIZE)
80
81#define vpe_dbg(vpedev, fmt, arg...) \
82 dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
83#define vpe_err(vpedev, fmt, arg...) \
84 dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
85
86struct vpe_us_coeffs {
87 unsigned short anchor_fid0_c0;
88 unsigned short anchor_fid0_c1;
89 unsigned short anchor_fid0_c2;
90 unsigned short anchor_fid0_c3;
91 unsigned short interp_fid0_c0;
92 unsigned short interp_fid0_c1;
93 unsigned short interp_fid0_c2;
94 unsigned short interp_fid0_c3;
95 unsigned short anchor_fid1_c0;
96 unsigned short anchor_fid1_c1;
97 unsigned short anchor_fid1_c2;
98 unsigned short anchor_fid1_c3;
99 unsigned short interp_fid1_c0;
100 unsigned short interp_fid1_c1;
101 unsigned short interp_fid1_c2;
102 unsigned short interp_fid1_c3;
103};
104
105/*
106 * Default upsampler coefficients
107 */
108static const struct vpe_us_coeffs us_coeffs[] = {
109 {
110 /* Coefficients for progressive input */
111 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
112 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
113 },
114};
115
116/*
117 * The port_data structure contains per-port data.
118 */
119struct vpe_port_data {
120 enum vpdma_channel channel; /* VPDMA channel */
121 u8 vb_part; /* plane index for co-panar formats */
122};
123
124/*
125 * Define indices into the port_data tables
126 */
127#define VPE_PORT_LUMA1_IN 0
128#define VPE_PORT_CHROMA1_IN 1
129#define VPE_PORT_LUMA_OUT 8
130#define VPE_PORT_CHROMA_OUT 9
131#define VPE_PORT_RGB_OUT 10
132
133static const struct vpe_port_data port_data[11] = {
134 [VPE_PORT_LUMA1_IN] = {
135 .channel = VPE_CHAN_LUMA1_IN,
136 .vb_part = VPE_LUMA,
137 },
138 [VPE_PORT_CHROMA1_IN] = {
139 .channel = VPE_CHAN_CHROMA1_IN,
140 .vb_part = VPE_CHROMA,
141 },
142 [VPE_PORT_LUMA_OUT] = {
143 .channel = VPE_CHAN_LUMA_OUT,
144 .vb_part = VPE_LUMA,
145 },
146 [VPE_PORT_CHROMA_OUT] = {
147 .channel = VPE_CHAN_CHROMA_OUT,
148 .vb_part = VPE_CHROMA,
149 },
150 [VPE_PORT_RGB_OUT] = {
151 .channel = VPE_CHAN_RGB_OUT,
152 .vb_part = VPE_LUMA,
153 },
154};
155
156
157/* driver info for each of the supported video formats */
158struct vpe_fmt {
159 char *name; /* human-readable name */
160 u32 fourcc; /* standard format identifier */
161 u8 types; /* CAPTURE and/or OUTPUT */
162 u8 coplanar; /* set for unpacked Luma and Chroma */
163 /* vpdma format info for each plane */
164 struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
165};
166
167static struct vpe_fmt vpe_formats[] = {
168 {
169 .name = "YUV 422 co-planar",
170 .fourcc = V4L2_PIX_FMT_NV16,
171 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
172 .coplanar = 1,
173 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
174 &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
175 },
176 },
177 {
178 .name = "YUV 420 co-planar",
179 .fourcc = V4L2_PIX_FMT_NV12,
180 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
181 .coplanar = 1,
182 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
183 &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
184 },
185 },
186 {
187 .name = "YUYV 422 packed",
188 .fourcc = V4L2_PIX_FMT_YUYV,
189 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
190 .coplanar = 0,
191 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
192 },
193 },
194 {
195 .name = "UYVY 422 packed",
196 .fourcc = V4L2_PIX_FMT_UYVY,
197 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
198 .coplanar = 0,
199 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
200 },
201 },
202};
203
204/*
205 * per-queue, driver-specific private data.
206 * there is one source queue and one destination queue for each m2m context.
207 */
208struct vpe_q_data {
209 unsigned int width; /* frame width */
210 unsigned int height; /* frame height */
211 unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
212 enum v4l2_colorspace colorspace;
213 unsigned int flags;
214 unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */
215 struct v4l2_rect c_rect; /* crop/compose rectangle */
216 struct vpe_fmt *fmt; /* format info */
217};
218
219/* vpe_q_data flag bits */
220#define Q_DATA_FRAME_1D (1 << 0)
221#define Q_DATA_MODE_TILED (1 << 1)
222
223enum {
224 Q_DATA_SRC = 0,
225 Q_DATA_DST = 1,
226};
227
228/* find our format description corresponding to the passed v4l2_format */
229static struct vpe_fmt *find_format(struct v4l2_format *f)
230{
231 struct vpe_fmt *fmt;
232 unsigned int k;
233
234 for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
235 fmt = &vpe_formats[k];
236 if (fmt->fourcc == f->fmt.pix.pixelformat)
237 return fmt;
238 }
239
240 return NULL;
241}
242
243/*
244 * there is one vpe_dev structure in the driver, it is shared by
245 * all instances.
246 */
247struct vpe_dev {
248 struct v4l2_device v4l2_dev;
249 struct video_device vfd;
250 struct v4l2_m2m_dev *m2m_dev;
251
252 atomic_t num_instances; /* count of driver instances */
253 dma_addr_t loaded_mmrs; /* shadow mmrs in device */
254 struct mutex dev_mutex;
255 spinlock_t lock;
256
257 int irq;
258 void __iomem *base;
259
260 struct vb2_alloc_ctx *alloc_ctx;
261 struct vpdma_data *vpdma; /* vpdma data handle */
262};
263
264/*
265 * There is one vpe_ctx structure for each m2m context.
266 */
267struct vpe_ctx {
268 struct v4l2_fh fh;
269 struct vpe_dev *dev;
270 struct v4l2_m2m_ctx *m2m_ctx;
271 struct v4l2_ctrl_handler hdl;
272
273 unsigned int sequence; /* current frame/field seq */
274 unsigned int aborting; /* abort after next irq */
275
276 unsigned int bufs_per_job; /* input buffers per batch */
277 unsigned int bufs_completed; /* bufs done in this batch */
278
279 struct vpe_q_data q_data[2]; /* src & dst queue data */
280 struct vb2_buffer *src_vb;
281 struct vb2_buffer *dst_vb;
282
283 struct vpdma_buf mmr_adb; /* shadow reg addr/data block */
284 struct vpdma_desc_list desc_list; /* DMA descriptor list */
285
286 bool load_mmrs; /* have new shadow reg values */
287};
288
289
290/*
291 * M2M devices get 2 queues.
292 * Return the queue given the type.
293 */
294static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
295 enum v4l2_buf_type type)
296{
297 switch (type) {
298 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
299 return &ctx->q_data[Q_DATA_SRC];
300 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
301 return &ctx->q_data[Q_DATA_DST];
302 default:
303 BUG();
304 }
305 return NULL;
306}
307
308static u32 read_reg(struct vpe_dev *dev, int offset)
309{
310 return ioread32(dev->base + offset);
311}
312
313static void write_reg(struct vpe_dev *dev, int offset, u32 value)
314{
315 iowrite32(value, dev->base + offset);
316}
317
318/* register field read/write helpers */
319static int get_field(u32 value, u32 mask, int shift)
320{
321 return (value & (mask << shift)) >> shift;
322}
323
324static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
325{
326 return get_field(read_reg(dev, offset), mask, shift);
327}
328
329static void write_field(u32 *valp, u32 field, u32 mask, int shift)
330{
331 u32 val = *valp;
332
333 val &= ~(mask << shift);
334 val |= (field & mask) << shift;
335 *valp = val;
336}
337
338static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
339 u32 mask, int shift)
340{
341 u32 val = read_reg(dev, offset);
342
343 write_field(&val, field, mask, shift);
344
345 write_reg(dev, offset, val);
346}
347
348/*
349 * DMA address/data block for the shadow registers
350 */
351struct vpe_mmr_adb {
352 struct vpdma_adb_hdr out_fmt_hdr;
353 u32 out_fmt_reg[1];
354 u32 out_fmt_pad[3];
355 struct vpdma_adb_hdr us1_hdr;
356 u32 us1_regs[8];
357 struct vpdma_adb_hdr us2_hdr;
358 u32 us2_regs[8];
359 struct vpdma_adb_hdr us3_hdr;
360 u32 us3_regs[8];
361 struct vpdma_adb_hdr dei_hdr;
362 u32 dei_regs[1];
363 u32 dei_pad[3];
364 struct vpdma_adb_hdr sc_hdr;
365 u32 sc_regs[1];
366 u32 sc_pad[3];
367 struct vpdma_adb_hdr csc_hdr;
368 u32 csc_regs[6];
369 u32 csc_pad[2];
370};
371
372#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
373 VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
374/*
375 * Set the headers for all of the address/data block structures.
376 */
377static void init_adb_hdrs(struct vpe_ctx *ctx)
378{
379 VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
380 VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
381 VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
382 VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
383 VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
384 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr, sc_regs, VPE_SC_MP_SC0);
385 VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, VPE_CSC_CSC00);
386};
387
388/*
389 * Enable or disable the VPE clocks
390 */
391static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
392{
393 u32 val = 0;
394
395 if (on)
396 val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
397 write_reg(dev, VPE_CLK_ENABLE, val);
398}
399
400static void vpe_top_reset(struct vpe_dev *dev)
401{
402
403 write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
404 VPE_DATA_PATH_CLK_RESET_SHIFT);
405
406 usleep_range(100, 150);
407
408 write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
409 VPE_DATA_PATH_CLK_RESET_SHIFT);
410}
411
412static void vpe_top_vpdma_reset(struct vpe_dev *dev)
413{
414 write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
415 VPE_VPDMA_CLK_RESET_SHIFT);
416
417 usleep_range(100, 150);
418
419 write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
420 VPE_VPDMA_CLK_RESET_SHIFT);
421}
422
423/*
424 * Load the correct of upsampler coefficients into the shadow MMRs
425 */
426static void set_us_coefficients(struct vpe_ctx *ctx)
427{
428 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
429 u32 *us1_reg = &mmr_adb->us1_regs[0];
430 u32 *us2_reg = &mmr_adb->us2_regs[0];
431 u32 *us3_reg = &mmr_adb->us3_regs[0];
432 const unsigned short *cp, *end_cp;
433
434 cp = &us_coeffs[0].anchor_fid0_c0;
435
436 end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
437
438 while (cp < end_cp) {
439 write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
440 write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
441 *us2_reg++ = *us1_reg;
442 *us3_reg++ = *us1_reg++;
443 }
444 ctx->load_mmrs = true;
445}
446
447/*
448 * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
449 */
450static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
451{
452 struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
453 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
454 u32 *us1_reg0 = &mmr_adb->us1_regs[0];
455 u32 *us2_reg0 = &mmr_adb->us2_regs[0];
456 u32 *us3_reg0 = &mmr_adb->us3_regs[0];
457 int line_mode = 1;
458 int cfg_mode = 1;
459
460 /*
461 * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
462 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
463 */
464
465 if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
466 cfg_mode = 0;
467 line_mode = 0; /* double lines to line buffer */
468 }
469
470 write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
471 write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
472 write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
473
474 /* regs for now */
475 vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
476
477 /* frame start for input luma */
478 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
479 VPE_CHAN_LUMA1_IN);
480
481 /* frame start for input chroma */
482 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
483 VPE_CHAN_CHROMA1_IN);
484
485 ctx->load_mmrs = true;
486}
487
488/*
489 * Set the shadow registers that are modified when the source
490 * format changes.
491 */
492static void set_src_registers(struct vpe_ctx *ctx)
493{
494 set_us_coefficients(ctx);
495}
496
497/*
498 * Set the shadow registers that are modified when the destination
499 * format changes.
500 */
501static void set_dst_registers(struct vpe_ctx *ctx)
502{
503 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
504 struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
505 u32 val = 0;
506
507 /* select RGB path when color space conversion is supported in future */
508 if (fmt->fourcc == V4L2_PIX_FMT_RGB24)
509 val |= VPE_RGB_OUT_SELECT | VPE_CSC_SRC_DEI_SCALER;
510 else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
511 val |= VPE_COLOR_SEPARATE_422;
512
513 /* The source of CHR_DS is always the scaler, whether it's used or not */
514 val |= VPE_DS_SRC_DEI_SCALER;
515
516 if (fmt->fourcc != V4L2_PIX_FMT_NV12)
517 val |= VPE_DS_BYPASS;
518
519 mmr_adb->out_fmt_reg[0] = val;
520
521 ctx->load_mmrs = true;
522}
523
524/*
525 * Set the de-interlacer shadow register values
526 */
527static void set_dei_regs_bypass(struct vpe_ctx *ctx)
528{
529 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
530 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
531 unsigned int src_h = s_q_data->c_rect.height;
532 unsigned int src_w = s_q_data->c_rect.width;
533 u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
534 u32 val = 0;
535
536 /*
537 * according to TRM, we should set DEI in progressive bypass mode when
538 * the input content is progressive, however, DEI is bypassed correctly
539 * for both progressive and interlace content in interlace bypass mode.
540 * It has been recommended not to use progressive bypass mode.
541 */
542 val = VPE_DEI_INTERLACE_BYPASS;
543
544 val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
545 (src_w << VPE_DEI_WIDTH_SHIFT) |
546 VPE_DEI_FIELD_FLUSH;
547
548 *dei_mmr0 = val;
549
550 ctx->load_mmrs = true;
551}
552
553static void set_csc_coeff_bypass(struct vpe_ctx *ctx)
554{
555 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
556 u32 *shadow_csc_reg5 = &mmr_adb->csc_regs[5];
557
558 *shadow_csc_reg5 |= VPE_CSC_BYPASS;
559
560 ctx->load_mmrs = true;
561}
562
563static void set_sc_regs_bypass(struct vpe_ctx *ctx)
564{
565 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
566 u32 *sc_reg0 = &mmr_adb->sc_regs[0];
567 u32 val = 0;
568
569 val |= VPE_SC_BYPASS;
570 *sc_reg0 = val;
571
572 ctx->load_mmrs = true;
573}
574
575/*
576 * Set the shadow registers whose values are modified when either the
577 * source or destination format is changed.
578 */
579static int set_srcdst_params(struct vpe_ctx *ctx)
580{
581 ctx->sequence = 0;
582
583 set_cfg_and_line_modes(ctx);
584 set_dei_regs_bypass(ctx);
585 set_csc_coeff_bypass(ctx);
586 set_sc_regs_bypass(ctx);
587
588 return 0;
589}
590
591/*
592 * Return the vpe_ctx structure for a given struct file
593 */
594static struct vpe_ctx *file2ctx(struct file *file)
595{
596 return container_of(file->private_data, struct vpe_ctx, fh);
597}
598
599/*
600 * mem2mem callbacks
601 */
602
603/**
604 * job_ready() - check whether an instance is ready to be scheduled to run
605 */
606static int job_ready(void *priv)
607{
608 struct vpe_ctx *ctx = priv;
609 int needed = ctx->bufs_per_job;
610
611 if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
612 return 0;
613
614 return 1;
615}
616
617static void job_abort(void *priv)
618{
619 struct vpe_ctx *ctx = priv;
620
621 /* Will cancel the transaction in the next interrupt handler */
622 ctx->aborting = 1;
623}
624
625/*
626 * Lock access to the device
627 */
628static void vpe_lock(void *priv)
629{
630 struct vpe_ctx *ctx = priv;
631 struct vpe_dev *dev = ctx->dev;
632 mutex_lock(&dev->dev_mutex);
633}
634
635static void vpe_unlock(void *priv)
636{
637 struct vpe_ctx *ctx = priv;
638 struct vpe_dev *dev = ctx->dev;
639 mutex_unlock(&dev->dev_mutex);
640}
641
642static void vpe_dump_regs(struct vpe_dev *dev)
643{
644#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
645
646 vpe_dbg(dev, "VPE Registers:\n");
647
648 DUMPREG(PID);
649 DUMPREG(SYSCONFIG);
650 DUMPREG(INT0_STATUS0_RAW);
651 DUMPREG(INT0_STATUS0);
652 DUMPREG(INT0_ENABLE0);
653 DUMPREG(INT0_STATUS1_RAW);
654 DUMPREG(INT0_STATUS1);
655 DUMPREG(INT0_ENABLE1);
656 DUMPREG(CLK_ENABLE);
657 DUMPREG(CLK_RESET);
658 DUMPREG(CLK_FORMAT_SELECT);
659 DUMPREG(CLK_RANGE_MAP);
660 DUMPREG(US1_R0);
661 DUMPREG(US1_R1);
662 DUMPREG(US1_R2);
663 DUMPREG(US1_R3);
664 DUMPREG(US1_R4);
665 DUMPREG(US1_R5);
666 DUMPREG(US1_R6);
667 DUMPREG(US1_R7);
668 DUMPREG(US2_R0);
669 DUMPREG(US2_R1);
670 DUMPREG(US2_R2);
671 DUMPREG(US2_R3);
672 DUMPREG(US2_R4);
673 DUMPREG(US2_R5);
674 DUMPREG(US2_R6);
675 DUMPREG(US2_R7);
676 DUMPREG(US3_R0);
677 DUMPREG(US3_R1);
678 DUMPREG(US3_R2);
679 DUMPREG(US3_R3);
680 DUMPREG(US3_R4);
681 DUMPREG(US3_R5);
682 DUMPREG(US3_R6);
683 DUMPREG(US3_R7);
684 DUMPREG(DEI_FRAME_SIZE);
685 DUMPREG(MDT_BYPASS);
686 DUMPREG(MDT_SF_THRESHOLD);
687 DUMPREG(EDI_CONFIG);
688 DUMPREG(DEI_EDI_LUT_R0);
689 DUMPREG(DEI_EDI_LUT_R1);
690 DUMPREG(DEI_EDI_LUT_R2);
691 DUMPREG(DEI_EDI_LUT_R3);
692 DUMPREG(DEI_FMD_WINDOW_R0);
693 DUMPREG(DEI_FMD_WINDOW_R1);
694 DUMPREG(DEI_FMD_CONTROL_R0);
695 DUMPREG(DEI_FMD_CONTROL_R1);
696 DUMPREG(DEI_FMD_STATUS_R0);
697 DUMPREG(DEI_FMD_STATUS_R1);
698 DUMPREG(DEI_FMD_STATUS_R2);
699 DUMPREG(SC_MP_SC0);
700 DUMPREG(SC_MP_SC1);
701 DUMPREG(SC_MP_SC2);
702 DUMPREG(SC_MP_SC3);
703 DUMPREG(SC_MP_SC4);
704 DUMPREG(SC_MP_SC5);
705 DUMPREG(SC_MP_SC6);
706 DUMPREG(SC_MP_SC8);
707 DUMPREG(SC_MP_SC9);
708 DUMPREG(SC_MP_SC10);
709 DUMPREG(SC_MP_SC11);
710 DUMPREG(SC_MP_SC12);
711 DUMPREG(SC_MP_SC13);
712 DUMPREG(SC_MP_SC17);
713 DUMPREG(SC_MP_SC18);
714 DUMPREG(SC_MP_SC19);
715 DUMPREG(SC_MP_SC20);
716 DUMPREG(SC_MP_SC21);
717 DUMPREG(SC_MP_SC22);
718 DUMPREG(SC_MP_SC23);
719 DUMPREG(SC_MP_SC24);
720 DUMPREG(SC_MP_SC25);
721 DUMPREG(CSC_CSC00);
722 DUMPREG(CSC_CSC01);
723 DUMPREG(CSC_CSC02);
724 DUMPREG(CSC_CSC03);
725 DUMPREG(CSC_CSC04);
726 DUMPREG(CSC_CSC05);
727#undef DUMPREG
728}
729
730static void add_out_dtd(struct vpe_ctx *ctx, int port)
731{
732 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
733 const struct vpe_port_data *p_data = &port_data[port];
734 struct vb2_buffer *vb = ctx->dst_vb;
735 struct v4l2_rect *c_rect = &q_data->c_rect;
736 struct vpe_fmt *fmt = q_data->fmt;
737 const struct vpdma_data_format *vpdma_fmt;
738 int plane = fmt->coplanar ? p_data->vb_part : 0;
739 dma_addr_t dma_addr;
740 u32 flags = 0;
741
742 vpdma_fmt = fmt->vpdma_fmt[plane];
743 dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
744 if (!dma_addr) {
745 vpe_err(ctx->dev,
746 "acquiring output buffer(%d) dma_addr failed\n",
747 port);
748 return;
749 }
750
751 if (q_data->flags & Q_DATA_FRAME_1D)
752 flags |= VPDMA_DATA_FRAME_1D;
753 if (q_data->flags & Q_DATA_MODE_TILED)
754 flags |= VPDMA_DATA_MODE_TILED;
755
756 vpdma_add_out_dtd(&ctx->desc_list, c_rect, vpdma_fmt, dma_addr,
757 p_data->channel, flags);
758}
759
760static void add_in_dtd(struct vpe_ctx *ctx, int port)
761{
762 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
763 const struct vpe_port_data *p_data = &port_data[port];
764 struct vb2_buffer *vb = ctx->src_vb;
765 struct v4l2_rect *c_rect = &q_data->c_rect;
766 struct vpe_fmt *fmt = q_data->fmt;
767 const struct vpdma_data_format *vpdma_fmt;
768 int plane = fmt->coplanar ? p_data->vb_part : 0;
769 int field = 0;
770 dma_addr_t dma_addr;
771 u32 flags = 0;
772
773 vpdma_fmt = fmt->vpdma_fmt[plane];
774
775 dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
776 if (!dma_addr) {
777 vpe_err(ctx->dev,
778 "acquiring input buffer(%d) dma_addr failed\n",
779 port);
780 return;
781 }
782
783 if (q_data->flags & Q_DATA_FRAME_1D)
784 flags |= VPDMA_DATA_FRAME_1D;
785 if (q_data->flags & Q_DATA_MODE_TILED)
786 flags |= VPDMA_DATA_MODE_TILED;
787
788 vpdma_add_in_dtd(&ctx->desc_list, q_data->width, q_data->height,
789 c_rect, vpdma_fmt, dma_addr, p_data->channel, field, flags);
790}
791
792/*
793 * Enable the expected IRQ sources
794 */
795static void enable_irqs(struct vpe_ctx *ctx)
796{
797 write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
798 write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DS1_UV_ERROR_INT);
799
800 vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
801}
802
803static void disable_irqs(struct vpe_ctx *ctx)
804{
805 write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
806 write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
807
808 vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
809}
810
811/* device_run() - prepares and starts the device
812 *
813 * This function is only called when both the source and destination
814 * buffers are in place.
815 */
816static void device_run(void *priv)
817{
818 struct vpe_ctx *ctx = priv;
819 struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
820
821 ctx->src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
822 WARN_ON(ctx->src_vb == NULL);
823 ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
824 WARN_ON(ctx->dst_vb == NULL);
825
826 /* config descriptors */
827 if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
828 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
829 vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
830 ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
831 ctx->load_mmrs = false;
832 }
833
834 add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
835 if (d_q_data->fmt->coplanar)
836 add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
837
838 add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
839 add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);
840
841 /* sync on channel control descriptors for input ports */
842 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
843 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);
844
845 /* sync on channel control descriptors for output ports */
846 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT);
847 if (d_q_data->fmt->coplanar)
848 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT);
849
850 enable_irqs(ctx);
851
852 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
853 vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
854}
855
856static void ds1_uv_error(struct vpe_ctx *ctx)
857{
858 dev_warn(ctx->dev->v4l2_dev.dev,
859 "received downsampler error interrupt\n");
860}
861
862static irqreturn_t vpe_irq(int irq_vpe, void *data)
863{
864 struct vpe_dev *dev = (struct vpe_dev *)data;
865 struct vpe_ctx *ctx;
866 struct vb2_buffer *s_vb, *d_vb;
867 struct v4l2_buffer *s_buf, *d_buf;
868 unsigned long flags;
869 u32 irqst0, irqst1;
870
871 irqst0 = read_reg(dev, VPE_INT0_STATUS0);
872 if (irqst0) {
873 write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
874 vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
875 }
876
877 irqst1 = read_reg(dev, VPE_INT0_STATUS1);
878 if (irqst1) {
879 write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
880 vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
881 }
882
883 ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
884 if (!ctx) {
885 vpe_err(dev, "instance released before end of transaction\n");
886 goto handled;
887 }
888
889 if (irqst1 & VPE_DS1_UV_ERROR_INT) {
890 irqst1 &= ~VPE_DS1_UV_ERROR_INT;
891 ds1_uv_error(ctx);
892 }
893
894 if (irqst0) {
895 if (irqst0 & VPE_INT0_LIST0_COMPLETE)
896 vpdma_clear_list_stat(ctx->dev->vpdma);
897
898 irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
899 }
900
901 if (irqst0 | irqst1) {
902 dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
903 "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
904 irqst0, irqst1);
905 }
906
907 disable_irqs(ctx);
908
909 vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
910 vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
911
912 vpdma_reset_desc_list(&ctx->desc_list);
913
914 if (ctx->aborting)
915 goto finished;
916
917 s_vb = ctx->src_vb;
918 d_vb = ctx->dst_vb;
919 s_buf = &s_vb->v4l2_buf;
920 d_buf = &d_vb->v4l2_buf;
921
922 d_buf->timestamp = s_buf->timestamp;
923 if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
924 d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
925 d_buf->timecode = s_buf->timecode;
926 }
927
928 d_buf->sequence = ctx->sequence;
929
930 ctx->sequence++;
931
932 spin_lock_irqsave(&dev->lock, flags);
933 v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
934 v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
935 spin_unlock_irqrestore(&dev->lock, flags);
936
937 ctx->bufs_completed++;
938 if (ctx->bufs_completed < ctx->bufs_per_job) {
939 device_run(ctx);
940 goto handled;
941 }
942
943finished:
944 vpe_dbg(ctx->dev, "finishing transaction\n");
945 ctx->bufs_completed = 0;
946 v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
947handled:
948 return IRQ_HANDLED;
949}
950
951/*
952 * video ioctls
953 */
954static int vpe_querycap(struct file *file, void *priv,
955 struct v4l2_capability *cap)
956{
957 strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
958 strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
959 strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info));
960 cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
961 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
962 return 0;
963}
964
965static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
966{
967 int i, index;
968 struct vpe_fmt *fmt = NULL;
969
970 index = 0;
971 for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
972 if (vpe_formats[i].types & type) {
973 if (index == f->index) {
974 fmt = &vpe_formats[i];
975 break;
976 }
977 index++;
978 }
979 }
980
981 if (!fmt)
982 return -EINVAL;
983
984 strncpy(f->description, fmt->name, sizeof(f->description) - 1);
985 f->pixelformat = fmt->fourcc;
986 return 0;
987}
988
989static int vpe_enum_fmt(struct file *file, void *priv,
990 struct v4l2_fmtdesc *f)
991{
992 if (V4L2_TYPE_IS_OUTPUT(f->type))
993 return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);
994
995 return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
996}
997
998static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
999{
1000 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1001 struct vpe_ctx *ctx = file2ctx(file);
1002 struct vb2_queue *vq;
1003 struct vpe_q_data *q_data;
1004 int i;
1005
1006 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
1007 if (!vq)
1008 return -EINVAL;
1009
1010 q_data = get_q_data(ctx, f->type);
1011
1012 pix->width = q_data->width;
1013 pix->height = q_data->height;
1014 pix->pixelformat = q_data->fmt->fourcc;
1015
1016 if (V4L2_TYPE_IS_OUTPUT(f->type)) {
1017 pix->colorspace = q_data->colorspace;
1018 } else {
1019 struct vpe_q_data *s_q_data;
1020
1021 /* get colorspace from the source queue */
1022 s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
1023
1024 pix->colorspace = s_q_data->colorspace;
1025 }
1026
1027 pix->num_planes = q_data->fmt->coplanar ? 2 : 1;
1028
1029 for (i = 0; i < pix->num_planes; i++) {
1030 pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
1031 pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
1032 }
1033
1034 return 0;
1035}
1036
/*
 * Shared try_fmt helper for both queue directions.
 *
 * Validates that @fmt exists and is usable for @type, then clamps and
 * aligns the requested dimensions and fills in per-plane bytesperline
 * and sizeimage. Modifies @f in place; returns 0 or -EINVAL.
 */
static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
		       struct vpe_fmt *fmt, int type)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	int i;

	/* reject fourccs we don't know, or know but not for this direction */
	if (!fmt || !(fmt->types & type)) {
		vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
			pix->pixelformat);
		return -EINVAL;
	}

	/* de-interlacer is bypassed for now: only progressive frames */
	pix->field = V4L2_FIELD_NONE;

	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, W_ALIGN,
			      &pix->height, MIN_H, MAX_H, H_ALIGN,
			      S_ALIGN);

	pix->num_planes = fmt->coplanar ? 2 : 1;
	pix->pixelformat = fmt->fourcc;

	if (type == VPE_FMT_TYPE_CAPTURE) {
		struct vpe_q_data *s_q_data;

		/* get colorspace from the source queue */
		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

		pix->colorspace = s_q_data->colorspace;
	} else {
		if (!pix->colorspace)
			pix->colorspace = V4L2_COLORSPACE_SMPTE240M;
	}

	for (i = 0; i < pix->num_planes; i++) {
		int depth;

		plane_fmt = &pix->plane_fmt[i];
		depth = fmt->vpdma_fmt[i]->depth;

		/* luma stride is aligned for VPDMA; chroma uses raw width */
		if (i == VPE_LUMA)
			plane_fmt->bytesperline =
					round_up((pix->width * depth) >> 3,
						1 << L_ALIGN);
		else
			plane_fmt->bytesperline = pix->width;

		/*
		 * NOTE(review): sizeimage is derived from the unaligned
		 * width, so for the luma plane it can be smaller than
		 * bytesperline * height — confirm this matches what VPDMA
		 * expects for padded lines.
		 */
		plane_fmt->sizeimage =
				(pix->height * pix->width * depth) >> 3;
	}

	return 0;
}
1090
1091static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
1092{
1093 struct vpe_ctx *ctx = file2ctx(file);
1094 struct vpe_fmt *fmt = find_format(f);
1095
1096 if (V4L2_TYPE_IS_OUTPUT(f->type))
1097 return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);
1098 else
1099 return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
1100}
1101
/*
 * Commit an already-validated format into the per-queue state.
 *
 * Callers must run the format through vpe_try_fmt() first (vpe_s_fmt
 * does), so find_format() is expected to succeed here. Fails with
 * -EBUSY if buffers are already allocated on the queue.
 */
static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	struct vpe_q_data *q_data;
	struct vb2_queue *vq;
	int i;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	/* can't change the format while buffers are in flight */
	if (vb2_is_busy(vq)) {
		vpe_err(ctx->dev, "queue busy\n");
		return -EBUSY;
	}

	q_data = get_q_data(ctx, f->type);
	if (!q_data)
		return -EINVAL;

	q_data->fmt = find_format(f);
	q_data->width = pix->width;
	q_data->height = pix->height;
	q_data->colorspace = pix->colorspace;

	for (i = 0; i < pix->num_planes; i++) {
		plane_fmt = &pix->plane_fmt[i];

		q_data->bytesperline[i] = plane_fmt->bytesperline;
		q_data->sizeimage[i] = plane_fmt->sizeimage;
	}

	/* reset the crop rectangle to the full frame on every format change */
	q_data->c_rect.left = 0;
	q_data->c_rect.top = 0;
	q_data->c_rect.width = q_data->width;
	q_data->c_rect.height = q_data->height;

	vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
		f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
		q_data->bytesperline[VPE_LUMA]);
	if (q_data->fmt->coplanar)
		vpe_dbg(ctx->dev, " bpl_uv %d\n",
			q_data->bytesperline[VPE_CHROMA]);

	return 0;
}
1149
1150static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
1151{
1152 int ret;
1153 struct vpe_ctx *ctx = file2ctx(file);
1154
1155 ret = vpe_try_fmt(file, priv, f);
1156 if (ret)
1157 return ret;
1158
1159 ret = __vpe_s_fmt(ctx, f);
1160 if (ret)
1161 return ret;
1162
1163 if (V4L2_TYPE_IS_OUTPUT(f->type))
1164 set_src_registers(ctx);
1165 else
1166 set_dst_registers(ctx);
1167
1168 return set_srcdst_params(ctx);
1169}
1170
1171static int vpe_reqbufs(struct file *file, void *priv,
1172 struct v4l2_requestbuffers *reqbufs)
1173{
1174 struct vpe_ctx *ctx = file2ctx(file);
1175
1176 return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
1177}
1178
1179static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1180{
1181 struct vpe_ctx *ctx = file2ctx(file);
1182
1183 return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
1184}
1185
1186static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1187{
1188 struct vpe_ctx *ctx = file2ctx(file);
1189
1190 return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
1191}
1192
1193static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1194{
1195 struct vpe_ctx *ctx = file2ctx(file);
1196
1197 return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
1198}
1199
1200static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
1201{
1202 struct vpe_ctx *ctx = file2ctx(file);
1203
1204 return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
1205}
1206
1207static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
1208{
1209 struct vpe_ctx *ctx = file2ctx(file);
1210
1211 vpe_dump_regs(ctx->dev);
1212 vpdma_dump_regs(ctx->dev->vpdma);
1213
1214 return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
1215}
1216
1217/*
1218 * defines number of buffers/frames a context can process with VPE before
1219 * switching to a different context. default value is 1 buffer per context
1220 */
1221#define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0)
1222
1223static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
1224{
1225 struct vpe_ctx *ctx =
1226 container_of(ctrl->handler, struct vpe_ctx, hdl);
1227
1228 switch (ctrl->id) {
1229 case V4L2_CID_VPE_BUFS_PER_JOB:
1230 ctx->bufs_per_job = ctrl->val;
1231 break;
1232
1233 default:
1234 vpe_err(ctx->dev, "Invalid control\n");
1235 return -EINVAL;
1236 }
1237
1238 return 0;
1239}
1240
/* Control ops: only setting controls is supported. */
static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
	.s_ctrl = vpe_s_ctrl,
};
1244
/*
 * ioctl dispatch table. Only multiplanar format ioctls are wired up,
 * since the hardware works on coplanar YUV buffers; buffer and stream
 * ioctls are thin wrappers over the v4l2 mem2mem helpers.
 */
static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
	.vidioc_querycap	= vpe_querycap,

	.vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane	= vpe_s_fmt,

	.vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
	.vidioc_g_fmt_vid_out_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_out_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_out_mplane	= vpe_s_fmt,

	.vidioc_reqbufs		= vpe_reqbufs,
	.vidioc_querybuf	= vpe_querybuf,

	.vidioc_qbuf		= vpe_qbuf,
	.vidioc_dqbuf		= vpe_dqbuf,

	.vidioc_streamon	= vpe_streamon,
	.vidioc_streamoff	= vpe_streamoff,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
1269
1270/*
1271 * Queue operations
1272 */
1273static int vpe_queue_setup(struct vb2_queue *vq,
1274 const struct v4l2_format *fmt,
1275 unsigned int *nbuffers, unsigned int *nplanes,
1276 unsigned int sizes[], void *alloc_ctxs[])
1277{
1278 int i;
1279 struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
1280 struct vpe_q_data *q_data;
1281
1282 q_data = get_q_data(ctx, vq->type);
1283
1284 *nplanes = q_data->fmt->coplanar ? 2 : 1;
1285
1286 for (i = 0; i < *nplanes; i++) {
1287 sizes[i] = q_data->sizeimage[i];
1288 alloc_ctxs[i] = ctx->dev->alloc_ctx;
1289 }
1290
1291 vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
1292 sizes[VPE_LUMA]);
1293 if (q_data->fmt->coplanar)
1294 vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
1295
1296 return 0;
1297}
1298
1299static int vpe_buf_prepare(struct vb2_buffer *vb)
1300{
1301 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1302 struct vpe_q_data *q_data;
1303 int i, num_planes;
1304
1305 vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
1306
1307 q_data = get_q_data(ctx, vb->vb2_queue->type);
1308 num_planes = q_data->fmt->coplanar ? 2 : 1;
1309
1310 for (i = 0; i < num_planes; i++) {
1311 if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
1312 vpe_err(ctx->dev,
1313 "data will not fit into plane (%lu < %lu)\n",
1314 vb2_plane_size(vb, i),
1315 (long) q_data->sizeimage[i]);
1316 return -EINVAL;
1317 }
1318 }
1319
1320 for (i = 0; i < num_planes; i++)
1321 vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
1322
1323 return 0;
1324}
1325
1326static void vpe_buf_queue(struct vb2_buffer *vb)
1327{
1328 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1329 v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
1330}
1331
/* vb2 wait_prepare op: drop the device lock before sleeping. */
static void vpe_wait_prepare(struct vb2_queue *q)
{
	vpe_unlock(vb2_get_drv_priv(q));
}
1337
/* vb2 wait_finish op: re-take the device lock after sleeping. */
static void vpe_wait_finish(struct vb2_queue *q)
{
	vpe_lock(vb2_get_drv_priv(q));
}
1343
/* vb2 queue ops shared by the source and destination queues. */
static struct vb2_ops vpe_qops = {
	.queue_setup	 = vpe_queue_setup,
	.buf_prepare	 = vpe_buf_prepare,
	.buf_queue	 = vpe_buf_queue,
	.wait_prepare	 = vpe_wait_prepare,
	.wait_finish	 = vpe_wait_finish,
};
1351
1352static int queue_init(void *priv, struct vb2_queue *src_vq,
1353 struct vb2_queue *dst_vq)
1354{
1355 struct vpe_ctx *ctx = priv;
1356 int ret;
1357
1358 memset(src_vq, 0, sizeof(*src_vq));
1359 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1360 src_vq->io_modes = VB2_MMAP;
1361 src_vq->drv_priv = ctx;
1362 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1363 src_vq->ops = &vpe_qops;
1364 src_vq->mem_ops = &vb2_dma_contig_memops;
1365 src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1366
1367 ret = vb2_queue_init(src_vq);
1368 if (ret)
1369 return ret;
1370
1371 memset(dst_vq, 0, sizeof(*dst_vq));
1372 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1373 dst_vq->io_modes = VB2_MMAP;
1374 dst_vq->drv_priv = ctx;
1375 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1376 dst_vq->ops = &vpe_qops;
1377 dst_vq->mem_ops = &vb2_dma_contig_memops;
1378 dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1379
1380 return vb2_queue_init(dst_vq);
1381}
1382
/*
 * Custom control: how many source/destination buffer pairs a context
 * processes per mem2mem job before yielding the hardware (see the
 * V4L2_CID_VPE_BUFS_PER_JOB definition above).
 */
static const struct v4l2_ctrl_config vpe_bufs_per_job = {
	.ops = &vpe_ctrl_ops,
	.id = V4L2_CID_VPE_BUFS_PER_JOB,
	.name = "Buffers Per Transaction",
	.type = V4L2_CTRL_TYPE_INTEGER,
	.def = VPE_DEF_BUFS_PER_JOB,
	.min = 1,
	.max = VIDEO_MAX_FRAME,
	.step = 1,
};
1393
1394/*
1395 * File operations
1396 */
/*
 * File open handler: allocate a new mem2mem context, its VPDMA
 * descriptor list and MMR shadow buffer, register the custom control,
 * and set up default 1920x1080 formats on both queues.
 *
 * Error unwinding is ordered: exit_fh is only reachable after both
 * VPDMA allocations succeeded, free_desc_list after the list was
 * created, and so on.
 */
static int vpe_open(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = NULL;
	struct vpe_q_data *s_q_data;
	struct v4l2_ctrl_handler *hdl;
	int ret;

	vpe_dbg(dev, "vpe_open\n");

	/* the hardware is unusable until the VPDMA firmware is loaded */
	if (!dev->vpdma->ready) {
		vpe_err(dev, "vpdma firmware not loaded\n");
		return -ENODEV;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;

	if (mutex_lock_interruptible(&dev->dev_mutex)) {
		ret = -ERESTARTSYS;
		goto free_ctx;
	}

	/* per-context descriptor list for config and data descriptors */
	ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
			VPDMA_LIST_TYPE_NORMAL);
	if (ret != 0)
		goto unlock;

	/* buffer holding the MMR values uploaded to the HW via VPDMA */
	ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
	if (ret != 0)
		goto free_desc_list;

	init_adb_hdrs(ctx);

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;

	hdl = &ctx->hdl;
	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
	if (hdl->error) {
		ret = hdl->error;
		goto exit_fh;
	}
	ctx->fh.ctrl_handler = hdl;
	v4l2_ctrl_handler_setup(hdl);

	/*
	 * Default source format. NOTE(review): only the luma plane's
	 * sizeimage is initialized here; if vpe_formats[2] is coplanar the
	 * chroma sizeimage stays 0 until the first S_FMT — confirm intended.
	 */
	s_q_data = &ctx->q_data[Q_DATA_SRC];
	s_q_data->fmt = &vpe_formats[2];
	s_q_data->width = 1920;
	s_q_data->height = 1080;
	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
	s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M;
	s_q_data->c_rect.left = 0;
	s_q_data->c_rect.top = 0;
	s_q_data->c_rect.width = s_q_data->width;
	s_q_data->c_rect.height = s_q_data->height;
	s_q_data->flags = 0;

	/* destination queue starts out as a copy of the source queue */
	ctx->q_data[Q_DATA_DST] = *s_q_data;

	set_src_registers(ctx);
	set_dst_registers(ctx);
	ret = set_srcdst_params(ctx);
	if (ret)
		goto exit_fh;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);

	if (IS_ERR(ctx->m2m_ctx)) {
		ret = PTR_ERR(ctx->m2m_ctx);
		goto exit_fh;
	}

	v4l2_fh_add(&ctx->fh);

	/*
	 * for now, just report the creation of the first instance, we can later
	 * optimize the driver to enable or disable clocks when the first
	 * instance is created or the last instance released
	 */
	if (atomic_inc_return(&dev->num_instances) == 1)
		vpe_dbg(dev, "first instance created\n");

	ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;

	/* force a full MMR upload on the first job of this context */
	ctx->load_mmrs = true;

	vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
		ctx, ctx->m2m_ctx);

	mutex_unlock(&dev->dev_mutex);

	return 0;
exit_fh:
	v4l2_ctrl_handler_free(hdl);
	v4l2_fh_exit(&ctx->fh);
	vpdma_free_desc_buf(&ctx->mmr_adb);
free_desc_list:
	vpdma_free_desc_list(&ctx->desc_list);
unlock:
	mutex_unlock(&dev->dev_mutex);
free_ctx:
	kfree(ctx);
	return ret;
}
1507
/*
 * File release handler: tear down everything vpe_open() created —
 * VPDMA descriptor resources, the file handle, the control handler
 * and the mem2mem context — then free the context itself.
 */
static int vpe_release(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = file2ctx(file);

	vpe_dbg(dev, "releasing instance %p\n", ctx);

	mutex_lock(&dev->dev_mutex);
	vpdma_free_desc_list(&ctx->desc_list);
	vpdma_free_desc_buf(&ctx->mmr_adb);

	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->hdl);
	v4l2_m2m_ctx_release(ctx->m2m_ctx);

	kfree(ctx);

	/*
	 * for now, just report the release of the last instance, we can later
	 * optimize the driver to enable or disable clocks when the first
	 * instance is created or the last instance released
	 */
	if (atomic_dec_return(&dev->num_instances) == 0)
		vpe_dbg(dev, "last instance released\n");

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
1538
1539static unsigned int vpe_poll(struct file *file,
1540 struct poll_table_struct *wait)
1541{
1542 struct vpe_ctx *ctx = file2ctx(file);
1543 struct vpe_dev *dev = ctx->dev;
1544 int ret;
1545
1546 mutex_lock(&dev->dev_mutex);
1547 ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
1548 mutex_unlock(&dev->dev_mutex);
1549 return ret;
1550}
1551
1552static int vpe_mmap(struct file *file, struct vm_area_struct *vma)
1553{
1554 struct vpe_ctx *ctx = file2ctx(file);
1555 struct vpe_dev *dev = ctx->dev;
1556 int ret;
1557
1558 if (mutex_lock_interruptible(&dev->dev_mutex))
1559 return -ERESTARTSYS;
1560 ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
1561 mutex_unlock(&dev->dev_mutex);
1562 return ret;
1563}
1564
/* File operations for the VPE video device node. */
static const struct v4l2_file_operations vpe_fops = {
	.owner		= THIS_MODULE,
	.open		= vpe_open,
	.release	= vpe_release,
	.poll		= vpe_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= vpe_mmap,
};
1573
1574static struct video_device vpe_videodev = {
1575 .name = VPE_MODULE_NAME,
1576 .fops = &vpe_fops,
1577 .ioctl_ops = &vpe_ioctl_ops,
1578 .minor = -1,
1579 .release = video_device_release,
1580 .vfl_dir = VFL_DIR_M2M,
1581};
1582
/* mem2mem framework callbacks for job scheduling and serialization. */
static struct v4l2_m2m_ops m2m_ops = {
	.device_run	= device_run,
	.job_ready	= job_ready,
	.job_abort	= job_abort,
	.lock		= vpe_lock,
	.unlock		= vpe_unlock,
};
1590
1591static int vpe_runtime_get(struct platform_device *pdev)
1592{
1593 int r;
1594
1595 dev_dbg(&pdev->dev, "vpe_runtime_get\n");
1596
1597 r = pm_runtime_get_sync(&pdev->dev);
1598 WARN_ON(r < 0);
1599 return r < 0 ? r : 0;
1600}
1601
1602static void vpe_runtime_put(struct platform_device *pdev)
1603{
1604
1605 int r;
1606
1607 dev_dbg(&pdev->dev, "vpe_runtime_put\n");
1608
1609 r = pm_runtime_put_sync(&pdev->dev);
1610 WARN_ON(r < 0 && r != -ENOSYS);
1611}
1612
1613static int vpe_probe(struct platform_device *pdev)
1614{
1615 struct vpe_dev *dev;
1616 struct video_device *vfd;
1617 struct resource *res;
1618 int ret, irq, func;
1619
1620 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1621 if (IS_ERR(dev))
1622 return PTR_ERR(dev);
1623
1624 spin_lock_init(&dev->lock);
1625
1626 ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
1627 if (ret)
1628 return ret;
1629
1630 atomic_set(&dev->num_instances, 0);
1631 mutex_init(&dev->dev_mutex);
1632
1633 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top");
1634 /*
1635 * HACK: we get resource info from device tree in the form of a list of
1636 * VPE sub blocks, the driver currently uses only the base of vpe_top
1637 * for register access, the driver should be changed later to access
1638 * registers based on the sub block base addresses
1639 */
1640 dev->base = devm_ioremap(&pdev->dev, res->start, SZ_32K);
1641 if (IS_ERR(dev->base)) {
1642 ret = PTR_ERR(dev->base);
1643 goto v4l2_dev_unreg;
1644 }
1645
1646 irq = platform_get_irq(pdev, 0);
1647 ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
1648 dev);
1649 if (ret)
1650 goto v4l2_dev_unreg;
1651
1652 platform_set_drvdata(pdev, dev);
1653
1654 dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
1655 if (IS_ERR(dev->alloc_ctx)) {
1656 vpe_err(dev, "Failed to alloc vb2 context\n");
1657 ret = PTR_ERR(dev->alloc_ctx);
1658 goto v4l2_dev_unreg;
1659 }
1660
1661 dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
1662 if (IS_ERR(dev->m2m_dev)) {
1663 vpe_err(dev, "Failed to init mem2mem device\n");
1664 ret = PTR_ERR(dev->m2m_dev);
1665 goto rel_ctx;
1666 }
1667
1668 pm_runtime_enable(&pdev->dev);
1669
1670 ret = vpe_runtime_get(pdev);
1671 if (ret)
1672 goto rel_m2m;
1673
1674 /* Perform clk enable followed by reset */
1675 vpe_set_clock_enable(dev, 1);
1676
1677 vpe_top_reset(dev);
1678
1679 func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
1680 VPE_PID_FUNC_SHIFT);
1681 vpe_dbg(dev, "VPE PID function %x\n", func);
1682
1683 vpe_top_vpdma_reset(dev);
1684
1685 dev->vpdma = vpdma_create(pdev);
1686 if (IS_ERR(dev->vpdma))
1687 goto runtime_put;
1688
1689 vfd = &dev->vfd;
1690 *vfd = vpe_videodev;
1691 vfd->lock = &dev->dev_mutex;
1692 vfd->v4l2_dev = &dev->v4l2_dev;
1693
1694 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1695 if (ret) {
1696 vpe_err(dev, "Failed to register video device\n");
1697 goto runtime_put;
1698 }
1699
1700 video_set_drvdata(vfd, dev);
1701 snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name);
1702 dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
1703 vfd->num);
1704
1705 return 0;
1706
1707runtime_put:
1708 vpe_runtime_put(pdev);
1709rel_m2m:
1710 pm_runtime_disable(&pdev->dev);
1711 v4l2_m2m_release(dev->m2m_dev);
1712rel_ctx:
1713 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
1714v4l2_dev_unreg:
1715 v4l2_device_unregister(&dev->v4l2_dev);
1716
1717 return ret;
1718}
1719
/*
 * Platform remove: undo vpe_probe() in reverse-ish order.
 *
 * NOTE(review): the m2m device is released before the video device is
 * unregistered; most drivers unregister the node first so no new opens
 * can race the teardown — confirm ordering is safe here.
 */
static int vpe_remove(struct platform_device *pdev)
{
	struct vpe_dev *dev =
		(struct vpe_dev *) platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME);

	v4l2_m2m_release(dev->m2m_dev);
	video_unregister_device(&dev->vfd);
	v4l2_device_unregister(&dev->v4l2_dev);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);

	vpe_set_clock_enable(dev, 0);
	vpe_runtime_put(pdev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
1738
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
	{
		.compatible = "ti,vpe",
	},
	{},
};
/* export the OF match table so the module can be autoloaded by alias */
MODULE_DEVICE_TABLE(of, vpe_of_match);
#else
#define vpe_of_match NULL
#endif
1749
/* Platform driver glue; matched by name or by the OF table above. */
static struct platform_driver vpe_pdrv = {
	.probe		= vpe_probe,
	.remove		= vpe_remove,
	.driver		= {
		.name	= VPE_MODULE_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= vpe_of_match,
	},
};
1759
/* Module exit: unregister the platform driver. */
static void __exit vpe_exit(void)
{
	platform_driver_unregister(&vpe_pdrv);
}
1764
/* Module init: register the platform driver. */
static int __init vpe_init(void)
{
	return platform_driver_register(&vpe_pdrv);
}
1769
module_init(vpe_init);
module_exit(vpe_exit);

/* Standard module metadata, surfaced via modinfo(8). */
MODULE_DESCRIPTION("TI VPE driver");
MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h
new file mode 100644
index 000000000000..ed214e828398
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/vpe_regs.h
@@ -0,0 +1,496 @@
1/*
2 * Copyright (c) 2013 Texas Instruments Inc.
3 *
4 * David Griego, <dagriego@biglakesoftware.com>
5 * Dale Farnsworth, <dale@farnsworth.org>
6 * Archit Taneja, <archit@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 */
12
13#ifndef __TI_VPE_REGS_H
14#define __TI_VPE_REGS_H
15
16/* VPE register offsets and field selectors */
17
18/* VPE top level regs */
19#define VPE_PID 0x0000
20#define VPE_PID_MINOR_MASK 0x3f
21#define VPE_PID_MINOR_SHIFT 0
22#define VPE_PID_CUSTOM_MASK 0x03
23#define VPE_PID_CUSTOM_SHIFT 6
24#define VPE_PID_MAJOR_MASK 0x07
25#define VPE_PID_MAJOR_SHIFT 8
26#define VPE_PID_RTL_MASK 0x1f
27#define VPE_PID_RTL_SHIFT 11
28#define VPE_PID_FUNC_MASK 0xfff
29#define VPE_PID_FUNC_SHIFT 16
30#define VPE_PID_SCHEME_MASK 0x03
31#define VPE_PID_SCHEME_SHIFT 30
32
33#define VPE_SYSCONFIG 0x0010
34#define VPE_SYSCONFIG_IDLE_MASK 0x03
35#define VPE_SYSCONFIG_IDLE_SHIFT 2
36#define VPE_SYSCONFIG_STANDBY_MASK 0x03
37#define VPE_SYSCONFIG_STANDBY_SHIFT 4
38#define VPE_FORCE_IDLE_MODE 0
39#define VPE_NO_IDLE_MODE 1
40#define VPE_SMART_IDLE_MODE 2
41#define VPE_SMART_IDLE_WAKEUP_MODE 3
42#define VPE_FORCE_STANDBY_MODE 0
43#define VPE_NO_STANDBY_MODE 1
44#define VPE_SMART_STANDBY_MODE 2
45#define VPE_SMART_STANDBY_WAKEUP_MODE 3
46
47#define VPE_INT0_STATUS0_RAW_SET 0x0020
48#define VPE_INT0_STATUS0_RAW VPE_INT0_STATUS0_RAW_SET
49#define VPE_INT0_STATUS0_CLR 0x0028
50#define VPE_INT0_STATUS0 VPE_INT0_STATUS0_CLR
51#define VPE_INT0_ENABLE0_SET 0x0030
52#define VPE_INT0_ENABLE0 VPE_INT0_ENABLE0_SET
53#define VPE_INT0_ENABLE0_CLR 0x0038
54#define VPE_INT0_LIST0_COMPLETE (1 << 0)
55#define VPE_INT0_LIST0_NOTIFY (1 << 1)
56#define VPE_INT0_LIST1_COMPLETE (1 << 2)
57#define VPE_INT0_LIST1_NOTIFY (1 << 3)
58#define VPE_INT0_LIST2_COMPLETE (1 << 4)
59#define VPE_INT0_LIST2_NOTIFY (1 << 5)
60#define VPE_INT0_LIST3_COMPLETE (1 << 6)
61#define VPE_INT0_LIST3_NOTIFY (1 << 7)
62#define VPE_INT0_LIST4_COMPLETE (1 << 8)
63#define VPE_INT0_LIST4_NOTIFY (1 << 9)
64#define VPE_INT0_LIST5_COMPLETE (1 << 10)
65#define VPE_INT0_LIST5_NOTIFY (1 << 11)
66#define VPE_INT0_LIST6_COMPLETE (1 << 12)
67#define VPE_INT0_LIST6_NOTIFY (1 << 13)
68#define VPE_INT0_LIST7_COMPLETE (1 << 14)
69#define VPE_INT0_LIST7_NOTIFY (1 << 15)
70#define VPE_INT0_DESCRIPTOR (1 << 16)
71#define VPE_DEI_FMD_INT (1 << 18)
72
73#define VPE_INT0_STATUS1_RAW_SET 0x0024
74#define VPE_INT0_STATUS1_RAW VPE_INT0_STATUS1_RAW_SET
75#define VPE_INT0_STATUS1_CLR 0x002c
76#define VPE_INT0_STATUS1 VPE_INT0_STATUS1_CLR
77#define VPE_INT0_ENABLE1_SET 0x0034
78#define VPE_INT0_ENABLE1 VPE_INT0_ENABLE1_SET
79#define VPE_INT0_ENABLE1_CLR 0x003c
80#define VPE_INT0_CHANNEL_GROUP0 (1 << 0)
81#define VPE_INT0_CHANNEL_GROUP1 (1 << 1)
82#define VPE_INT0_CHANNEL_GROUP2 (1 << 2)
83#define VPE_INT0_CHANNEL_GROUP3 (1 << 3)
84#define VPE_INT0_CHANNEL_GROUP4 (1 << 4)
85#define VPE_INT0_CHANNEL_GROUP5 (1 << 5)
86#define VPE_INT0_CLIENT (1 << 7)
87#define VPE_DEI_ERROR_INT (1 << 16)
88#define VPE_DS1_UV_ERROR_INT (1 << 22)
89
90#define VPE_INTC_EOI 0x00a0
91
92#define VPE_CLK_ENABLE 0x0100
93#define VPE_VPEDMA_CLK_ENABLE (1 << 0)
94#define VPE_DATA_PATH_CLK_ENABLE (1 << 1)
95
96#define VPE_CLK_RESET 0x0104
97#define VPE_VPDMA_CLK_RESET_MASK 0x1
98#define VPE_VPDMA_CLK_RESET_SHIFT 0
99#define VPE_DATA_PATH_CLK_RESET_MASK 0x1
100#define VPE_DATA_PATH_CLK_RESET_SHIFT 1
101#define VPE_MAIN_RESET_MASK 0x1
102#define VPE_MAIN_RESET_SHIFT 31
103
104#define VPE_CLK_FORMAT_SELECT 0x010c
105#define VPE_CSC_SRC_SELECT_MASK 0x03
106#define VPE_CSC_SRC_SELECT_SHIFT 0
107#define VPE_RGB_OUT_SELECT (1 << 8)
108#define VPE_DS_SRC_SELECT_MASK 0x07
109#define VPE_DS_SRC_SELECT_SHIFT 9
110#define VPE_DS_BYPASS (1 << 16)
111#define VPE_COLOR_SEPARATE_422 (1 << 18)
112
113#define VPE_DS_SRC_DEI_SCALER (5 << VPE_DS_SRC_SELECT_SHIFT)
114#define VPE_CSC_SRC_DEI_SCALER (3 << VPE_CSC_SRC_SELECT_SHIFT)
115
116#define VPE_CLK_RANGE_MAP 0x011c
117#define VPE_RANGE_RANGE_MAP_Y_MASK 0x07
118#define VPE_RANGE_RANGE_MAP_Y_SHIFT 0
119#define VPE_RANGE_RANGE_MAP_UV_MASK 0x07
120#define VPE_RANGE_RANGE_MAP_UV_SHIFT 3
121#define VPE_RANGE_MAP_ON (1 << 6)
122#define VPE_RANGE_REDUCTION_ON (1 << 28)
123
124/* VPE chrominance upsampler regs */
125#define VPE_US1_R0 0x0304
126#define VPE_US2_R0 0x0404
127#define VPE_US3_R0 0x0504
128#define VPE_US_C1_MASK 0x3fff
129#define VPE_US_C1_SHIFT 2
130#define VPE_US_C0_MASK 0x3fff
131#define VPE_US_C0_SHIFT 18
132#define VPE_US_MODE_MASK 0x03
133#define VPE_US_MODE_SHIFT 16
134#define VPE_ANCHOR_FID0_C1_MASK 0x3fff
135#define VPE_ANCHOR_FID0_C1_SHIFT 2
136#define VPE_ANCHOR_FID0_C0_MASK 0x3fff
137#define VPE_ANCHOR_FID0_C0_SHIFT 18
138
139#define VPE_US1_R1 0x0308
140#define VPE_US2_R1 0x0408
141#define VPE_US3_R1 0x0508
142#define VPE_ANCHOR_FID0_C3_MASK 0x3fff
143#define VPE_ANCHOR_FID0_C3_SHIFT 2
144#define VPE_ANCHOR_FID0_C2_MASK 0x3fff
145#define VPE_ANCHOR_FID0_C2_SHIFT 18
146
147#define VPE_US1_R2 0x030c
148#define VPE_US2_R2 0x040c
149#define VPE_US3_R2 0x050c
150#define VPE_INTERP_FID0_C1_MASK 0x3fff
151#define VPE_INTERP_FID0_C1_SHIFT 2
152#define VPE_INTERP_FID0_C0_MASK 0x3fff
153#define VPE_INTERP_FID0_C0_SHIFT 18
154
155#define VPE_US1_R3 0x0310
156#define VPE_US2_R3 0x0410
157#define VPE_US3_R3 0x0510
158#define VPE_INTERP_FID0_C3_MASK 0x3fff
159#define VPE_INTERP_FID0_C3_SHIFT 2
160#define VPE_INTERP_FID0_C2_MASK 0x3fff
161#define VPE_INTERP_FID0_C2_SHIFT 18
162
163#define VPE_US1_R4 0x0314
164#define VPE_US2_R4 0x0414
165#define VPE_US3_R4 0x0514
166#define VPE_ANCHOR_FID1_C1_MASK 0x3fff
167#define VPE_ANCHOR_FID1_C1_SHIFT 2
168#define VPE_ANCHOR_FID1_C0_MASK 0x3fff
169#define VPE_ANCHOR_FID1_C0_SHIFT 18
170
171#define VPE_US1_R5 0x0318
172#define VPE_US2_R5 0x0418
173#define VPE_US3_R5 0x0518
174#define VPE_ANCHOR_FID1_C3_MASK 0x3fff
175#define VPE_ANCHOR_FID1_C3_SHIFT 2
176#define VPE_ANCHOR_FID1_C2_MASK 0x3fff
177#define VPE_ANCHOR_FID1_C2_SHIFT 18
178
179#define VPE_US1_R6 0x031c
180#define VPE_US2_R6 0x041c
181#define VPE_US3_R6 0x051c
182#define VPE_INTERP_FID1_C1_MASK 0x3fff
183#define VPE_INTERP_FID1_C1_SHIFT 2
184#define VPE_INTERP_FID1_C0_MASK 0x3fff
185#define VPE_INTERP_FID1_C0_SHIFT 18
186
#define VPE_US1_R7		0x0320
#define VPE_US2_R7		0x0420
#define VPE_US3_R7		0x0520
/*
 * NOTE(review): these four field names duplicate the VPE_US*_R3 block
 * verbatim. Given the R4-R6 registers cover the FID1 anchor/interp
 * coefficients, these presumably should be VPE_INTERP_FID1_C3/C2 —
 * confirm against the VPE TRM before renaming (identical redefinition
 * is legal C, so this compiles silently).
 */
#define VPE_INTERP_FID0_C3_MASK	0x3fff
#define VPE_INTERP_FID0_C3_SHIFT	2
#define VPE_INTERP_FID0_C2_MASK	0x3fff
#define VPE_INTERP_FID0_C2_SHIFT	18
194
195/* VPE de-interlacer regs */
196#define VPE_DEI_FRAME_SIZE 0x0600
197#define VPE_DEI_WIDTH_MASK 0x07ff
198#define VPE_DEI_WIDTH_SHIFT 0
199#define VPE_DEI_HEIGHT_MASK 0x07ff
200#define VPE_DEI_HEIGHT_SHIFT 16
201#define VPE_DEI_INTERLACE_BYPASS (1 << 29)
202#define VPE_DEI_FIELD_FLUSH (1 << 30)
203#define VPE_DEI_PROGRESSIVE (1 << 31)
204
205#define VPE_MDT_BYPASS 0x0604
206#define VPE_MDT_TEMPMAX_BYPASS (1 << 0)
207#define VPE_MDT_SPATMAX_BYPASS (1 << 1)
208
209#define VPE_MDT_SF_THRESHOLD 0x0608
210#define VPE_MDT_SF_SC_THR1_MASK 0xff
211#define VPE_MDT_SF_SC_THR1_SHIFT 0
212#define VPE_MDT_SF_SC_THR2_MASK 0xff
213#define VPE_MDT_SF_SC_THR2_SHIFT 0
214#define VPE_MDT_SF_SC_THR3_MASK 0xff
215#define VPE_MDT_SF_SC_THR3_SHIFT 0
216
217#define VPE_EDI_CONFIG 0x060c
218#define VPE_EDI_INP_MODE_MASK 0x03
219#define VPE_EDI_INP_MODE_SHIFT 0
220#define VPE_EDI_ENABLE_3D (1 << 2)
221#define VPE_EDI_ENABLE_CHROMA_3D (1 << 3)
222#define VPE_EDI_CHROMA3D_COR_THR_MASK 0xff
223#define VPE_EDI_CHROMA3D_COR_THR_SHIFT 8
224#define VPE_EDI_DIR_COR_LOWER_THR_MASK 0xff
225#define VPE_EDI_DIR_COR_LOWER_THR_SHIFT 16
226#define VPE_EDI_COR_SCALE_FACTOR_MASK 0xff
227#define VPE_EDI_COR_SCALE_FACTOR_SHIFT 23
228
229#define VPE_DEI_EDI_LUT_R0 0x0610
230#define VPE_EDI_LUT0_MASK 0x1f
231#define VPE_EDI_LUT0_SHIFT 0
232#define VPE_EDI_LUT1_MASK 0x1f
233#define VPE_EDI_LUT1_SHIFT 8
234#define VPE_EDI_LUT2_MASK 0x1f
235#define VPE_EDI_LUT2_SHIFT 16
236#define VPE_EDI_LUT3_MASK 0x1f
237#define VPE_EDI_LUT3_SHIFT 24
238
#define VPE_DEI_EDI_LUT_R1	0x0614
/*
 * NOTE(review): the LUT0-LUT3 field macros below redefine the
 * VPE_DEI_EDI_LUT_R0 set verbatim (identical redefinition, so no
 * compiler diagnostic). If R1 actually holds LUT4-LUT7 like R2/R3
 * suggest, these names are copy-paste leftovers — verify in the TRM.
 */
#define VPE_EDI_LUT0_MASK	0x1f
#define VPE_EDI_LUT0_SHIFT	0
#define VPE_EDI_LUT1_MASK	0x1f
#define VPE_EDI_LUT1_SHIFT	8
#define VPE_EDI_LUT2_MASK	0x1f
#define VPE_EDI_LUT2_SHIFT	16
#define VPE_EDI_LUT3_MASK	0x1f
#define VPE_EDI_LUT3_SHIFT	24
248
249#define VPE_DEI_EDI_LUT_R2 0x0618
250#define VPE_EDI_LUT4_MASK 0x1f
251#define VPE_EDI_LUT4_SHIFT 0
252#define VPE_EDI_LUT5_MASK 0x1f
253#define VPE_EDI_LUT5_SHIFT 8
254#define VPE_EDI_LUT6_MASK 0x1f
255#define VPE_EDI_LUT6_SHIFT 16
256#define VPE_EDI_LUT7_MASK 0x1f
257#define VPE_EDI_LUT7_SHIFT 24
258
259#define VPE_DEI_EDI_LUT_R3 0x061c
260#define VPE_EDI_LUT8_MASK 0x1f
261#define VPE_EDI_LUT8_SHIFT 0
262#define VPE_EDI_LUT9_MASK 0x1f
263#define VPE_EDI_LUT9_SHIFT 8
264#define VPE_EDI_LUT10_MASK 0x1f
265#define VPE_EDI_LUT10_SHIFT 16
266#define VPE_EDI_LUT11_MASK 0x1f
267#define VPE_EDI_LUT11_SHIFT 24
268
269#define VPE_DEI_FMD_WINDOW_R0 0x0620
270#define VPE_FMD_WINDOW_MINX_MASK 0x07ff
271#define VPE_FMD_WINDOW_MINX_SHIFT 0
272#define VPE_FMD_WINDOW_MAXX_MASK 0x07ff
273#define VPE_FMD_WINDOW_MAXX_SHIFT 16
274#define VPE_FMD_WINDOW_ENABLE (1 << 31)
275
276#define VPE_DEI_FMD_WINDOW_R1 0x0624
277#define VPE_FMD_WINDOW_MINY_MASK 0x07ff
278#define VPE_FMD_WINDOW_MINY_SHIFT 0
279#define VPE_FMD_WINDOW_MAXY_MASK 0x07ff
280#define VPE_FMD_WINDOW_MAXY_SHIFT 16
281
282#define VPE_DEI_FMD_CONTROL_R0 0x0628
283#define VPE_FMD_ENABLE (1 << 0)
284#define VPE_FMD_LOCK (1 << 1)
285#define VPE_FMD_JAM_DIR (1 << 2)
286#define VPE_FMD_BED_ENABLE (1 << 3)
287#define VPE_FMD_CAF_FIELD_THR_MASK 0xff
288#define VPE_FMD_CAF_FIELD_THR_SHIFT 16
289#define VPE_FMD_CAF_LINE_THR_MASK 0xff
290#define VPE_FMD_CAF_LINE_THR_SHIFT 24
291
292#define VPE_DEI_FMD_CONTROL_R1 0x062c
293#define VPE_FMD_CAF_THR_MASK 0x000fffff
294#define VPE_FMD_CAF_THR_SHIFT 0
295
296#define VPE_DEI_FMD_STATUS_R0 0x0630
297#define VPE_FMD_CAF_MASK 0x000fffff
298#define VPE_FMD_CAF_SHIFT 0
299#define VPE_FMD_RESET (1 << 24)
300
301#define VPE_DEI_FMD_STATUS_R1 0x0634
302#define VPE_FMD_FIELD_DIFF_MASK 0x0fffffff
303#define VPE_FMD_FIELD_DIFF_SHIFT 0
304
305#define VPE_DEI_FMD_STATUS_R2 0x0638
306#define VPE_FMD_FRAME_DIFF_MASK 0x000fffff
307#define VPE_FMD_FRAME_DIFF_SHIFT 0
308
309/* VPE scaler regs */
310#define VPE_SC_MP_SC0 0x0700
311#define VPE_INTERLACE_O (1 << 0)
312#define VPE_LINEAR (1 << 1)
313#define VPE_SC_BYPASS (1 << 2)
314#define VPE_INVT_FID (1 << 3)
315#define VPE_USE_RAV (1 << 4)
316#define VPE_ENABLE_EV (1 << 5)
317#define VPE_AUTO_HS (1 << 6)
318#define VPE_DCM_2X (1 << 7)
319#define VPE_DCM_4X (1 << 8)
320#define VPE_HP_BYPASS (1 << 9)
321#define VPE_INTERLACE_I (1 << 10)
322#define VPE_ENABLE_SIN2_VER_INTP (1 << 11)
323#define VPE_Y_PK_EN (1 << 14)
324#define VPE_TRIM (1 << 15)
325#define VPE_SELFGEN_FID (1 << 16)
326
327#define VPE_SC_MP_SC1 0x0704
328#define VPE_ROW_ACC_INC_MASK 0x07ffffff
329#define VPE_ROW_ACC_INC_SHIFT 0
330
331#define VPE_SC_MP_SC2 0x0708
332#define VPE_ROW_ACC_OFFSET_MASK 0x0fffffff
333#define VPE_ROW_ACC_OFFSET_SHIFT 0
334
335#define VPE_SC_MP_SC3 0x070c
336#define VPE_ROW_ACC_OFFSET_B_MASK 0x0fffffff
337#define VPE_ROW_ACC_OFFSET_B_SHIFT 0
338
339#define VPE_SC_MP_SC4 0x0710
340#define VPE_TAR_H_MASK 0x07ff
341#define VPE_TAR_H_SHIFT 0
342#define VPE_TAR_W_MASK 0x07ff
343#define VPE_TAR_W_SHIFT 12
344#define VPE_LIN_ACC_INC_U_MASK 0x07
345#define VPE_LIN_ACC_INC_U_SHIFT 24
346#define VPE_NLIN_ACC_INIT_U_MASK 0x07
347#define VPE_NLIN_ACC_INIT_U_SHIFT 28
348
349#define VPE_SC_MP_SC5 0x0714
350#define VPE_SRC_H_MASK 0x07ff
351#define VPE_SRC_H_SHIFT 0
352#define VPE_SRC_W_MASK 0x07ff
353#define VPE_SRC_W_SHIFT 12
354#define VPE_NLIN_ACC_INC_U_MASK 0x07
355#define VPE_NLIN_ACC_INC_U_SHIFT 24
356
357#define VPE_SC_MP_SC6 0x0718
358#define VPE_ROW_ACC_INIT_RAV_MASK 0x03ff
359#define VPE_ROW_ACC_INIT_RAV_SHIFT 0
360#define VPE_ROW_ACC_INIT_RAV_B_MASK 0x03ff
361#define VPE_ROW_ACC_INIT_RAV_B_SHIFT 10
362
363#define VPE_SC_MP_SC8 0x0720
364#define VPE_NLIN_LEFT_MASK 0x07ff
365#define VPE_NLIN_LEFT_SHIFT 0
366#define VPE_NLIN_RIGHT_MASK 0x07ff
367#define VPE_NLIN_RIGHT_SHIFT 12
368
369#define VPE_SC_MP_SC9 0x0724
370#define VPE_LIN_ACC_INC VPE_SC_MP_SC9
371
372#define VPE_SC_MP_SC10 0x0728
373#define VPE_NLIN_ACC_INIT VPE_SC_MP_SC10
374
375#define VPE_SC_MP_SC11 0x072c
376#define VPE_NLIN_ACC_INC VPE_SC_MP_SC11
377
378#define VPE_SC_MP_SC12 0x0730
379#define VPE_COL_ACC_OFFSET_MASK 0x01ffffff
380#define VPE_COL_ACC_OFFSET_SHIFT 0
381
382#define VPE_SC_MP_SC13 0x0734
383#define VPE_SC_FACTOR_RAV_MASK 0x03ff
384#define VPE_SC_FACTOR_RAV_SHIFT 0
385#define VPE_CHROMA_INTP_THR_MASK 0x03ff
386#define VPE_CHROMA_INTP_THR_SHIFT 12
387#define VPE_DELTA_CHROMA_THR_MASK 0x0f
388#define VPE_DELTA_CHROMA_THR_SHIFT 24
389
390#define VPE_SC_MP_SC17 0x0744
391#define VPE_EV_THR_MASK 0x03ff
392#define VPE_EV_THR_SHIFT 12
393#define VPE_DELTA_LUMA_THR_MASK 0x0f
394#define VPE_DELTA_LUMA_THR_SHIFT 24
395#define VPE_DELTA_EV_THR_MASK 0x0f
396#define VPE_DELTA_EV_THR_SHIFT 28
397
398#define VPE_SC_MP_SC18 0x0748
399#define VPE_HS_FACTOR_MASK 0x03ff
400#define VPE_HS_FACTOR_SHIFT 0
401#define VPE_CONF_DEFAULT_MASK 0x01ff
402#define VPE_CONF_DEFAULT_SHIFT 16
403
404#define VPE_SC_MP_SC19 0x074c
405#define VPE_HPF_COEFF0_MASK 0xff
406#define VPE_HPF_COEFF0_SHIFT 0
407#define VPE_HPF_COEFF1_MASK 0xff
408#define VPE_HPF_COEFF1_SHIFT 8
409#define VPE_HPF_COEFF2_MASK 0xff
410#define VPE_HPF_COEFF2_SHIFT 16
411#define VPE_HPF_COEFF3_MASK 0xff
412#define VPE_HPF_COEFF3_SHIFT 23
413
414#define VPE_SC_MP_SC20 0x0750
415#define VPE_HPF_COEFF4_MASK 0xff
416#define VPE_HPF_COEFF4_SHIFT 0
417#define VPE_HPF_COEFF5_MASK 0xff
418#define VPE_HPF_COEFF5_SHIFT 8
419#define VPE_HPF_NORM_SHIFT_MASK 0x07
420#define VPE_HPF_NORM_SHIFT_SHIFT 16
421#define VPE_NL_LIMIT_MASK 0x1ff
422#define VPE_NL_LIMIT_SHIFT 20
423
424#define VPE_SC_MP_SC21 0x0754
425#define VPE_NL_LO_THR_MASK 0x01ff
426#define VPE_NL_LO_THR_SHIFT 0
427#define VPE_NL_LO_SLOPE_MASK 0xff
428#define VPE_NL_LO_SLOPE_SHIFT 16
429
430#define VPE_SC_MP_SC22 0x0758
431#define VPE_NL_HI_THR_MASK 0x01ff
432#define VPE_NL_HI_THR_SHIFT 0
433#define VPE_NL_HI_SLOPE_SH_MASK 0x07
434#define VPE_NL_HI_SLOPE_SH_SHIFT 16
435
436#define VPE_SC_MP_SC23 0x075c
437#define VPE_GRADIENT_THR_MASK 0x07ff
438#define VPE_GRADIENT_THR_SHIFT 0
439#define VPE_GRADIENT_THR_RANGE_MASK 0x0f
440#define VPE_GRADIENT_THR_RANGE_SHIFT 12
441#define VPE_MIN_GY_THR_MASK 0xff
442#define VPE_MIN_GY_THR_SHIFT 16
443#define VPE_MIN_GY_THR_RANGE_MASK 0x0f
444#define VPE_MIN_GY_THR_RANGE_SHIFT 28
445
446#define VPE_SC_MP_SC24 0x0760
447#define VPE_ORG_H_MASK 0x07ff
448#define VPE_ORG_H_SHIFT 0
449#define VPE_ORG_W_MASK 0x07ff
450#define VPE_ORG_W_SHIFT 16
451
452#define VPE_SC_MP_SC25 0x0764
453#define VPE_OFF_H_MASK 0x07ff
454#define VPE_OFF_H_SHIFT 0
455#define VPE_OFF_W_MASK 0x07ff
456#define VPE_OFF_W_SHIFT 16
457
458/* VPE color space converter regs */
459#define VPE_CSC_CSC00 0x5700
460#define VPE_CSC_A0_MASK 0x1fff
461#define VPE_CSC_A0_SHIFT 0
462#define VPE_CSC_B0_MASK 0x1fff
463#define VPE_CSC_B0_SHIFT 16
464
465#define VPE_CSC_CSC01 0x5704
466#define VPE_CSC_C0_MASK 0x1fff
467#define VPE_CSC_C0_SHIFT 0
468#define VPE_CSC_A1_MASK 0x1fff
469#define VPE_CSC_A1_SHIFT 16
470
471#define VPE_CSC_CSC02 0x5708
472#define VPE_CSC_B1_MASK 0x1fff
473#define VPE_CSC_B1_SHIFT 0
474#define VPE_CSC_C1_MASK 0x1fff
475#define VPE_CSC_C1_SHIFT 16
476
477#define VPE_CSC_CSC03 0x570c
478#define VPE_CSC_A2_MASK 0x1fff
479#define VPE_CSC_A2_SHIFT 0
480#define VPE_CSC_B2_MASK 0x1fff
481#define VPE_CSC_B2_SHIFT 16
482
483#define VPE_CSC_CSC04 0x5710
484#define VPE_CSC_C2_MASK 0x1fff
485#define VPE_CSC_C2_SHIFT 0
486#define VPE_CSC_D0_MASK 0x0fff
487#define VPE_CSC_D0_SHIFT 16
488
489#define VPE_CSC_CSC05 0x5714
490#define VPE_CSC_D1_MASK 0x0fff
491#define VPE_CSC_D1_SHIFT 0
492#define VPE_CSC_D2_MASK 0x0fff
493#define VPE_CSC_D2_SHIFT 16
494#define VPE_CSC_BYPASS (1 << 28)
495
496#endif
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 083bb5a5aae2..1666aabbbb86 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -160,6 +160,10 @@ enum v4l2_colorfx {
  * of controls. Total of 16 controls is reserved for this driver */
 #define V4L2_CID_USER_SI476X_BASE		(V4L2_CID_USER_BASE + 0x1040)
 
+/* The base for the TI VPE driver controls. Total of 16 controls is reserved for
+ * this driver */
+#define V4L2_CID_USER_TI_VPE_BASE		(V4L2_CID_USER_BASE + 0x1050)
+
 /* MPEG-class control IDs */
 /* The MPEG controls are applicable to all codec controls
  * and the 'MPEG' part of the define is historical */