author		Steve Longerbeam <slongerbeam@gmail.com>	2016-09-17 15:33:58 -0400
committer	Philipp Zabel <p.zabel@pengutronix.de>		2016-09-19 02:30:27 -0400
commit		cd98e85a6b786da83e0b120b53a182d100c19c9b (patch)
tree		7da1962a7b18ce2b444553cd721de0724bbb51ae
parent		8b9c3d5099b265892ab3578bc757d9b81e5655a6 (diff)
gpu: ipu-v3: Add queued image conversion support
This patch implements image conversion support using the IC tasks, with tiling to support scaling to and from images up to 4096x4096. Image rotation is also supported.

Image conversion requests are added to a run queue under the IC tasks. The internal API is subsystem agnostic (no V4L2 dependency except for the use of V4L2 fourcc pixel formats).

Callers prepare for image conversion by calling ipu_image_convert_prepare(), which initializes the parameters of the conversion. The caller passes in the IPU and IC task to use for the conversion, the input and output image formats, a rotation mode, and a completion callback and completion context pointer:

    struct ipu_image_convert_ctx *
    ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
                              struct ipu_image *in, struct ipu_image *out,
                              enum ipu_rotate_mode rot_mode,
                              ipu_image_convert_cb_t complete,
                              void *complete_context);

A new conversion context is created and added to an IC task context queue. The caller is given the new conversion context, which can then be passed to the further APIs:

    int ipu_image_convert_queue(struct ipu_image_convert_run *run);

This queues the given image conversion request run to a run queue, and starts the conversion immediately if the run queue is empty. Only the physaddrs of the input and output image buffers are needed, since the conversion context was created previously with ipu_image_convert_prepare(). When the conversion completes, the run pointer is returned to the completion callback.

    void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx);

This aborts any active or pending conversions for this context. Any currently active or pending runs belonging to this context are returned via the completion callback with an error status.

    void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx);

This unprepares the conversion context. Any active or pending runs will be aborted by calling ipu_image_convert_abort().

Signed-off-by: Steve Longerbeam <steve_longerbeam@mentor.com>
Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
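To illustrate the intended call flow of the new API, here is a minimal, hypothetical caller sketch (the my_dev structure, the choice of IC task, rotation mode and error handling are illustrative assumptions, not part of this patch). A client prepares a context once, queues one run per input/output buffer pair, and calls ipu_image_convert_unprepare() after its completion callback has seen the last run:

    #include <linux/err.h>
    #include <linux/printk.h>
    #include <video/imx-ipu-image-convert.h>

    struct my_dev {				/* hypothetical client state */
    	struct ipu_soc *ipu;
    	struct ipu_image_convert_ctx *ic_ctx;
    	struct ipu_image_convert_run run;
    };

    static void my_convert_done(struct ipu_image_convert_run *run, void *context)
    {
    	struct my_dev *dev = context;

    	/* run->status is 0 on success, negative (e.g. -EIO) if aborted */
    	pr_debug("conversion done, status %d\n", run->status);
    }

    static int my_start_conversion(struct my_dev *dev, struct ipu_image *in,
    			       struct ipu_image *out,
    			       dma_addr_t in_phys, dma_addr_t out_phys)
    {
    	int ret;

    	/* clamp sizes/strides to what the image converter can do */
    	ipu_image_convert_adjust(in, out, IPU_ROTATE_NONE);

    	dev->ic_ctx = ipu_image_convert_prepare(dev->ipu,
    						IC_TASK_POST_PROCESSOR,
    						in, out, IPU_ROTATE_NONE,
    						my_convert_done, dev);
    	if (IS_ERR(dev->ic_ctx))
    		return PTR_ERR(dev->ic_ctx);

    	dev->run.ctx = dev->ic_ctx;
    	dev->run.in_phys = in_phys;
    	dev->run.out_phys = out_phys;

    	ret = ipu_image_convert_queue(&dev->run);
    	if (ret)
    		ipu_image_convert_unprepare(dev->ic_ctx);

    	/* on success, ipu_image_convert_unprepare() is called later,
    	 * once my_convert_done() has seen the completed run */
    	return ret;
    }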
-rw-r--r--	drivers/gpu/ipu-v3/Makefile			3
-rw-r--r--	drivers/gpu/ipu-v3/ipu-common.c			9
-rw-r--r--	drivers/gpu/ipu-v3/ipu-image-convert.c		1709
-rw-r--r--	drivers/gpu/ipu-v3/ipu-prv.h			5
-rw-r--r--	include/video/imx-ipu-image-convert.h		207
5 files changed, 1932 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index aeba9dccb0af..5f961416c4ee 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
 
 imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
-		ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-smfc.o ipu-vdi.o
+		ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \
+		ipu-smfc.o ipu-vdi.o
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index e5285d23eed7..b9539f7c5e9a 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -978,6 +978,12 @@ static int ipu_submodules_init(struct ipu_soc *ipu,
 		goto err_vdi;
 	}
 
+	ret = ipu_image_convert_init(ipu, dev);
+	if (ret) {
+		unit = "image_convert";
+		goto err_image_convert;
+	}
+
 	ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
 			  IPU_CONF_DI0_EN, ipu_clk);
 	if (ret) {
@@ -1032,6 +1038,8 @@ err_dc:
 err_di_1:
 	ipu_di_exit(ipu, 0);
 err_di_0:
+	ipu_image_convert_exit(ipu);
+err_image_convert:
 	ipu_vdi_exit(ipu);
 err_vdi:
 	ipu_ic_exit(ipu);
@@ -1118,6 +1126,7 @@ static void ipu_submodules_exit(struct ipu_soc *ipu)
 	ipu_dc_exit(ipu);
 	ipu_di_exit(ipu, 1);
 	ipu_di_exit(ipu, 0);
+	ipu_image_convert_exit(ipu);
 	ipu_vdi_exit(ipu);
 	ipu_ic_exit(ipu);
 	ipu_csi_exit(ipu, 1);
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
new file mode 100644
index 000000000000..2ba7d437a2af
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -0,0 +1,1709 @@
1/*
2 * Copyright (C) 2012-2016 Mentor Graphics Inc.
3 *
4 * Queued image conversion support, with tiling and rotation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16
17#include <linux/interrupt.h>
18#include <linux/dma-mapping.h>
19#include <video/imx-ipu-image-convert.h>
20#include "ipu-prv.h"
21
22/*
23 * The IC Resizer has a restriction that the output frame from the
24 * resizer must be 1024 or less in both width (pixels) and height
25 * (lines).
26 *
27 * The image converter attempts to split up a conversion when
28 * the desired output (converted) frame resolution exceeds the
29 * IC resizer limit of 1024 in either dimension.
30 *
31 * If either dimension of the output frame exceeds the limit, the
32 * dimension is split into 1, 2, or 4 equal stripes, for a maximum
33 * of 4*4 or 16 tiles. A conversion is then carried out for each
34 * tile (but taking care to pass the full frame stride length to
35 * the DMA channel's parameter memory!). IDMAC double-buffering is used
36 * to convert each tile back-to-back when possible (see note below
37 * when double_buffering boolean is set).
38 *
39 * Note that the input frame must be split up into the same number
40 * of tiles as the output frame.
41 *
42 * FIXME: at this point there is no attempt to deal with visible seams
43 * at the tile boundaries when upscaling. The seams are caused by a reset
44 * of the bilinear upscale interpolation when starting a new tile. The
45 * seams are barely visible for small upscale factors, but become
46 * increasingly visible as the upscale factor gets larger, since more
47 * interpolated pixels get thrown out at the tile boundaries. A possible
48 * fix might be to overlap tiles of different sizes, but this must be done
49 * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
50 * alignment restrictions of each tile.
51 */
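/*
 * Worked example of the tiling rule above (illustrative only): a
 * 1920x1080 output exceeds 1024 in both dimensions but neither exceeds
 * 2048, so it is split into 2x2 = 4 tiles of 960x540. A 4096x4096
 * output is split 4x4 into 16 tiles of 1024x1024, the MAX_TILES case.
 */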
52
53#define MAX_STRIPES_W 4
54#define MAX_STRIPES_H 4
55#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
56
57#define MIN_W 16
58#define MIN_H 8
59#define MAX_W 4096
60#define MAX_H 4096
61
62enum ipu_image_convert_type {
63 IMAGE_CONVERT_IN = 0,
64 IMAGE_CONVERT_OUT,
65};
66
67struct ipu_image_convert_dma_buf {
68 void *virt;
69 dma_addr_t phys;
70 unsigned long len;
71};
72
73struct ipu_image_convert_dma_chan {
74 int in;
75 int out;
76 int rot_in;
77 int rot_out;
78 int vdi_in_p;
79 int vdi_in;
80 int vdi_in_n;
81};
82
83/* dimensions of one tile */
84struct ipu_image_tile {
85 u32 width;
86 u32 height;
87 /* size and strides are in bytes */
88 u32 size;
89 u32 stride;
90 u32 rot_stride;
91 /* start Y or packed offset of this tile */
92 u32 offset;
93 /* offset from start to tile in U plane, for planar formats */
94 u32 u_off;
95 /* offset from start to tile in V plane, for planar formats */
96 u32 v_off;
97};
98
99struct ipu_image_convert_image {
100 struct ipu_image base;
101 enum ipu_image_convert_type type;
102
103 const struct ipu_image_pixfmt *fmt;
104 unsigned int stride;
105
106 /* # of rows (horizontal stripes) if dest height is > 1024 */
107 unsigned int num_rows;
108 /* # of columns (vertical stripes) if dest width is > 1024 */
109 unsigned int num_cols;
110
111 struct ipu_image_tile tile[MAX_TILES];
112};
113
114struct ipu_image_pixfmt {
115 u32 fourcc; /* V4L2 fourcc */
116 int bpp; /* total bpp */
117 int uv_width_dec; /* decimation in width for U/V planes */
118 int uv_height_dec; /* decimation in height for U/V planes */
119 bool planar; /* planar format */
120 bool uv_swapped; /* U and V planes are swapped */
121 bool uv_packed; /* partial planar (U and V in same plane) */
122};
123
124struct ipu_image_convert_ctx;
125struct ipu_image_convert_chan;
126struct ipu_image_convert_priv;
127
128struct ipu_image_convert_ctx {
129 struct ipu_image_convert_chan *chan;
130
131 ipu_image_convert_cb_t complete;
132 void *complete_context;
133
134 /* Source/destination image data and rotation mode */
135 struct ipu_image_convert_image in;
136 struct ipu_image_convert_image out;
137 enum ipu_rotate_mode rot_mode;
138
139 /* intermediate buffer for rotation */
140 struct ipu_image_convert_dma_buf rot_intermediate[2];
141
142 /* current buffer number for double buffering */
143 int cur_buf_num;
144
145 bool aborting;
146 struct completion aborted;
147
148 /* can we use double-buffering for this conversion operation? */
149 bool double_buffering;
150 /* num_rows * num_cols */
151 unsigned int num_tiles;
152 /* next tile to process */
153 unsigned int next_tile;
154 /* where to place converted tile in dest image */
155 unsigned int out_tile_map[MAX_TILES];
156
157 struct list_head list;
158};
159
160struct ipu_image_convert_chan {
161 struct ipu_image_convert_priv *priv;
162
163 enum ipu_ic_task ic_task;
164 const struct ipu_image_convert_dma_chan *dma_ch;
165
166 struct ipu_ic *ic;
167 struct ipuv3_channel *in_chan;
168 struct ipuv3_channel *out_chan;
169 struct ipuv3_channel *rotation_in_chan;
170 struct ipuv3_channel *rotation_out_chan;
171
172 /* the IPU end-of-frame irqs */
173 int out_eof_irq;
174 int rot_out_eof_irq;
175
176 spinlock_t irqlock;
177
178 /* list of convert contexts */
179 struct list_head ctx_list;
180 /* queue of conversion runs */
181 struct list_head pending_q;
182 /* queue of completed runs */
183 struct list_head done_q;
184
185 /* the current conversion run */
186 struct ipu_image_convert_run *current_run;
187};
188
189struct ipu_image_convert_priv {
190 struct ipu_image_convert_chan chan[IC_NUM_TASKS];
191 struct ipu_soc *ipu;
192};
193
194static const struct ipu_image_convert_dma_chan
195image_convert_dma_chan[IC_NUM_TASKS] = {
196 [IC_TASK_VIEWFINDER] = {
197 .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
198 .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
199 .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
200 .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
201 .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
202 .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
203 .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
204 },
205 [IC_TASK_POST_PROCESSOR] = {
206 .in = IPUV3_CHANNEL_MEM_IC_PP,
207 .out = IPUV3_CHANNEL_IC_PP_MEM,
208 .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
209 .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
210 },
211};
212
213static const struct ipu_image_pixfmt image_convert_formats[] = {
214 {
215 .fourcc = V4L2_PIX_FMT_RGB565,
216 .bpp = 16,
217 }, {
218 .fourcc = V4L2_PIX_FMT_RGB24,
219 .bpp = 24,
220 }, {
221 .fourcc = V4L2_PIX_FMT_BGR24,
222 .bpp = 24,
223 }, {
224 .fourcc = V4L2_PIX_FMT_RGB32,
225 .bpp = 32,
226 }, {
227 .fourcc = V4L2_PIX_FMT_BGR32,
228 .bpp = 32,
229 }, {
230 .fourcc = V4L2_PIX_FMT_YUYV,
231 .bpp = 16,
232 .uv_width_dec = 2,
233 .uv_height_dec = 1,
234 }, {
235 .fourcc = V4L2_PIX_FMT_UYVY,
236 .bpp = 16,
237 .uv_width_dec = 2,
238 .uv_height_dec = 1,
239 }, {
240 .fourcc = V4L2_PIX_FMT_YUV420,
241 .bpp = 12,
242 .planar = true,
243 .uv_width_dec = 2,
244 .uv_height_dec = 2,
245 }, {
246 .fourcc = V4L2_PIX_FMT_YVU420,
247 .bpp = 12,
248 .planar = true,
249 .uv_width_dec = 2,
250 .uv_height_dec = 2,
251 .uv_swapped = true,
252 }, {
253 .fourcc = V4L2_PIX_FMT_NV12,
254 .bpp = 12,
255 .planar = true,
256 .uv_width_dec = 2,
257 .uv_height_dec = 2,
258 .uv_packed = true,
259 }, {
260 .fourcc = V4L2_PIX_FMT_YUV422P,
261 .bpp = 16,
262 .planar = true,
263 .uv_width_dec = 2,
264 .uv_height_dec = 1,
265 }, {
266 .fourcc = V4L2_PIX_FMT_NV16,
267 .bpp = 16,
268 .planar = true,
269 .uv_width_dec = 2,
270 .uv_height_dec = 1,
271 .uv_packed = true,
272 },
273};
274
275static const struct ipu_image_pixfmt *get_format(u32 fourcc)
276{
277 const struct ipu_image_pixfmt *ret = NULL;
278 unsigned int i;
279
280 for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
281 if (image_convert_formats[i].fourcc == fourcc) {
282 ret = &image_convert_formats[i];
283 break;
284 }
285 }
286
287 return ret;
288}
289
290static void dump_format(struct ipu_image_convert_ctx *ctx,
291 struct ipu_image_convert_image *ic_image)
292{
293 struct ipu_image_convert_chan *chan = ctx->chan;
294 struct ipu_image_convert_priv *priv = chan->priv;
295
296 dev_dbg(priv->ipu->dev,
297 "task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
298 chan->ic_task, ctx,
299 ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
300 ic_image->base.pix.width, ic_image->base.pix.height,
301 ic_image->num_cols, ic_image->num_rows,
302 ic_image->tile[0].width, ic_image->tile[0].height,
303 ic_image->fmt->fourcc & 0xff,
304 (ic_image->fmt->fourcc >> 8) & 0xff,
305 (ic_image->fmt->fourcc >> 16) & 0xff,
306 (ic_image->fmt->fourcc >> 24) & 0xff);
307}
308
309int ipu_image_convert_enum_format(int index, u32 *fourcc)
310{
311 const struct ipu_image_pixfmt *fmt;
312
313 if (index >= (int)ARRAY_SIZE(image_convert_formats))
314 return -EINVAL;
315
316 /* Format found */
317 fmt = &image_convert_formats[index];
318 *fourcc = fmt->fourcc;
319 return 0;
320}
321EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
322
323static void free_dma_buf(struct ipu_image_convert_priv *priv,
324 struct ipu_image_convert_dma_buf *buf)
325{
326 if (buf->virt)
327 dma_free_coherent(priv->ipu->dev,
328 buf->len, buf->virt, buf->phys);
329 buf->virt = NULL;
330 buf->phys = 0;
331}
332
333static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
334 struct ipu_image_convert_dma_buf *buf,
335 int size)
336{
337 buf->len = PAGE_ALIGN(size);
338 buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
339 GFP_DMA | GFP_KERNEL);
340 if (!buf->virt) {
341 dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
342 return -ENOMEM;
343 }
344
345 return 0;
346}
347
348static inline int num_stripes(int dim)
349{
350 if (dim <= 1024)
351 return 1;
352 else if (dim <= 2048)
353 return 2;
354 else
355 return 4;
356}
357
358static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
359 struct ipu_image_convert_image *image)
360{
361 int i;
362
363 for (i = 0; i < ctx->num_tiles; i++) {
364 struct ipu_image_tile *tile = &image->tile[i];
365
366 tile->height = image->base.pix.height / image->num_rows;
367 tile->width = image->base.pix.width / image->num_cols;
368 tile->size = ((tile->height * image->fmt->bpp) >> 3) *
369 tile->width;
370
371 if (image->fmt->planar) {
372 tile->stride = tile->width;
373 tile->rot_stride = tile->height;
374 } else {
375 tile->stride =
376 (image->fmt->bpp * tile->width) >> 3;
377 tile->rot_stride =
378 (image->fmt->bpp * tile->height) >> 3;
379 }
380 }
381}
382
383/*
384 * Use the rotation transformation to find the tile coordinates
385 * (row, col) of a tile in the destination frame that corresponds
386 * to the given tile coordinates of a source frame. The destination
387 * coordinate is then converted to a tile index.
388 */
389static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
390 int src_row, int src_col)
391{
392 struct ipu_image_convert_chan *chan = ctx->chan;
393 struct ipu_image_convert_priv *priv = chan->priv;
394 struct ipu_image_convert_image *s_image = &ctx->in;
395 struct ipu_image_convert_image *d_image = &ctx->out;
396 int dst_row, dst_col;
397
398 /* with no rotation it's a 1:1 mapping */
399 if (ctx->rot_mode == IPU_ROTATE_NONE)
400 return src_row * s_image->num_cols + src_col;
401
402 /*
403 * before doing the transform, first we have to translate
404 * source row,col for an origin in the center of s_image
405 */
406 src_row = src_row * 2 - (s_image->num_rows - 1);
407 src_col = src_col * 2 - (s_image->num_cols - 1);
408
409 /* do the rotation transform */
410 if (ctx->rot_mode & IPU_ROT_BIT_90) {
411 dst_col = -src_row;
412 dst_row = src_col;
413 } else {
414 dst_col = src_col;
415 dst_row = src_row;
416 }
417
418 /* apply flip */
419 if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
420 dst_col = -dst_col;
421 if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
422 dst_row = -dst_row;
423
424 dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
425 chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
426
427 /*
428 * finally translate dest row,col using an origin in upper
429 * left of d_image
430 */
431 dst_row += d_image->num_rows - 1;
432 dst_col += d_image->num_cols - 1;
433 dst_row /= 2;
434 dst_col /= 2;
435
436 return dst_row * d_image->num_cols + dst_col;
437}
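/*
 * Worked example (illustrative only): with a 2x2 tile grid and a pure
 * 90 degree rotation (IPU_ROT_BIT_90 set, no flips), source tile (0,0)
 * maps to destination index 1 and source tile (0,1) to destination
 * index 3, i.e. the top source row becomes the right-hand destination
 * column.
 */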
438
439/*
440 * Fill the out_tile_map[] with transformed destination tile indices.
441 */
442static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
443{
444 struct ipu_image_convert_image *s_image = &ctx->in;
445 unsigned int row, col, tile = 0;
446
447 for (row = 0; row < s_image->num_rows; row++) {
448 for (col = 0; col < s_image->num_cols; col++) {
449 ctx->out_tile_map[tile] =
450 transform_tile_index(ctx, row, col);
451 tile++;
452 }
453 }
454}
455
456static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
457 struct ipu_image_convert_image *image)
458{
459 struct ipu_image_convert_chan *chan = ctx->chan;
460 struct ipu_image_convert_priv *priv = chan->priv;
461 const struct ipu_image_pixfmt *fmt = image->fmt;
462 unsigned int row, col, tile = 0;
463 u32 H, w, h, y_stride, uv_stride;
464 u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
465 u32 y_row_off, y_col_off, y_off;
466 u32 y_size, uv_size;
467
468 /* setup some convenience vars */
469 H = image->base.pix.height;
470
471 y_stride = image->stride;
472 uv_stride = y_stride / fmt->uv_width_dec;
473 if (fmt->uv_packed)
474 uv_stride *= 2;
475
476 y_size = H * y_stride;
477 uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
478
479 for (row = 0; row < image->num_rows; row++) {
480 w = image->tile[tile].width;
481 h = image->tile[tile].height;
482 y_row_off = row * h * y_stride;
483 uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;
484
485 for (col = 0; col < image->num_cols; col++) {
486 y_col_off = col * w;
487 uv_col_off = y_col_off / fmt->uv_width_dec;
488 if (fmt->uv_packed)
489 uv_col_off *= 2;
490
491 y_off = y_row_off + y_col_off;
492 uv_off = uv_row_off + uv_col_off;
493
494 u_off = y_size - y_off + uv_off;
495 v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
496 if (fmt->uv_swapped) {
497 tmp = u_off;
498 u_off = v_off;
499 v_off = tmp;
500 }
501
502 image->tile[tile].offset = y_off;
503 image->tile[tile].u_off = u_off;
504 image->tile[tile++].v_off = v_off;
505
506 dev_dbg(priv->ipu->dev,
507 "task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
508 chan->ic_task, ctx,
509 image->type == IMAGE_CONVERT_IN ?
510 "Input" : "Output", row, col,
511 y_off, u_off, v_off);
512 }
513 }
514}
515
516static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
517 struct ipu_image_convert_image *image)
518{
519 struct ipu_image_convert_chan *chan = ctx->chan;
520 struct ipu_image_convert_priv *priv = chan->priv;
521 const struct ipu_image_pixfmt *fmt = image->fmt;
522 unsigned int row, col, tile = 0;
523 u32 w, h, bpp, stride;
524 u32 row_off, col_off;
525
526 /* setup some convenience vars */
527 stride = image->stride;
528 bpp = fmt->bpp;
529
530 for (row = 0; row < image->num_rows; row++) {
531 w = image->tile[tile].width;
532 h = image->tile[tile].height;
533 row_off = row * h * stride;
534
535 for (col = 0; col < image->num_cols; col++) {
536 col_off = (col * w * bpp) >> 3;
537
538 image->tile[tile].offset = row_off + col_off;
539 image->tile[tile].u_off = 0;
540 image->tile[tile++].v_off = 0;
541
542 dev_dbg(priv->ipu->dev,
543 "task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
544 chan->ic_task, ctx,
545 image->type == IMAGE_CONVERT_IN ?
546 "Input" : "Output", row, col,
547 row_off + col_off);
548 }
549 }
550}
551
552static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
553 struct ipu_image_convert_image *image)
554{
555 if (image->fmt->planar)
556 calc_tile_offsets_planar(ctx, image);
557 else
558 calc_tile_offsets_packed(ctx, image);
559}
560
561/*
562 * return the number of runs in given queue (pending_q or done_q)
563 * for this context. hold irqlock when calling.
564 */
565static int get_run_count(struct ipu_image_convert_ctx *ctx,
566 struct list_head *q)
567{
568 struct ipu_image_convert_run *run;
569 int count = 0;
570
571 lockdep_assert_held(&ctx->chan->irqlock);
572
573 list_for_each_entry(run, q, list) {
574 if (run->ctx == ctx)
575 count++;
576 }
577
578 return count;
579}
580
581static void convert_stop(struct ipu_image_convert_run *run)
582{
583 struct ipu_image_convert_ctx *ctx = run->ctx;
584 struct ipu_image_convert_chan *chan = ctx->chan;
585 struct ipu_image_convert_priv *priv = chan->priv;
586
587 dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
588 __func__, chan->ic_task, ctx, run);
589
590 /* disable IC tasks and the channels */
591 ipu_ic_task_disable(chan->ic);
592 ipu_idmac_disable_channel(chan->in_chan);
593 ipu_idmac_disable_channel(chan->out_chan);
594
595 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
596 ipu_idmac_disable_channel(chan->rotation_in_chan);
597 ipu_idmac_disable_channel(chan->rotation_out_chan);
598 ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
599 }
600
601 ipu_ic_disable(chan->ic);
602}
603
604static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
605 struct ipuv3_channel *channel,
606 struct ipu_image_convert_image *image,
607 enum ipu_rotate_mode rot_mode,
608 bool rot_swap_width_height)
609{
610 struct ipu_image_convert_chan *chan = ctx->chan;
611 unsigned int burst_size;
612 u32 width, height, stride;
613 dma_addr_t addr0, addr1 = 0;
614 struct ipu_image tile_image;
615 unsigned int tile_idx[2];
616
617 if (image->type == IMAGE_CONVERT_OUT) {
618 tile_idx[0] = ctx->out_tile_map[0];
619 tile_idx[1] = ctx->out_tile_map[1];
620 } else {
621 tile_idx[0] = 0;
622 tile_idx[1] = 1;
623 }
624
625 if (rot_swap_width_height) {
626 width = image->tile[0].height;
627 height = image->tile[0].width;
628 stride = image->tile[0].rot_stride;
629 addr0 = ctx->rot_intermediate[0].phys;
630 if (ctx->double_buffering)
631 addr1 = ctx->rot_intermediate[1].phys;
632 } else {
633 width = image->tile[0].width;
634 height = image->tile[0].height;
635 stride = image->stride;
636 addr0 = image->base.phys0 +
637 image->tile[tile_idx[0]].offset;
638 if (ctx->double_buffering)
639 addr1 = image->base.phys0 +
640 image->tile[tile_idx[1]].offset;
641 }
642
643 ipu_cpmem_zero(channel);
644
645 memset(&tile_image, 0, sizeof(tile_image));
646 tile_image.pix.width = tile_image.rect.width = width;
647 tile_image.pix.height = tile_image.rect.height = height;
648 tile_image.pix.bytesperline = stride;
649 tile_image.pix.pixelformat = image->fmt->fourcc;
650 tile_image.phys0 = addr0;
651 tile_image.phys1 = addr1;
652 ipu_cpmem_set_image(channel, &tile_image);
653
654 if (image->fmt->planar && !rot_swap_width_height)
655 ipu_cpmem_set_uv_offset(channel,
656 image->tile[tile_idx[0]].u_off,
657 image->tile[tile_idx[0]].v_off);
658
659 if (rot_mode)
660 ipu_cpmem_set_rotation(channel, rot_mode);
661
662 if (channel == chan->rotation_in_chan ||
663 channel == chan->rotation_out_chan) {
664 burst_size = 8;
665 ipu_cpmem_set_block_mode(channel);
666 } else
667 burst_size = (width % 16) ? 8 : 16;
668
669 ipu_cpmem_set_burstsize(channel, burst_size);
670
671 ipu_ic_task_idma_init(chan->ic, channel, width, height,
672 burst_size, rot_mode);
673
674 ipu_cpmem_set_axi_id(channel, 1);
675
676 ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
677}
678
679static int convert_start(struct ipu_image_convert_run *run)
680{
681 struct ipu_image_convert_ctx *ctx = run->ctx;
682 struct ipu_image_convert_chan *chan = ctx->chan;
683 struct ipu_image_convert_priv *priv = chan->priv;
684 struct ipu_image_convert_image *s_image = &ctx->in;
685 struct ipu_image_convert_image *d_image = &ctx->out;
686 enum ipu_color_space src_cs, dest_cs;
687 unsigned int dest_width, dest_height;
688 int ret;
689
690 dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
691 __func__, chan->ic_task, ctx, run);
692
693 src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
694 dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
695
696 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
697 /* swap width/height for resizer */
698 dest_width = d_image->tile[0].height;
699 dest_height = d_image->tile[0].width;
700 } else {
701 dest_width = d_image->tile[0].width;
702 dest_height = d_image->tile[0].height;
703 }
704
705 /* setup the IC resizer and CSC */
706 ret = ipu_ic_task_init(chan->ic,
707 s_image->tile[0].width,
708 s_image->tile[0].height,
709 dest_width,
710 dest_height,
711 src_cs, dest_cs);
712 if (ret) {
713 dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
714 return ret;
715 }
716
717 /* init the source MEM-->IC PP IDMAC channel */
718 init_idmac_channel(ctx, chan->in_chan, s_image,
719 IPU_ROTATE_NONE, false);
720
721 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
722 /* init the IC PP-->MEM IDMAC channel */
723 init_idmac_channel(ctx, chan->out_chan, d_image,
724 IPU_ROTATE_NONE, true);
725
726 /* init the MEM-->IC PP ROT IDMAC channel */
727 init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
728 ctx->rot_mode, true);
729
730 /* init the destination IC PP ROT-->MEM IDMAC channel */
731 init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
732 IPU_ROTATE_NONE, false);
733
734 /* now link IC PP-->MEM to MEM-->IC PP ROT */
735 ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
736 } else {
737 /* init the destination IC PP-->MEM IDMAC channel */
738 init_idmac_channel(ctx, chan->out_chan, d_image,
739 ctx->rot_mode, false);
740 }
741
742 /* enable the IC */
743 ipu_ic_enable(chan->ic);
744
745 /* set buffers ready */
746 ipu_idmac_select_buffer(chan->in_chan, 0);
747 ipu_idmac_select_buffer(chan->out_chan, 0);
748 if (ipu_rot_mode_is_irt(ctx->rot_mode))
749 ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
750 if (ctx->double_buffering) {
751 ipu_idmac_select_buffer(chan->in_chan, 1);
752 ipu_idmac_select_buffer(chan->out_chan, 1);
753 if (ipu_rot_mode_is_irt(ctx->rot_mode))
754 ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
755 }
756
757 /* enable the channels! */
758 ipu_idmac_enable_channel(chan->in_chan);
759 ipu_idmac_enable_channel(chan->out_chan);
760 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
761 ipu_idmac_enable_channel(chan->rotation_in_chan);
762 ipu_idmac_enable_channel(chan->rotation_out_chan);
763 }
764
765 ipu_ic_task_enable(chan->ic);
766
767 ipu_cpmem_dump(chan->in_chan);
768 ipu_cpmem_dump(chan->out_chan);
769 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
770 ipu_cpmem_dump(chan->rotation_in_chan);
771 ipu_cpmem_dump(chan->rotation_out_chan);
772 }
773
774 ipu_dump(priv->ipu);
775
776 return 0;
777}
778
779/* hold irqlock when calling */
780static int do_run(struct ipu_image_convert_run *run)
781{
782 struct ipu_image_convert_ctx *ctx = run->ctx;
783 struct ipu_image_convert_chan *chan = ctx->chan;
784
785 lockdep_assert_held(&chan->irqlock);
786
787 ctx->in.base.phys0 = run->in_phys;
788 ctx->out.base.phys0 = run->out_phys;
789
790 ctx->cur_buf_num = 0;
791 ctx->next_tile = 1;
792
793 /* remove run from pending_q and set as current */
794 list_del(&run->list);
795 chan->current_run = run;
796
797 return convert_start(run);
798}
799
800/* hold irqlock when calling */
801static void run_next(struct ipu_image_convert_chan *chan)
802{
803 struct ipu_image_convert_priv *priv = chan->priv;
804 struct ipu_image_convert_run *run, *tmp;
805 int ret;
806
807 lockdep_assert_held(&chan->irqlock);
808
809 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
810 /* skip contexts that are aborting */
811 if (run->ctx->aborting) {
812 dev_dbg(priv->ipu->dev,
813 "%s: task %u: skipping aborting ctx %p run %p\n",
814 __func__, chan->ic_task, run->ctx, run);
815 continue;
816 }
817
818 ret = do_run(run);
819 if (!ret)
820 break;
821
822 /*
823 * something went wrong with start, add the run
824 * to done q and continue to the next run in the
825 * pending q.
826 */
827 run->status = ret;
828 list_add_tail(&run->list, &chan->done_q);
829 chan->current_run = NULL;
830 }
831}
832
833static void empty_done_q(struct ipu_image_convert_chan *chan)
834{
835 struct ipu_image_convert_priv *priv = chan->priv;
836 struct ipu_image_convert_run *run;
837 unsigned long flags;
838
839 spin_lock_irqsave(&chan->irqlock, flags);
840
841 while (!list_empty(&chan->done_q)) {
842 run = list_entry(chan->done_q.next,
843 struct ipu_image_convert_run,
844 list);
845
846 list_del(&run->list);
847
848 dev_dbg(priv->ipu->dev,
849 "%s: task %u: completing ctx %p run %p with %d\n",
850 __func__, chan->ic_task, run->ctx, run, run->status);
851
852 /* call the completion callback and free the run */
853 spin_unlock_irqrestore(&chan->irqlock, flags);
854 run->ctx->complete(run, run->ctx->complete_context);
855 spin_lock_irqsave(&chan->irqlock, flags);
856 }
857
858 spin_unlock_irqrestore(&chan->irqlock, flags);
859}
860
861/*
862 * the bottom half thread clears out the done_q, calling the
863 * completion handler for each.
864 */
865static irqreturn_t do_bh(int irq, void *dev_id)
866{
867 struct ipu_image_convert_chan *chan = dev_id;
868 struct ipu_image_convert_priv *priv = chan->priv;
869 struct ipu_image_convert_ctx *ctx;
870 unsigned long flags;
871
872 dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
873 chan->ic_task);
874
875 empty_done_q(chan);
876
877 spin_lock_irqsave(&chan->irqlock, flags);
878
879 /*
880 * the done_q is cleared out, signal any contexts
881 * that are aborting that abort can complete.
882 */
883 list_for_each_entry(ctx, &chan->ctx_list, list) {
884 if (ctx->aborting) {
885 dev_dbg(priv->ipu->dev,
886 "%s: task %u: signaling abort for ctx %p\n",
887 __func__, chan->ic_task, ctx);
888 complete(&ctx->aborted);
889 }
890 }
891
892 spin_unlock_irqrestore(&chan->irqlock, flags);
893
894 dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
895 chan->ic_task);
896
897 return IRQ_HANDLED;
898}
899
900/* hold irqlock when calling */
901static irqreturn_t do_irq(struct ipu_image_convert_run *run)
902{
903 struct ipu_image_convert_ctx *ctx = run->ctx;
904 struct ipu_image_convert_chan *chan = ctx->chan;
905 struct ipu_image_tile *src_tile, *dst_tile;
906 struct ipu_image_convert_image *s_image = &ctx->in;
907 struct ipu_image_convert_image *d_image = &ctx->out;
908 struct ipuv3_channel *outch;
909 unsigned int dst_idx;
910
911 lockdep_assert_held(&chan->irqlock);
912
913 outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
914 chan->rotation_out_chan : chan->out_chan;
915
916 /*
917 * It is difficult to stop the channel DMA before the channels
918 * enter the paused state. Without double-buffering the channels
919 * are always in a paused state when the EOF irq occurs, so it
920 * is safe to stop the channels now. For double-buffering we
921 * just ignore the abort until the operation completes, when it
922 * is safe to shut down.
923 */
924 if (ctx->aborting && !ctx->double_buffering) {
925 convert_stop(run);
926 run->status = -EIO;
927 goto done;
928 }
929
930 if (ctx->next_tile == ctx->num_tiles) {
931 /*
932 * the conversion is complete
933 */
934 convert_stop(run);
935 run->status = 0;
936 goto done;
937 }
938
939 /*
940 * not done, place the next tile buffers.
941 */
942 if (!ctx->double_buffering) {
943
944 src_tile = &s_image->tile[ctx->next_tile];
945 dst_idx = ctx->out_tile_map[ctx->next_tile];
946 dst_tile = &d_image->tile[dst_idx];
947
948 ipu_cpmem_set_buffer(chan->in_chan, 0,
949 s_image->base.phys0 + src_tile->offset);
950 ipu_cpmem_set_buffer(outch, 0,
951 d_image->base.phys0 + dst_tile->offset);
952 if (s_image->fmt->planar)
953 ipu_cpmem_set_uv_offset(chan->in_chan,
954 src_tile->u_off,
955 src_tile->v_off);
956 if (d_image->fmt->planar)
957 ipu_cpmem_set_uv_offset(outch,
958 dst_tile->u_off,
959 dst_tile->v_off);
960
961 ipu_idmac_select_buffer(chan->in_chan, 0);
962 ipu_idmac_select_buffer(outch, 0);
963
964 } else if (ctx->next_tile < ctx->num_tiles - 1) {
965
966 src_tile = &s_image->tile[ctx->next_tile + 1];
967 dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
968 dst_tile = &d_image->tile[dst_idx];
969
970 ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
971 s_image->base.phys0 + src_tile->offset);
972 ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
973 d_image->base.phys0 + dst_tile->offset);
974
975 ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
976 ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
977
978 ctx->cur_buf_num ^= 1;
979 }
980
981 ctx->next_tile++;
982 return IRQ_HANDLED;
983done:
984 list_add_tail(&run->list, &chan->done_q);
985 chan->current_run = NULL;
986 run_next(chan);
987 return IRQ_WAKE_THREAD;
988}
989
990static irqreturn_t norotate_irq(int irq, void *data)
991{
992 struct ipu_image_convert_chan *chan = data;
993 struct ipu_image_convert_ctx *ctx;
994 struct ipu_image_convert_run *run;
995 unsigned long flags;
996 irqreturn_t ret;
997
998 spin_lock_irqsave(&chan->irqlock, flags);
999
1000 /* get current run and its context */
1001 run = chan->current_run;
1002 if (!run) {
1003 ret = IRQ_NONE;
1004 goto out;
1005 }
1006
1007 ctx = run->ctx;
1008
1009 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1010 /* this is a rotation operation, just ignore */
1011 spin_unlock_irqrestore(&chan->irqlock, flags);
1012 return IRQ_HANDLED;
1013 }
1014
1015 ret = do_irq(run);
1016out:
1017 spin_unlock_irqrestore(&chan->irqlock, flags);
1018 return ret;
1019}
1020
1021static irqreturn_t rotate_irq(int irq, void *data)
1022{
1023 struct ipu_image_convert_chan *chan = data;
1024 struct ipu_image_convert_priv *priv = chan->priv;
1025 struct ipu_image_convert_ctx *ctx;
1026 struct ipu_image_convert_run *run;
1027 unsigned long flags;
1028 irqreturn_t ret;
1029
1030 spin_lock_irqsave(&chan->irqlock, flags);
1031
1032 /* get current run and its context */
1033 run = chan->current_run;
1034 if (!run) {
1035 ret = IRQ_NONE;
1036 goto out;
1037 }
1038
1039 ctx = run->ctx;
1040
1041 if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
1042 /* this was NOT a rotation operation, shouldn't happen */
1043 dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
1044 spin_unlock_irqrestore(&chan->irqlock, flags);
1045 return IRQ_HANDLED;
1046 }
1047
1048 ret = do_irq(run);
1049out:
1050 spin_unlock_irqrestore(&chan->irqlock, flags);
1051 return ret;
1052}
1053
1054/*
1055 * try to force the completion of runs for this ctx. Called when
1056 * abort wait times out in ipu_image_convert_abort().
1057 */
1058static void force_abort(struct ipu_image_convert_ctx *ctx)
1059{
1060 struct ipu_image_convert_chan *chan = ctx->chan;
1061 struct ipu_image_convert_run *run;
1062 unsigned long flags;
1063
1064 spin_lock_irqsave(&chan->irqlock, flags);
1065
1066 run = chan->current_run;
1067 if (run && run->ctx == ctx) {
1068 convert_stop(run);
1069 run->status = -EIO;
1070 list_add_tail(&run->list, &chan->done_q);
1071 chan->current_run = NULL;
1072 run_next(chan);
1073 }
1074
1075 spin_unlock_irqrestore(&chan->irqlock, flags);
1076
1077 empty_done_q(chan);
1078}
1079
1080static void release_ipu_resources(struct ipu_image_convert_chan *chan)
1081{
1082 if (chan->out_eof_irq >= 0)
1083 free_irq(chan->out_eof_irq, chan);
1084 if (chan->rot_out_eof_irq >= 0)
1085 free_irq(chan->rot_out_eof_irq, chan);
1086
1087 if (!IS_ERR_OR_NULL(chan->in_chan))
1088 ipu_idmac_put(chan->in_chan);
1089 if (!IS_ERR_OR_NULL(chan->out_chan))
1090 ipu_idmac_put(chan->out_chan);
1091 if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
1092 ipu_idmac_put(chan->rotation_in_chan);
1093 if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
1094 ipu_idmac_put(chan->rotation_out_chan);
1095 if (!IS_ERR_OR_NULL(chan->ic))
1096 ipu_ic_put(chan->ic);
1097
1098 chan->in_chan = chan->out_chan = chan->rotation_in_chan =
1099 chan->rotation_out_chan = NULL;
1100 chan->out_eof_irq = chan->rot_out_eof_irq = -1;
1101}
1102
1103static int get_ipu_resources(struct ipu_image_convert_chan *chan)
1104{
1105 const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
1106 struct ipu_image_convert_priv *priv = chan->priv;
1107 int ret;
1108
1109 /* get IC */
1110 chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
1111 if (IS_ERR(chan->ic)) {
1112 dev_err(priv->ipu->dev, "could not acquire IC\n");
1113 ret = PTR_ERR(chan->ic);
1114 goto err;
1115 }
1116
1117 /* get IDMAC channels */
1118 chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
1119 chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
1120 if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
1121 dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
1122 ret = -EBUSY;
1123 goto err;
1124 }
1125
1126 chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
1127 chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
1128 if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
1129 dev_err(priv->ipu->dev,
1130 "could not acquire idmac rotation channels\n");
1131 ret = -EBUSY;
1132 goto err;
1133 }
1134
1135 /* acquire the EOF interrupts */
1136 chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
1137 chan->out_chan,
1138 IPU_IRQ_EOF);
1139
1140 ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
1141 0, "ipu-ic", chan);
1142 if (ret < 0) {
1143 dev_err(priv->ipu->dev, "could not acquire irq %d\n",
1144 chan->out_eof_irq);
1145 chan->out_eof_irq = -1;
1146 goto err;
1147 }
1148
1149 chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
1150 chan->rotation_out_chan,
1151 IPU_IRQ_EOF);
1152
1153 ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
1154 0, "ipu-ic", chan);
1155 if (ret < 0) {
1156 dev_err(priv->ipu->dev, "could not acquire irq %d\n",
1157 chan->rot_out_eof_irq);
1158 chan->rot_out_eof_irq = -1;
1159 goto err;
1160 }
1161
1162 return 0;
1163err:
1164 release_ipu_resources(chan);
1165 return ret;
1166}
1167
1168static int fill_image(struct ipu_image_convert_ctx *ctx,
1169 struct ipu_image_convert_image *ic_image,
1170 struct ipu_image *image,
1171 enum ipu_image_convert_type type)
1172{
1173 struct ipu_image_convert_priv *priv = ctx->chan->priv;
1174
1175 ic_image->base = *image;
1176 ic_image->type = type;
1177
1178 ic_image->fmt = get_format(image->pix.pixelformat);
1179 if (!ic_image->fmt) {
1180 dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
1181 type == IMAGE_CONVERT_OUT ? "Output" : "Input");
1182 return -EINVAL;
1183 }
1184
1185 if (ic_image->fmt->planar)
1186 ic_image->stride = ic_image->base.pix.width;
1187 else
1188 ic_image->stride = ic_image->base.pix.bytesperline;
1189
1190 calc_tile_dimensions(ctx, ic_image);
1191 calc_tile_offsets(ctx, ic_image);
1192
1193 return 0;
1194}
1195
1196/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
1197static unsigned int clamp_align(unsigned int x, unsigned int min,
1198 unsigned int max, unsigned int align)
1199{
1200 /* Bits that must be zero to be aligned */
1201 unsigned int mask = ~((1 << align) - 1);
1202
1203 /* Clamp to aligned min and max */
1204 x = clamp(x, (min + ~mask) & mask, max & mask);
1205
1206 /* Round to nearest aligned value */
1207 if (align)
1208 x = (x + (1 << (align - 1))) & mask;
1209
1210 return x;
1211}
1212
1213/*
1214 * We have to adjust the tile width such that the tile physaddrs and
1215 * U and V plane offsets are multiples of 8 bytes as required by
1216 * the IPU DMA Controller. For the planar formats, this corresponds
1217 * to a pixel alignment of 16 (but use a more formal equation since
1218 * the variables are available). For all the packed formats, 8 is
1219 * good enough.
1220 */
1221static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
1222{
1223 return fmt->planar ? 8 * fmt->uv_width_dec : 8;
1224}
1225
1226/*
1227 * For tile height alignment, we have to ensure that the output tile
1228 * heights are multiples of 8 lines if the IRT is required by the
1229 * given rotation mode (the IRT performs rotations on 8x8 blocks
1230 * at a time). If the IRT is not used, or for input image tiles,
1231 * 2 lines are good enough.
1232 */
1233static inline u32 tile_height_align(enum ipu_image_convert_type type,
1234 enum ipu_rotate_mode rot_mode)
1235{
1236 return (type == IMAGE_CONVERT_OUT &&
1237 ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
1238}
1239
1240/* Adjusts input/output images to IPU restrictions */
1241void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
1242 enum ipu_rotate_mode rot_mode)
1243{
1244 const struct ipu_image_pixfmt *infmt, *outfmt;
1245 unsigned int num_in_rows, num_in_cols;
1246 unsigned int num_out_rows, num_out_cols;
1247 u32 w_align, h_align;
1248
1249 infmt = get_format(in->pix.pixelformat);
1250 outfmt = get_format(out->pix.pixelformat);
1251
1252 /* set some default pixel formats if needed */
1253 if (!infmt) {
1254 in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1255 infmt = get_format(V4L2_PIX_FMT_RGB24);
1256 }
1257 if (!outfmt) {
1258 out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1259 outfmt = get_format(V4L2_PIX_FMT_RGB24);
1260 }
1261
1262 /* image converter does not handle fields */
1263 in->pix.field = out->pix.field = V4L2_FIELD_NONE;
1264
1265 /* resizer cannot downsize more than 4:1 */
1266 if (ipu_rot_mode_is_irt(rot_mode)) {
1267 out->pix.height = max_t(__u32, out->pix.height,
1268 in->pix.width / 4);
1269 out->pix.width = max_t(__u32, out->pix.width,
1270 in->pix.height / 4);
1271 } else {
1272 out->pix.width = max_t(__u32, out->pix.width,
1273 in->pix.width / 4);
1274 out->pix.height = max_t(__u32, out->pix.height,
1275 in->pix.height / 4);
1276 }
1277
1278 /* get tiling rows/cols from output format */
1279 num_out_rows = num_stripes(out->pix.height);
1280 num_out_cols = num_stripes(out->pix.width);
1281 if (ipu_rot_mode_is_irt(rot_mode)) {
1282 num_in_rows = num_out_cols;
1283 num_in_cols = num_out_rows;
1284 } else {
1285 num_in_rows = num_out_rows;
1286 num_in_cols = num_out_cols;
1287 }
1288
1289 /* align input width/height */
1290 w_align = ilog2(tile_width_align(infmt) * num_in_cols);
1291 h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
1292 num_in_rows);
1293 in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
1294 in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
1295
1296 /* align output width/height */
1297 w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
1298 h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
1299 num_out_rows);
1300 out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
1301 out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
1302
1303 /* set input/output strides and image sizes */
1304 in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
1305 in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
1306 out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
1307 out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
1308}
1309EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
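/*
 * Worked example (illustrative only): for an RGB24 output of 1918x1078
 * with no rotation, both dimensions need 2 stripes, so the width is
 * rounded to a multiple of 2 * 8 = 16 and the height to a multiple of
 * 2 * 2 = 4, giving an adjusted output of 1920x1080.
 */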
1310
1311/*
1312 * this is used by ipu_image_convert_prepare() to verify set input and
1313 * output images are valid before starting the conversion. Clients can
1314 * also call it before calling ipu_image_convert_prepare().
1315 */
1316int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
1317 enum ipu_rotate_mode rot_mode)
1318{
1319 struct ipu_image testin, testout;
1320
1321 testin = *in;
1322 testout = *out;
1323
1324 ipu_image_convert_adjust(&testin, &testout, rot_mode);
1325
1326 if (testin.pix.width != in->pix.width ||
1327 testin.pix.height != in->pix.height ||
1328 testout.pix.width != out->pix.width ||
1329 testout.pix.height != out->pix.height)
1330 return -EINVAL;
1331
1332 return 0;
1333}
1334EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
1335
1336/*
1337 * Call ipu_image_convert_prepare() to prepare for the conversion of
1338 * given images and rotation mode. Returns a new conversion context.
1339 */
1340struct ipu_image_convert_ctx *
1341ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
1342 struct ipu_image *in, struct ipu_image *out,
1343 enum ipu_rotate_mode rot_mode,
1344 ipu_image_convert_cb_t complete,
1345 void *complete_context)
1346{
1347 struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
1348 struct ipu_image_convert_image *s_image, *d_image;
1349 struct ipu_image_convert_chan *chan;
1350 struct ipu_image_convert_ctx *ctx;
1351 unsigned long flags;
1352 bool get_res;
1353 int ret;
1354
1355 if (!in || !out || !complete ||
1356 (ic_task != IC_TASK_VIEWFINDER &&
1357 ic_task != IC_TASK_POST_PROCESSOR))
1358 return ERR_PTR(-EINVAL);
1359
1360 /* verify the in/out images before continuing */
1361 ret = ipu_image_convert_verify(in, out, rot_mode);
1362 if (ret) {
1363 dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
1364 __func__);
1365 return ERR_PTR(ret);
1366 }
1367
1368 chan = &priv->chan[ic_task];
1369
1370 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1371 if (!ctx)
1372 return ERR_PTR(-ENOMEM);
1373
1374 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
1375 chan->ic_task, ctx);
1376
1377 ctx->chan = chan;
1378 init_completion(&ctx->aborted);
1379
1380 s_image = &ctx->in;
1381 d_image = &ctx->out;
1382
1383 /* set tiling and rotation */
1384 d_image->num_rows = num_stripes(out->pix.height);
1385 d_image->num_cols = num_stripes(out->pix.width);
1386 if (ipu_rot_mode_is_irt(rot_mode)) {
1387 s_image->num_rows = d_image->num_cols;
1388 s_image->num_cols = d_image->num_rows;
1389 } else {
1390 s_image->num_rows = d_image->num_rows;
1391 s_image->num_cols = d_image->num_cols;
1392 }
1393
1394 ctx->num_tiles = d_image->num_cols * d_image->num_rows;
1395 ctx->rot_mode = rot_mode;
1396
1397 ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
1398 if (ret)
1399 goto out_free;
1400 ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
1401 if (ret)
1402 goto out_free;
1403
1404 calc_out_tile_map(ctx);
1405
1406 dump_format(ctx, s_image);
1407 dump_format(ctx, d_image);
1408
1409 ctx->complete = complete;
1410 ctx->complete_context = complete_context;
1411
1412 /*
1413 * Can we use double-buffering for this operation? If there is
1414 * only one tile (the whole image can be converted in a single
1415 * operation) there's no point in using double-buffering. Also,
1416 * the IPU's IDMAC channels allow only a single U and V plane
1417 * offset shared between both buffers, but these offsets change
1418 * for every tile, and therefore would have to be updated for
1419 * each buffer which is not possible. So double-buffering is
1420 * impossible when either the source or destination images are
1421 * a planar format (YUV420, YUV422P, etc.).
1422 */
1423 ctx->double_buffering = (ctx->num_tiles > 1 &&
1424 !s_image->fmt->planar &&
1425 !d_image->fmt->planar);
1426
1427 if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1428 ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
1429 d_image->tile[0].size);
1430 if (ret)
1431 goto out_free;
1432 if (ctx->double_buffering) {
1433 ret = alloc_dma_buf(priv,
1434 &ctx->rot_intermediate[1],
1435 d_image->tile[0].size);
1436 if (ret)
1437 goto out_free_dmabuf0;
1438 }
1439 }
1440
1441 spin_lock_irqsave(&chan->irqlock, flags);
1442
1443 get_res = list_empty(&chan->ctx_list);
1444
1445 list_add_tail(&ctx->list, &chan->ctx_list);
1446
1447 spin_unlock_irqrestore(&chan->irqlock, flags);
1448
1449 if (get_res) {
1450 ret = get_ipu_resources(chan);
1451 if (ret)
1452 goto out_free_dmabuf1;
1453 }
1454
1455 return ctx;
1456
1457out_free_dmabuf1:
1458 free_dma_buf(priv, &ctx->rot_intermediate[1]);
1459 spin_lock_irqsave(&chan->irqlock, flags);
1460 list_del(&ctx->list);
1461 spin_unlock_irqrestore(&chan->irqlock, flags);
1462out_free_dmabuf0:
1463 free_dma_buf(priv, &ctx->rot_intermediate[0]);
1464out_free:
1465 kfree(ctx);
1466 return ERR_PTR(ret);
1467}
1468EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
1469
1470/*
1471 * Carry out a single image conversion run. Only the physaddr's of the input
1472 * and output image buffers are needed. The conversion context must have
1473 * been created previously with ipu_image_convert_prepare().
1474 */
1475int ipu_image_convert_queue(struct ipu_image_convert_run *run)
1476{
1477 struct ipu_image_convert_chan *chan;
1478 struct ipu_image_convert_priv *priv;
1479 struct ipu_image_convert_ctx *ctx;
1480 unsigned long flags;
1481 int ret = 0;
1482
1483 if (!run || !run->ctx || !run->in_phys || !run->out_phys)
1484 return -EINVAL;
1485
1486 ctx = run->ctx;
1487 chan = ctx->chan;
1488 priv = chan->priv;
1489
1490 dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
1491 chan->ic_task, ctx, run);
1492
1493 INIT_LIST_HEAD(&run->list);
1494
1495 spin_lock_irqsave(&chan->irqlock, flags);
1496
1497 if (ctx->aborting) {
1498 ret = -EIO;
1499 goto unlock;
1500 }
1501
1502 list_add_tail(&run->list, &chan->pending_q);
1503
1504 if (!chan->current_run) {
1505 ret = do_run(run);
1506 if (ret)
1507 chan->current_run = NULL;
1508 }
1509unlock:
1510 spin_unlock_irqrestore(&chan->irqlock, flags);
1511 return ret;
1512}
1513EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
1514
1515/* Abort any active or pending conversions for this context */
1516void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
1517{
1518 struct ipu_image_convert_chan *chan = ctx->chan;
1519 struct ipu_image_convert_priv *priv = chan->priv;
1520 struct ipu_image_convert_run *run, *active_run, *tmp;
1521 unsigned long flags;
1522 int run_count, ret;
1523 bool need_abort;
1524
1525 reinit_completion(&ctx->aborted);
1526
1527 spin_lock_irqsave(&chan->irqlock, flags);
1528
1529 /* move all remaining pending runs in this context to done_q */
1530 list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
1531 if (run->ctx != ctx)
1532 continue;
1533 run->status = -EIO;
1534 list_move_tail(&run->list, &chan->done_q);
1535 }
1536
1537 run_count = get_run_count(ctx, &chan->done_q);
1538 active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
1539 chan->current_run : NULL;
1540
1541 need_abort = (run_count || active_run);
1542
1543 ctx->aborting = need_abort;
1544
1545 spin_unlock_irqrestore(&chan->irqlock, flags);
1546
1547 if (!need_abort) {
1548 dev_dbg(priv->ipu->dev,
1549 "%s: task %u: no abort needed for ctx %p\n",
1550 __func__, chan->ic_task, ctx);
1551 return;
1552 }
1553
1554 dev_dbg(priv->ipu->dev,
1555 "%s: task %u: wait for completion: %d runs, active run %p\n",
1556 __func__, chan->ic_task, run_count, active_run);
1557
1558 ret = wait_for_completion_timeout(&ctx->aborted,
1559 msecs_to_jiffies(10000));
1560 if (ret == 0) {
1561 dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
1562 force_abort(ctx);
1563 }
1564
1565 ctx->aborting = false;
1566}
1567EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
1568
1569/* Unprepare image conversion context */
1570void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
1571{
1572 struct ipu_image_convert_chan *chan = ctx->chan;
1573 struct ipu_image_convert_priv *priv = chan->priv;
1574 unsigned long flags;
1575 bool put_res;
1576
1577 /* make sure no runs are hanging around */
1578 ipu_image_convert_abort(ctx);
1579
1580 dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
1581 chan->ic_task, ctx);
1582
1583 spin_lock_irqsave(&chan->irqlock, flags);
1584
1585 list_del(&ctx->list);
1586
1587 put_res = list_empty(&chan->ctx_list);
1588
1589 spin_unlock_irqrestore(&chan->irqlock, flags);
1590
1591 if (put_res)
1592 release_ipu_resources(chan);
1593
1594 free_dma_buf(priv, &ctx->rot_intermediate[1]);
1595 free_dma_buf(priv, &ctx->rot_intermediate[0]);
1596
1597 kfree(ctx);
1598}
1599EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
1600
1601/*
1602 * "Canned" asynchronous single image conversion. Allocates and returns
1603 * a new conversion run. On successful return the caller must free the
1604 * run and call ipu_image_convert_unprepare() after conversion completes.
1605 */
1606struct ipu_image_convert_run *
1607ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
1608 struct ipu_image *in, struct ipu_image *out,
1609 enum ipu_rotate_mode rot_mode,
1610 ipu_image_convert_cb_t complete,
1611 void *complete_context)
1612{
1613 struct ipu_image_convert_ctx *ctx;
1614 struct ipu_image_convert_run *run;
1615 int ret;
1616
1617 ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
1618 complete, complete_context);
1619 if (IS_ERR(ctx))
1620 return ERR_PTR(PTR_ERR(ctx));
1621
1622 run = kzalloc(sizeof(*run), GFP_KERNEL);
1623 if (!run) {
1624 ipu_image_convert_unprepare(ctx);
1625 return ERR_PTR(-ENOMEM);
1626 }
1627
1628 run->ctx = ctx;
1629 run->in_phys = in->phys0;
1630 run->out_phys = out->phys0;
1631
1632 ret = ipu_image_convert_queue(run);
1633 if (ret) {
1634 ipu_image_convert_unprepare(ctx);
1635 kfree(run);
1636 return ERR_PTR(ret);
1637 }
1638
1639 return run;
1640}
1641EXPORT_SYMBOL_GPL(ipu_image_convert);
1642
1643/* "Canned" synchronous single image conversion */
1644static void image_convert_sync_complete(struct ipu_image_convert_run *run,
1645 void *data)
1646{
1647 struct completion *comp = data;
1648
1649 complete(comp);
1650}
1651
1652int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
1653 struct ipu_image *in, struct ipu_image *out,
1654 enum ipu_rotate_mode rot_mode)
1655{
1656 struct ipu_image_convert_run *run;
1657 struct completion comp;
1658 int ret;
1659
1660 init_completion(&comp);
1661
1662 run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
1663 image_convert_sync_complete, &comp);
1664 if (IS_ERR(run))
1665 return PTR_ERR(run);
1666
1667 ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
1668 ret = (ret == 0) ? -ETIMEDOUT : 0;
1669
1670 ipu_image_convert_unprepare(run->ctx);
1671 kfree(run);
1672
1673 return ret;
1674}
1675EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
1676
1677int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
1678{
1679 struct ipu_image_convert_priv *priv;
1680 int i;
1681
1682 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1683 if (!priv)
1684 return -ENOMEM;
1685
1686 ipu->image_convert_priv = priv;
1687 priv->ipu = ipu;
1688
1689 for (i = 0; i < IC_NUM_TASKS; i++) {
1690 struct ipu_image_convert_chan *chan = &priv->chan[i];
1691
1692 chan->ic_task = i;
1693 chan->priv = priv;
1694 chan->dma_ch = &image_convert_dma_chan[i];
1695 chan->out_eof_irq = -1;
1696 chan->rot_out_eof_irq = -1;
1697
1698 spin_lock_init(&chan->irqlock);
1699 INIT_LIST_HEAD(&chan->ctx_list);
1700 INIT_LIST_HEAD(&chan->pending_q);
1701 INIT_LIST_HEAD(&chan->done_q);
1702 }
1703
1704 return 0;
1705}
1706
1707void ipu_image_convert_exit(struct ipu_soc *ipu)
1708{
1709}
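To illustrate the simplest entry point added above, ipu_image_convert_sync(), here is a minimal, hypothetical caller sketch (frame sizes, pixel formats and buffer handles are made up for illustration; struct ipu_image field names follow the code above):

    #include <linux/videodev2.h>
    #include <video/imx-ipu-image-convert.h>

    static int my_scale_frame(struct ipu_soc *ipu,
    			  dma_addr_t in_phys, dma_addr_t out_phys)
    {
    	struct ipu_image in = { }, out = { };

    	in.pix.width = 1920;
    	in.pix.height = 1080;
    	in.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    	out.pix.width = 1280;
    	out.pix.height = 720;
    	out.pix.pixelformat = V4L2_PIX_FMT_RGB565;

    	/* fills in bytesperline/sizeimage and clamps to IPU limits */
    	ipu_image_convert_adjust(&in, &out, IPU_ROTATE_NONE);

    	in.phys0 = in_phys;
    	out.phys0 = out_phys;

    	/* blocks until completion; returns -ETIMEDOUT after 10 seconds */
    	return ipu_image_convert_sync(ipu, IC_TASK_POST_PROCESSOR,
    				      &in, &out, IPU_ROTATE_NONE);
    }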
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index dca2c3af1b8a..22e47b68b14a 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -166,6 +166,7 @@ struct ipu_dmfc_priv;
 struct ipu_di;
 struct ipu_ic_priv;
 struct ipu_vdi;
+struct ipu_image_convert_priv;
 struct ipu_smfc_priv;
 
 struct ipu_devtype;
@@ -199,6 +200,7 @@ struct ipu_soc {
 	struct ipu_csi *csi_priv[2];
 	struct ipu_ic_priv *ic_priv;
 	struct ipu_vdi *vdi_priv;
+	struct ipu_image_convert_priv *image_convert_priv;
 	struct ipu_smfc_priv *smfc_priv;
 };
 
@@ -233,6 +235,9 @@ int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
 		 unsigned long base, u32 module);
 void ipu_vdi_exit(struct ipu_soc *ipu);
 
+int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev);
+void ipu_image_convert_exit(struct ipu_soc *ipu);
+
 int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
 		unsigned long base, u32 module, struct clk *ipu_clk);
 void ipu_di_exit(struct ipu_soc *ipu, int id);
diff --git a/include/video/imx-ipu-image-convert.h b/include/video/imx-ipu-image-convert.h
new file mode 100644
index 000000000000..7b87efc6d77a
--- /dev/null
+++ b/include/video/imx-ipu-image-convert.h
@@ -0,0 +1,207 @@
1/*
2 * Copyright (C) 2012-2016 Mentor Graphics Inc.
3 *
4 * i.MX Queued image conversion support, with tiling and rotation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16#ifndef __IMX_IPU_IMAGE_CONVERT_H__
17#define __IMX_IPU_IMAGE_CONVERT_H__
18
19#include <video/imx-ipu-v3.h>
20
21struct ipu_image_convert_ctx;
22
23/**
24 * struct ipu_image_convert_run - image conversion run request struct
25 *
26 * @ctx: the conversion context
27 * @in_phys: dma addr of input image buffer for this run
28 * @out_phys: dma addr of output image buffer for this run
29 * @status: completion status of this run
30 */
31struct ipu_image_convert_run {
32 struct ipu_image_convert_ctx *ctx;
33
34 dma_addr_t in_phys;
35 dma_addr_t out_phys;
36
37 int status;
38
39 /* internal to image converter, callers don't touch */
40 struct list_head list;
41};
42
43/**
44 * ipu_image_convert_cb_t - conversion callback function prototype
45 *
46 * @run: the completed conversion run pointer
47 * @ctx: a private context pointer for the callback
48 */
49typedef void (*ipu_image_convert_cb_t)(struct ipu_image_convert_run *run,
50 void *ctx);
51
52/**
53 * ipu_image_convert_enum_format() - enumerate the image converter's
54 * supported input and output pixel formats.
55 *
56 * @index: pixel format index
57 * @fourcc: v4l2 fourcc for this index
58 *
59 * Returns 0 with a valid index and fills in v4l2 fourcc, -EINVAL otherwise.
60 *
 61 * In V4L2, drivers can call ipu_image_convert_enum_format() in .enum_fmt.
62 */
63int ipu_image_convert_enum_format(int index, u32 *fourcc);
64
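/*
 * A minimal, hypothetical sketch of wiring this into a V4L2 .enum_fmt
 * handler; the handler name below is illustrative, not part of this API:
 *
 *	static int my_enum_fmt(struct file *file, void *fh,
 *			       struct v4l2_fmtdesc *f)
 *	{
 *		u32 fourcc;
 *		int ret;
 *
 *		ret = ipu_image_convert_enum_format(f->index, &fourcc);
 *		if (ret)
 *			return ret;
 *
 *		f->pixelformat = fourcc;
 *		return 0;
 *	}
 */
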
65/**
66 * ipu_image_convert_adjust() - adjust input/output images to IPU restrictions.
67 *
68 * @in: input image format, adjusted on return
69 * @out: output image format, adjusted on return
70 * @rot_mode: rotation mode
71 *
72 * In V4L2, drivers can call ipu_image_convert_adjust() in .try_fmt.
73 */
74void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
75 enum ipu_rotate_mode rot_mode);
76
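/*
 * A hedged sketch of a .try_fmt handler built on this helper. The
 * driver-side details (the fixed input format "my_input_pix") are
 * assumptions for illustration only; struct ipu_image carries the
 * V4L2 pixel format in its .pix member.
 *
 *	static int my_try_fmt(struct file *file, void *fh,
 *			      struct v4l2_format *f)
 *	{
 *		struct ipu_image in = { .pix = my_input_pix };
 *		struct ipu_image out = { .pix = f->fmt.pix };
 *
 *		ipu_image_convert_adjust(&in, &out, IPU_ROTATE_NONE);
 *
 *		f->fmt.pix = out.pix;
 *		return 0;
 *	}
 */
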
77/**
78 * ipu_image_convert_verify() - verify that input/output image formats
79 * and rotation mode meet IPU restrictions.
80 *
81 * @in: input image format
82 * @out: output image format
83 * @rot_mode: rotation mode
84 *
85 * Returns 0 if the formats and rotation mode meet IPU restrictions,
86 * -EINVAL otherwise.
87 */
88int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
89 enum ipu_rotate_mode rot_mode);
90
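/*
 * Sketch: before preparing a conversion context, a driver might
 * double-check the negotiated formats, e.g.:
 *
 *	ret = ipu_image_convert_verify(&in, &out, rot_mode);
 *	if (ret)
 *		return ret;
 */
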
91/**
92 * ipu_image_convert_prepare() - prepare a conversion context.
93 *
94 * @ipu: the IPU handle to use for the conversions
95 * @ic_task: the IC task to use for the conversions
96 * @in: input image format
97 * @out: output image format
98 * @rot_mode: rotation mode
99 * @complete: run completion callback
100 * @complete_context: a context pointer for the completion callback
101 *
102 * Returns an opaque conversion context pointer on success, error pointer
103 * on failure. The input/output formats and rotation mode must already meet
104 * IPU restrictions.
105 *
106 * In V4L2, drivers should call ipu_image_convert_prepare() at streamon.
107 */
108struct ipu_image_convert_ctx *
109ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
110 struct ipu_image *in, struct ipu_image *out,
111 enum ipu_rotate_mode rot_mode,
112 ipu_image_convert_cb_t complete,
113 void *complete_context);
114
115/**
116 * ipu_image_convert_unprepare() - unprepare a conversion context.
117 *
118 * @ctx: the conversion context pointer to unprepare
119 *
120 * Aborts any active or pending conversions for this context and
121 * frees the context. Any currently active or pending runs belonging
122 * to this context are returned via the completion callback with an
123 * error run status.
124 *
125 * In V4L2, drivers should call ipu_image_convert_unprepare() at
126 * streamoff.
127 */
128void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx);
129
130/**
131 * ipu_image_convert_queue() - queue a conversion run
132 *
133 * @run: the run request pointer
134 *
135 * ipu_image_convert_run must be dynamically allocated (_not_ as a local
136 * var) by callers and filled in with a previously prepared conversion
137 * context handle and the dma addr's of the input and output image buffers
138 * for this conversion run.
139 *
140 * When this conversion completes, the run pointer is returned via the
141 * completion callback. The caller is responsible for freeing the run
142 * object after it completes.
143 *
144 * In V4L2, drivers should call ipu_image_convert_queue() while
145 * streaming to queue the conversion of a received input buffer.
146 * For mem2mem devices, for example, this would be called in .device_run.
147 */
148int ipu_image_convert_queue(struct ipu_image_convert_run *run);
149
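/*
 * A combined usage sketch for prepare/queue/unprepare. The completion
 * handler and the DMA addresses (in_dma, out_dma) are caller-defined
 * assumptions, not part of this API:
 *
 *	static void my_complete(struct ipu_image_convert_run *run, void *arg)
 *	{
 *		if (run->status)
 *			pr_err("conversion failed: %d\n", run->status);
 *		kfree(run);	// the caller owns and frees the run object
 *	}
 *
 *	// at streamon; in/out/rot_mode already meet IPU restrictions:
 *	ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
 *					&in, &out, IPU_ROTATE_NONE,
 *					my_complete, NULL);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	// per received buffer while streaming (e.g. in .device_run):
 *	run = kzalloc(sizeof(*run), GFP_KERNEL);
 *	if (!run)
 *		return -ENOMEM;
 *	run->ctx = ctx;
 *	run->in_phys = in_dma;
 *	run->out_phys = out_dma;
 *	ret = ipu_image_convert_queue(run);
 *
 *	// at streamoff:
 *	ipu_image_convert_unprepare(ctx);
 */
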
150/**
151 * ipu_image_convert_abort() - abort conversions
152 *
153 * @ctx: the conversion context pointer
154 *
155 * This will abort any active or pending conversions for this context.
156 * Any currently active or pending runs belonging to this context are
157 * returned via the completion callback with an error run status.
158 */
159void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx);
160
161/**
162 * ipu_image_convert() - asynchronous image conversion request
163 *
164 * @ipu: the IPU handle to use for the conversion
165 * @ic_task: the IC task to use for the conversion
166 * @in: input image format
167 * @out: output image format
168 * @rot_mode: rotation mode
169 * @complete: run completion callback
170 * @complete_context: a context pointer for the completion callback
171 *
172 * Request a single image conversion. Returns the run that has been queued.
173 * A conversion context is automatically created and is available in run->ctx.
174 * As with ipu_image_convert_prepare(), the input/output formats and rotation
175 * mode must already meet IPU restrictions.
176 *
177 * On successful return the caller can queue more run requests if needed, using
178 * the prepared context in run->ctx. The caller is responsible for unpreparing
179 * the context when no more conversion requests are needed.
180 */
181struct ipu_image_convert_run *
182ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
183 struct ipu_image *in, struct ipu_image *out,
184 enum ipu_rotate_mode rot_mode,
185 ipu_image_convert_cb_t complete,
186 void *complete_context);
187
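/*
 * A brief sketch of the one-shot asynchronous form. The completion
 * handler here is assumed to only signal the caller (it must not free
 * the run, since the caller frees it after unpreparing):
 *
 *	run = ipu_image_convert(ipu, IC_TASK_POST_PROCESSOR, &in, &out,
 *				IPU_ROTATE_90_RIGHT, my_notify, NULL);
 *	if (IS_ERR(run))
 *		return PTR_ERR(run);
 *	// ... once my_notify() has reported completion and no further
 *	// conversions are needed:
 *	ipu_image_convert_unprepare(run->ctx);
 *	kfree(run);
 */
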
188/**
189 * ipu_image_convert_sync() - synchronous single image conversion request
190 *
191 * @ipu: the IPU handle to use for the conversion
192 * @ic_task: the IC task to use for the conversion
193 * @in: input image format
194 * @out: output image format
195 * @rot_mode: rotation mode
196 *
197 * Carry out a single image conversion. Returns when the conversion
198 * completes. The input/output formats and rotation mode must already
199 * meet IPU restrictions. The created context is automatically unprepared
200 * and the run freed on return.
201 */
202int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
203 struct ipu_image *in, struct ipu_image *out,
204 enum ipu_rotate_mode rot_mode);
205
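/*
 * Minimal sketch of the synchronous form, which blocks until the
 * conversion finishes or an internal timeout expires:
 *
 *	ret = ipu_image_convert_sync(ipu, IC_TASK_POST_PROCESSOR,
 *				     &in, &out, IPU_ROTATE_NONE);
 *	if (ret)
 *		dev_err(dev, "conversion failed: %d\n", ret);
 */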
206
207#endif /* __IMX_IPU_IMAGE_CONVERT_H__ */