-rw-r--r--  arch/arm/plat-mxc/include/mach/ipu.h   |  181
-rw-r--r--  arch/arm/plat-mxc/include/mach/irqs.h  |   10
-rw-r--r--  drivers/dma/Kconfig                    |   19
-rw-r--r--  drivers/dma/Makefile                   |    1
-rw-r--r--  drivers/dma/ipu/Makefile               |    1
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c            | 1740
-rw-r--r--  drivers/dma/ipu/ipu_intern.h           |  176
-rw-r--r--  drivers/dma/ipu/ipu_irq.c              |  413
8 files changed, 2540 insertions(+), 1 deletion(-)
diff --git a/arch/arm/plat-mxc/include/mach/ipu.h b/arch/arm/plat-mxc/include/mach/ipu.h
new file mode 100644
index 000000000000..a9221f1cc1a0
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/ipu.h
@@ -0,0 +1,181 @@
1 | /* | ||
2 | * Copyright (C) 2008 | ||
3 | * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> | ||
4 | * | ||
5 | * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef _IPU_H_ | ||
13 | #define _IPU_H_ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | #include <linux/dmaengine.h> | ||
17 | |||
18 | /* IPU DMA Controller channel definitions. */ | ||
19 | enum ipu_channel { | ||
20 | IDMAC_IC_0 = 0, /* IC (encoding task) to memory */ | ||
21 | IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */ | ||
22 | IDMAC_ADC_0 = 1, | ||
23 | IDMAC_IC_2 = 2, | ||
24 | IDMAC_ADC_1 = 2, | ||
25 | IDMAC_IC_3 = 3, | ||
26 | IDMAC_IC_4 = 4, | ||
27 | IDMAC_IC_5 = 5, | ||
28 | IDMAC_IC_6 = 6, | ||
29 | IDMAC_IC_7 = 7, /* IC (sensor data) to memory */ | ||
30 | IDMAC_IC_8 = 8, | ||
31 | IDMAC_IC_9 = 9, | ||
32 | IDMAC_IC_10 = 10, | ||
33 | IDMAC_IC_11 = 11, | ||
34 | IDMAC_IC_12 = 12, | ||
35 | IDMAC_IC_13 = 13, | ||
36 | IDMAC_SDC_0 = 14, /* Background synchronous display data */ | ||
37 | IDMAC_SDC_1 = 15, /* Foreground data (overlay) */ | ||
38 | IDMAC_SDC_2 = 16, | ||
39 | IDMAC_SDC_3 = 17, | ||
40 | IDMAC_ADC_2 = 18, | ||
41 | IDMAC_ADC_3 = 19, | ||
42 | IDMAC_ADC_4 = 20, | ||
43 | IDMAC_ADC_5 = 21, | ||
44 | IDMAC_ADC_6 = 22, | ||
45 | IDMAC_ADC_7 = 23, | ||
46 | IDMAC_PF_0 = 24, | ||
47 | IDMAC_PF_1 = 25, | ||
48 | IDMAC_PF_2 = 26, | ||
49 | IDMAC_PF_3 = 27, | ||
50 | IDMAC_PF_4 = 28, | ||
51 | IDMAC_PF_5 = 29, | ||
52 | IDMAC_PF_6 = 30, | ||
53 | IDMAC_PF_7 = 31, | ||
54 | }; | ||
55 | |||
56 | /* Order significant! */ | ||
57 | enum ipu_channel_status { | ||
58 | IPU_CHANNEL_FREE, | ||
59 | IPU_CHANNEL_INITIALIZED, | ||
60 | IPU_CHANNEL_READY, | ||
61 | IPU_CHANNEL_ENABLED, | ||
62 | }; | ||
63 | |||
64 | #define IPU_CHANNELS_NUM 32 | ||
65 | |||
66 | enum pixel_fmt { | ||
67 | /* 1 byte */ | ||
68 | IPU_PIX_FMT_GENERIC, | ||
69 | IPU_PIX_FMT_RGB332, | ||
70 | IPU_PIX_FMT_YUV420P, | ||
71 | IPU_PIX_FMT_YUV422P, | ||
72 | IPU_PIX_FMT_YUV420P2, | ||
73 | IPU_PIX_FMT_YVU422P, | ||
74 | /* 2 bytes */ | ||
75 | IPU_PIX_FMT_RGB565, | ||
76 | IPU_PIX_FMT_RGB666, | ||
77 | IPU_PIX_FMT_BGR666, | ||
78 | IPU_PIX_FMT_YUYV, | ||
79 | IPU_PIX_FMT_UYVY, | ||
80 | /* 3 bytes */ | ||
81 | IPU_PIX_FMT_RGB24, | ||
82 | IPU_PIX_FMT_BGR24, | ||
83 | /* 4 bytes */ | ||
84 | IPU_PIX_FMT_GENERIC_32, | ||
85 | IPU_PIX_FMT_RGB32, | ||
86 | IPU_PIX_FMT_BGR32, | ||
87 | IPU_PIX_FMT_ABGR32, | ||
88 | IPU_PIX_FMT_BGRA32, | ||
89 | IPU_PIX_FMT_RGBA32, | ||
90 | }; | ||
91 | |||
92 | enum ipu_color_space { | ||
93 | IPU_COLORSPACE_RGB, | ||
94 | IPU_COLORSPACE_YCBCR, | ||
95 | IPU_COLORSPACE_YUV | ||
96 | }; | ||
97 | |||
98 | /* | ||
99 | * Enumeration of IPU rotation modes | ||
100 | */ | ||
101 | enum ipu_rotate_mode { | ||
102 | /* Note the enum values correspond to BAM value */ | ||
103 | IPU_ROTATE_NONE = 0, | ||
104 | IPU_ROTATE_VERT_FLIP = 1, | ||
105 | IPU_ROTATE_HORIZ_FLIP = 2, | ||
106 | IPU_ROTATE_180 = 3, | ||
107 | IPU_ROTATE_90_RIGHT = 4, | ||
108 | IPU_ROTATE_90_RIGHT_VFLIP = 5, | ||
109 | IPU_ROTATE_90_RIGHT_HFLIP = 6, | ||
110 | IPU_ROTATE_90_LEFT = 7, | ||
111 | }; | ||
112 | |||
113 | struct ipu_platform_data { | ||
114 | unsigned int irq_base; | ||
115 | }; | ||
116 | |||
117 | /* | ||
118 | * Enumeration of DI ports for ADC. | ||
119 | */ | ||
120 | enum display_port { | ||
121 | DISP0, | ||
122 | DISP1, | ||
123 | DISP2, | ||
124 | DISP3 | ||
125 | }; | ||
126 | |||
127 | struct idmac_video_param { | ||
128 | unsigned short in_width; | ||
129 | unsigned short in_height; | ||
130 | uint32_t in_pixel_fmt; | ||
131 | unsigned short out_width; | ||
132 | unsigned short out_height; | ||
133 | uint32_t out_pixel_fmt; | ||
134 | unsigned short out_stride; | ||
135 | bool graphics_combine_en; | ||
136 | bool global_alpha_en; | ||
137 | bool key_color_en; | ||
138 | enum display_port disp; | ||
139 | unsigned short out_left; | ||
140 | unsigned short out_top; | ||
141 | }; | ||
142 | |||
143 | /* | ||
144 | * Union of initialization parameters for a logical channel. So far only video | ||
145 | * parameters are used. | ||
146 | */ | ||
147 | union ipu_channel_param { | ||
148 | struct idmac_video_param video; | ||
149 | }; | ||
150 | |||
151 | struct idmac_tx_desc { | ||
152 | struct dma_async_tx_descriptor txd; | ||
153 | struct scatterlist *sg; /* scatterlist for this */ | ||
154 | unsigned int sg_len; /* tx-descriptor. */ | ||
155 | struct list_head list; | ||
156 | }; | ||
157 | |||
158 | struct idmac_channel { | ||
159 | struct dma_chan dma_chan; | ||
160 | dma_cookie_t completed; /* last completed cookie */ | ||
161 | union ipu_channel_param params; | ||
162 | enum ipu_channel link; /* input channel, linked to the output */ | ||
163 | enum ipu_channel_status status; | ||
164 | void *client; /* Only one client per channel */ | ||
165 | unsigned int n_tx_desc; | ||
166 | struct idmac_tx_desc *desc; /* allocated tx-descriptors */ | ||
167 | struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */ | ||
168 | struct list_head free_list; /* free tx-descriptors */ | ||
169 | struct list_head queue; /* queued tx-descriptors */ | ||
170 | spinlock_t lock; /* protects sg[0,1], queue */ | ||
171 | struct mutex chan_mutex; /* protects status, cookie, free_list */ | ||
172 | bool sec_chan_en; | ||
173 | int active_buffer; | ||
174 | unsigned int eof_irq; | ||
175 | char eof_name[16]; /* EOF IRQ name for request_irq() */ | ||
176 | }; | ||
177 | |||
178 | #define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd) | ||
179 | #define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan) | ||
180 | |||
181 | #endif | ||
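The header above is everything a dmaengine client needs in order to address specific IDMAC channels. As a rough, hedged sketch of how a consumer might claim one of them through the generic channel-request API (the function names and the choice of IDMAC_SDC_0 below are illustrative, not part of this patch):

#include <linux/dmaengine.h>
#include <mach/ipu.h>

/* Sketch only: pick the dma_chan whose chan_id matches a given enum ipu_channel. */
static bool ipu_chan_filter(struct dma_chan *chan, void *arg)
{
	/* dma_chan.chan_id maps 1:1 onto enum ipu_channel */
	return chan->chan_id == (enum ipu_channel)(unsigned long)arg;
}

static struct dma_chan *claim_sdc_bg(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* request the SDC background channel; IDMAC_SDC_1 would be the overlay */
	return dma_request_channel(mask, ipu_chan_filter,
				   (void *)(unsigned long)IDMAC_SDC_0);
}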
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index e06d3cb0ee11..c02b8fc2d821 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -35,7 +35,15 @@
 #define MXC_BOARD_IRQ_START	(MXC_INTERNAL_IRQS + MXC_GPIO_IRQS)
 #define MXC_BOARD_IRQS	16
 
-#define NR_IRQS		(MXC_BOARD_IRQ_START + MXC_BOARD_IRQS)
+#define MXC_IPU_IRQ_START	(MXC_BOARD_IRQ_START + MXC_BOARD_IRQS)
+
+#ifdef CONFIG_MX3_IPU_IRQS
+#define MX3_IPU_IRQS CONFIG_MX3_IPU_IRQS
+#else
+#define MX3_IPU_IRQS 0
+#endif
+
+#define NR_IRQS		(MXC_IPU_IRQ_START + MX3_IPU_IRQS)
 
 extern void imx_irq_set_priority(unsigned char irq, unsigned char prio);
 
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e34b06420816..48ea59e79672 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,25 @@ config MV_XOR
 	---help---
 	  Enable support for the Marvell XOR engine.
 
+config MX3_IPU
+	bool "MX3x Image Processing Unit support"
+	depends on ARCH_MX3
+	select DMA_ENGINE
+	default y
+	help
+	  If you plan to use the Image Processing unit in the i.MX3x, say
+	  Y here. If unsure, select Y.
+
+config MX3_IPU_IRQS
+	int "Number of dynamically mapped interrupts for IPU"
+	depends on MX3_IPU
+	range 2 137
+	default 4
+	help
+	  Out of 137 interrupt sources on i.MX31 IPU only very few are used.
+	  To avoid bloating the irq_desc[] array we allocate a sufficient
+	  number of IRQ slots and map them dynamically to specific sources.
+
 config DMA_ENGINE
 	bool
 
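The help text above is the key design point: the 137 IPU interrupt sources are multiplexed onto a small, configurable window of CONFIG_MX3_IPU_IRQS Linux IRQ numbers appended after the board IRQs (see the irqs.h hunk earlier). A back-of-the-envelope sketch of the effect on NR_IRQS, using placeholder base values rather than the real MXC constants:

/* Placeholder numbers for illustration only - the real MXC_* values live
 * in the mach-mxc headers and differ per SoC. */
#define MXC_INTERNAL_IRQS	64
#define MXC_GPIO_IRQS		96
#define MXC_BOARD_IRQ_START	(MXC_INTERNAL_IRQS + MXC_GPIO_IRQS)	/* 160 */
#define MXC_BOARD_IRQS		16
#define MXC_IPU_IRQ_START	(MXC_BOARD_IRQ_START + MXC_BOARD_IRQS)	/* 176 */
#define MX3_IPU_IRQS		4	/* CONFIG_MX3_IPU_IRQS=4, the default */

#define NR_IRQS			(MXC_IPU_IRQ_START + MX3_IPU_IRQS)	/* 180, not 176 + 137 */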
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 14f59527d4f6..2e5dc96700d2 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+obj-$(CONFIG_MX3_IPU) += ipu/
diff --git a/drivers/dma/ipu/Makefile b/drivers/dma/ipu/Makefile
new file mode 100644
index 000000000000..6704cf48326d
--- /dev/null
+++ b/drivers/dma/ipu/Makefile
@@ -0,0 +1 @@
obj-y += ipu_irq.o ipu_idmac.o
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
new file mode 100644
index 000000000000..1f154d08e98f
--- /dev/null
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -0,0 +1,1740 @@
1 | /* | ||
2 | * Copyright (C) 2008 | ||
3 | * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> | ||
4 | * | ||
5 | * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/list.h> | ||
18 | #include <linux/clk.h> | ||
19 | #include <linux/vmalloc.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/io.h> | ||
23 | |||
24 | #include <mach/ipu.h> | ||
25 | |||
26 | #include "ipu_intern.h" | ||
27 | |||
28 | #define FS_VF_IN_VALID 0x00000002 | ||
29 | #define FS_ENC_IN_VALID 0x00000001 | ||
30 | |||
31 | /* | ||
32 | * There can be only one, we could allocate it dynamically, but then we'd have | ||
33 | * to add an extra parameter to some functions, and use something as ugly as | ||
34 | * struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device)); | ||
35 | * in the ISR | ||
36 | */ | ||
37 | static struct ipu ipu_data; | ||
38 | |||
39 | #define to_ipu(id) container_of(id, struct ipu, idmac) | ||
40 | |||
41 | static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg) | ||
42 | { | ||
43 | return __raw_readl(ipu->reg_ic + reg); | ||
44 | } | ||
45 | |||
46 | #define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF) | ||
47 | |||
48 | static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg) | ||
49 | { | ||
50 | __raw_writel(value, ipu->reg_ic + reg); | ||
51 | } | ||
52 | |||
53 | #define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF) | ||
54 | |||
55 | static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg) | ||
56 | { | ||
57 | return __raw_readl(ipu->reg_ipu + reg); | ||
58 | } | ||
59 | |||
60 | static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg) | ||
61 | { | ||
62 | __raw_writel(value, ipu->reg_ipu + reg); | ||
63 | } | ||
64 | |||
65 | /***************************************************************************** | ||
66 | * IPU / IC common functions | ||
67 | */ | ||
68 | static void dump_idmac_reg(struct ipu *ipu) | ||
69 | { | ||
70 | dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, " | ||
71 | "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n", | ||
72 | idmac_read_icreg(ipu, IDMAC_CONF), | ||
73 | idmac_read_icreg(ipu, IC_CONF), | ||
74 | idmac_read_icreg(ipu, IDMAC_CHA_EN), | ||
75 | idmac_read_icreg(ipu, IDMAC_CHA_PRI), | ||
76 | idmac_read_icreg(ipu, IDMAC_CHA_BUSY)); | ||
77 | dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, " | ||
78 | "DB_MODE 0x%x, TASKS_STAT 0x%x\n", | ||
79 | idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY), | ||
80 | idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY), | ||
81 | idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF), | ||
82 | idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL), | ||
83 | idmac_read_ipureg(ipu, IPU_TASKS_STAT)); | ||
84 | } | ||
85 | |||
86 | static uint32_t bytes_per_pixel(enum pixel_fmt fmt) | ||
87 | { | ||
88 | switch (fmt) { | ||
89 | case IPU_PIX_FMT_GENERIC: /* generic data */ | ||
90 | case IPU_PIX_FMT_RGB332: | ||
91 | case IPU_PIX_FMT_YUV420P: | ||
92 | case IPU_PIX_FMT_YUV422P: | ||
93 | default: | ||
94 | return 1; | ||
95 | case IPU_PIX_FMT_RGB565: | ||
96 | case IPU_PIX_FMT_YUYV: | ||
97 | case IPU_PIX_FMT_UYVY: | ||
98 | return 2; | ||
99 | case IPU_PIX_FMT_BGR24: | ||
100 | case IPU_PIX_FMT_RGB24: | ||
101 | return 3; | ||
102 | case IPU_PIX_FMT_GENERIC_32: /* generic data */ | ||
103 | case IPU_PIX_FMT_BGR32: | ||
104 | case IPU_PIX_FMT_RGB32: | ||
105 | case IPU_PIX_FMT_ABGR32: | ||
106 | return 4; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | /* Enable / disable direct write to memory by the Camera Sensor Interface */ | ||
111 | static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) | ||
112 | { | ||
113 | uint32_t ic_conf, mask; | ||
114 | |||
115 | switch (channel) { | ||
116 | case IDMAC_IC_0: | ||
117 | mask = IC_CONF_PRPENC_EN; | ||
118 | break; | ||
119 | case IDMAC_IC_7: | ||
120 | mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN; | ||
121 | break; | ||
122 | default: | ||
123 | return; | ||
124 | } | ||
125 | ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask; | ||
126 | idmac_write_icreg(ipu, ic_conf, IC_CONF); | ||
127 | } | ||
128 | |||
129 | static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel) | ||
130 | { | ||
131 | uint32_t ic_conf, mask; | ||
132 | |||
133 | switch (channel) { | ||
134 | case IDMAC_IC_0: | ||
135 | mask = IC_CONF_PRPENC_EN; | ||
136 | break; | ||
137 | case IDMAC_IC_7: | ||
138 | mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN; | ||
139 | break; | ||
140 | default: | ||
141 | return; | ||
142 | } | ||
143 | ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask; | ||
144 | idmac_write_icreg(ipu, ic_conf, IC_CONF); | ||
145 | } | ||
146 | |||
147 | static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel) | ||
148 | { | ||
149 | uint32_t stat = TASK_STAT_IDLE; | ||
150 | uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT); | ||
151 | |||
152 | switch (channel) { | ||
153 | case IDMAC_IC_7: | ||
154 | stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >> | ||
155 | TSTAT_CSI2MEM_OFFSET; | ||
156 | break; | ||
157 | case IDMAC_IC_0: | ||
158 | case IDMAC_SDC_0: | ||
159 | case IDMAC_SDC_1: | ||
160 | default: | ||
161 | break; | ||
162 | } | ||
163 | return stat; | ||
164 | } | ||
165 | |||
166 | struct chan_param_mem_planar { | ||
167 | /* Word 0 */ | ||
168 | u32 xv:10; | ||
169 | u32 yv:10; | ||
170 | u32 xb:12; | ||
171 | |||
172 | u32 yb:12; | ||
173 | u32 res1:2; | ||
174 | u32 nsb:1; | ||
175 | u32 lnpb:6; | ||
176 | u32 ubo_l:11; | ||
177 | |||
178 | u32 ubo_h:15; | ||
179 | u32 vbo_l:17; | ||
180 | |||
181 | u32 vbo_h:9; | ||
182 | u32 res2:3; | ||
183 | u32 fw:12; | ||
184 | u32 fh_l:8; | ||
185 | |||
186 | u32 fh_h:4; | ||
187 | u32 res3:28; | ||
188 | |||
189 | /* Word 1 */ | ||
190 | u32 eba0; | ||
191 | |||
192 | u32 eba1; | ||
193 | |||
194 | u32 bpp:3; | ||
195 | u32 sl:14; | ||
196 | u32 pfs:3; | ||
197 | u32 bam:3; | ||
198 | u32 res4:2; | ||
199 | u32 npb:6; | ||
200 | u32 res5:1; | ||
201 | |||
202 | u32 sat:2; | ||
203 | u32 res6:30; | ||
204 | } __attribute__ ((packed)); | ||
205 | |||
206 | struct chan_param_mem_interleaved { | ||
207 | /* Word 0 */ | ||
208 | u32 xv:10; | ||
209 | u32 yv:10; | ||
210 | u32 xb:12; | ||
211 | |||
212 | u32 yb:12; | ||
213 | u32 sce:1; | ||
214 | u32 res1:1; | ||
215 | u32 nsb:1; | ||
216 | u32 lnpb:6; | ||
217 | u32 sx:10; | ||
218 | u32 sy_l:1; | ||
219 | |||
220 | u32 sy_h:9; | ||
221 | u32 ns:10; | ||
222 | u32 sm:10; | ||
223 | u32 sdx_l:3; | ||
224 | |||
225 | u32 sdx_h:2; | ||
226 | u32 sdy:5; | ||
227 | u32 sdrx:1; | ||
228 | u32 sdry:1; | ||
229 | u32 sdr1:1; | ||
230 | u32 res2:2; | ||
231 | u32 fw:12; | ||
232 | u32 fh_l:8; | ||
233 | |||
234 | u32 fh_h:4; | ||
235 | u32 res3:28; | ||
236 | |||
237 | /* Word 1 */ | ||
238 | u32 eba0; | ||
239 | |||
240 | u32 eba1; | ||
241 | |||
242 | u32 bpp:3; | ||
243 | u32 sl:14; | ||
244 | u32 pfs:3; | ||
245 | u32 bam:3; | ||
246 | u32 res4:2; | ||
247 | u32 npb:6; | ||
248 | u32 res5:1; | ||
249 | |||
250 | u32 sat:2; | ||
251 | u32 scc:1; | ||
252 | u32 ofs0:5; | ||
253 | u32 ofs1:5; | ||
254 | u32 ofs2:5; | ||
255 | u32 ofs3:5; | ||
256 | u32 wid0:3; | ||
257 | u32 wid1:3; | ||
258 | u32 wid2:3; | ||
259 | |||
260 | u32 wid3:3; | ||
261 | u32 dec_sel:1; | ||
262 | u32 res6:28; | ||
263 | } __attribute__ ((packed)); | ||
264 | |||
265 | union chan_param_mem { | ||
266 | struct chan_param_mem_planar pp; | ||
267 | struct chan_param_mem_interleaved ip; | ||
268 | }; | ||
269 | |||
270 | static void ipu_ch_param_set_plane_offset(union chan_param_mem *params, | ||
271 | u32 u_offset, u32 v_offset) | ||
272 | { | ||
273 | params->pp.ubo_l = u_offset & 0x7ff; | ||
274 | params->pp.ubo_h = u_offset >> 11; | ||
275 | params->pp.vbo_l = v_offset & 0x1ffff; | ||
276 | params->pp.vbo_h = v_offset >> 17; | ||
277 | } | ||
278 | |||
279 | static void ipu_ch_param_set_size(union chan_param_mem *params, | ||
280 | uint32_t pixel_fmt, uint16_t width, | ||
281 | uint16_t height, uint16_t stride) | ||
282 | { | ||
283 | u32 u_offset; | ||
284 | u32 v_offset; | ||
285 | |||
286 | params->pp.fw = width - 1; | ||
287 | params->pp.fh_l = height - 1; | ||
288 | params->pp.fh_h = (height - 1) >> 8; | ||
289 | params->pp.sl = stride - 1; | ||
290 | |||
291 | switch (pixel_fmt) { | ||
292 | case IPU_PIX_FMT_GENERIC: | ||
293 | /*Represents 8-bit Generic data */ | ||
294 | params->pp.bpp = 3; | ||
295 | params->pp.pfs = 7; | ||
296 | params->pp.npb = 31; | ||
297 | params->pp.sat = 2; /* SAT = use 32-bit access */ | ||
298 | break; | ||
299 | case IPU_PIX_FMT_GENERIC_32: | ||
300 | /*Represents 32-bit Generic data */ | ||
301 | params->pp.bpp = 0; | ||
302 | params->pp.pfs = 7; | ||
303 | params->pp.npb = 7; | ||
304 | params->pp.sat = 2; /* SAT = use 32-bit access */ | ||
305 | break; | ||
306 | case IPU_PIX_FMT_RGB565: | ||
307 | params->ip.bpp = 2; | ||
308 | params->ip.pfs = 4; | ||
309 | params->ip.npb = 7; | ||
310 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
311 | params->ip.ofs0 = 0; /* Red bit offset */ | ||
312 | params->ip.ofs1 = 5; /* Green bit offset */ | ||
313 | params->ip.ofs2 = 11; /* Blue bit offset */ | ||
314 | params->ip.ofs3 = 16; /* Alpha bit offset */ | ||
315 | params->ip.wid0 = 4; /* Red bit width - 1 */ | ||
316 | params->ip.wid1 = 5; /* Green bit width - 1 */ | ||
317 | params->ip.wid2 = 4; /* Blue bit width - 1 */ | ||
318 | break; | ||
319 | case IPU_PIX_FMT_BGR24: | ||
320 | params->ip.bpp = 1; /* 24 BPP & RGB PFS */ | ||
321 | params->ip.pfs = 4; | ||
322 | params->ip.npb = 7; | ||
323 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
324 | params->ip.ofs0 = 0; /* Red bit offset */ | ||
325 | params->ip.ofs1 = 8; /* Green bit offset */ | ||
326 | params->ip.ofs2 = 16; /* Blue bit offset */ | ||
327 | params->ip.ofs3 = 24; /* Alpha bit offset */ | ||
328 | params->ip.wid0 = 7; /* Red bit width - 1 */ | ||
329 | params->ip.wid1 = 7; /* Green bit width - 1 */ | ||
330 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | ||
331 | break; | ||
332 | case IPU_PIX_FMT_RGB24: | ||
333 | params->ip.bpp = 1; /* 24 BPP & RGB PFS */ | ||
334 | params->ip.pfs = 4; | ||
335 | params->ip.npb = 7; | ||
336 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
337 | params->ip.ofs0 = 16; /* Red bit offset */ | ||
338 | params->ip.ofs1 = 8; /* Green bit offset */ | ||
339 | params->ip.ofs2 = 0; /* Blue bit offset */ | ||
340 | params->ip.ofs3 = 24; /* Alpha bit offset */ | ||
341 | params->ip.wid0 = 7; /* Red bit width - 1 */ | ||
342 | params->ip.wid1 = 7; /* Green bit width - 1 */ | ||
343 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | ||
344 | break; | ||
345 | case IPU_PIX_FMT_BGRA32: | ||
346 | case IPU_PIX_FMT_BGR32: | ||
347 | params->ip.bpp = 0; | ||
348 | params->ip.pfs = 4; | ||
349 | params->ip.npb = 7; | ||
350 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
351 | params->ip.ofs0 = 8; /* Red bit offset */ | ||
352 | params->ip.ofs1 = 16; /* Green bit offset */ | ||
353 | params->ip.ofs2 = 24; /* Blue bit offset */ | ||
354 | params->ip.ofs3 = 0; /* Alpha bit offset */ | ||
355 | params->ip.wid0 = 7; /* Red bit width - 1 */ | ||
356 | params->ip.wid1 = 7; /* Green bit width - 1 */ | ||
357 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | ||
358 | params->ip.wid3 = 7; /* Alpha bit width - 1 */ | ||
359 | break; | ||
360 | case IPU_PIX_FMT_RGBA32: | ||
361 | case IPU_PIX_FMT_RGB32: | ||
362 | params->ip.bpp = 0; | ||
363 | params->ip.pfs = 4; | ||
364 | params->ip.npb = 7; | ||
365 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
366 | params->ip.ofs0 = 24; /* Red bit offset */ | ||
367 | params->ip.ofs1 = 16; /* Green bit offset */ | ||
368 | params->ip.ofs2 = 8; /* Blue bit offset */ | ||
369 | params->ip.ofs3 = 0; /* Alpha bit offset */ | ||
370 | params->ip.wid0 = 7; /* Red bit width - 1 */ | ||
371 | params->ip.wid1 = 7; /* Green bit width - 1 */ | ||
372 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | ||
373 | params->ip.wid3 = 7; /* Alpha bit width - 1 */ | ||
374 | break; | ||
375 | case IPU_PIX_FMT_ABGR32: | ||
376 | params->ip.bpp = 0; | ||
377 | params->ip.pfs = 4; | ||
378 | params->ip.npb = 7; | ||
379 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
380 | params->ip.ofs0 = 8; /* Red bit offset */ | ||
381 | params->ip.ofs1 = 16; /* Green bit offset */ | ||
382 | params->ip.ofs2 = 24; /* Blue bit offset */ | ||
383 | params->ip.ofs3 = 0; /* Alpha bit offset */ | ||
384 | params->ip.wid0 = 7; /* Red bit width - 1 */ | ||
385 | params->ip.wid1 = 7; /* Green bit width - 1 */ | ||
386 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | ||
387 | params->ip.wid3 = 7; /* Alpha bit width - 1 */ | ||
388 | break; | ||
389 | case IPU_PIX_FMT_UYVY: | ||
390 | params->ip.bpp = 2; | ||
391 | params->ip.pfs = 6; | ||
392 | params->ip.npb = 7; | ||
393 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
394 | break; | ||
395 | case IPU_PIX_FMT_YUV420P2: | ||
396 | case IPU_PIX_FMT_YUV420P: | ||
397 | params->ip.bpp = 3; | ||
398 | params->ip.pfs = 3; | ||
399 | params->ip.npb = 7; | ||
400 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
401 | u_offset = stride * height; | ||
402 | v_offset = u_offset + u_offset / 4; | ||
403 | ipu_ch_param_set_plane_offset(params, u_offset, v_offset); | ||
404 | break; | ||
405 | case IPU_PIX_FMT_YVU422P: | ||
406 | params->ip.bpp = 3; | ||
407 | params->ip.pfs = 2; | ||
408 | params->ip.npb = 7; | ||
409 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
410 | v_offset = stride * height; | ||
411 | u_offset = v_offset + v_offset / 2; | ||
412 | ipu_ch_param_set_plane_offset(params, u_offset, v_offset); | ||
413 | break; | ||
414 | case IPU_PIX_FMT_YUV422P: | ||
415 | params->ip.bpp = 3; | ||
416 | params->ip.pfs = 2; | ||
417 | params->ip.npb = 7; | ||
418 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
419 | u_offset = stride * height; | ||
420 | v_offset = u_offset + u_offset / 2; | ||
421 | ipu_ch_param_set_plane_offset(params, u_offset, v_offset); | ||
422 | break; | ||
423 | default: | ||
424 | dev_err(ipu_data.dev, | ||
425 | "mxc ipu: unimplemented pixel format %d\n", pixel_fmt); | ||
426 | break; | ||
427 | } | ||
428 | |||
429 | params->pp.nsb = 1; | ||
430 | } | ||
431 | |||
432 | static void ipu_ch_param_set_burst_size(union chan_param_mem *params, | ||
433 | uint16_t burst_pixels) | ||
434 | { | ||
435 | params->pp.npb = burst_pixels - 1; | ||
436 | }; | ||
437 | |||
438 | static void ipu_ch_param_set_buffer(union chan_param_mem *params, | ||
439 | dma_addr_t buf0, dma_addr_t buf1) | ||
440 | { | ||
441 | params->pp.eba0 = buf0; | ||
442 | params->pp.eba1 = buf1; | ||
443 | }; | ||
444 | |||
445 | static void ipu_ch_param_set_rotation(union chan_param_mem *params, | ||
446 | enum ipu_rotate_mode rotate) | ||
447 | { | ||
448 | params->pp.bam = rotate; | ||
449 | }; | ||
450 | |||
451 | static void ipu_write_param_mem(uint32_t addr, uint32_t *data, | ||
452 | uint32_t num_words) | ||
453 | { | ||
454 | for (; num_words > 0; num_words--) { | ||
455 | dev_dbg(ipu_data.dev, | ||
456 | "write param mem - addr = 0x%08X, data = 0x%08X\n", | ||
457 | addr, *data); | ||
458 | idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR); | ||
459 | idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA); | ||
460 | addr++; | ||
461 | if ((addr & 0x7) == 5) { | ||
462 | addr &= ~0x7; /* set to word 0 */ | ||
463 | addr += 8; /* increment to next row */ | ||
464 | } | ||
465 | } | ||
466 | } | ||
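The address arithmetic above reflects the layout of the channel parameter memory: each channel owns two rows of eight 32-bit words, of which only words 0-4 are populated, so the writer skips from word 4 of one row straight to word 0 of the next. A small stand-alone sketch of the resulting address sequence for one channel (channel 14, i.e. IDMAC_SDC_0, chosen only as an example; the base value follows dma_param_addr() further down):

#include <stdio.h>

int main(void)
{
	/* dma_param_addr(14): 0x10000 | (14 << 4) == 0x100e0 */
	unsigned int addr = 0x10000 | (14 << 4);
	int i;

	for (i = 0; i < 10; i++) {
		/* prints 0x100e0..0x100e4, then 0x100e8..0x100ec */
		printf("word %2d -> IMA address 0x%05x\n", i, addr);
		addr++;
		if ((addr & 0x7) == 5) {
			addr &= ~0x7;	/* back to word 0 ... */
			addr += 8;	/* ... of the next row */
		}
	}
	return 0;
}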
467 | |||
468 | static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size, | ||
469 | uint32_t *resize_coeff, | ||
470 | uint32_t *downsize_coeff) | ||
471 | { | ||
472 | uint32_t temp_size; | ||
473 | uint32_t temp_downsize; | ||
474 | |||
475 | *resize_coeff = 1 << 13; | ||
476 | *downsize_coeff = 1 << 13; | ||
477 | |||
478 | /* Cannot downsize more than 8:1 */ | ||
479 | if (out_size << 3 < in_size) | ||
480 | return -EINVAL; | ||
481 | |||
482 | /* compute downsizing coefficient */ | ||
483 | temp_downsize = 0; | ||
484 | temp_size = in_size; | ||
485 | while (temp_size >= out_size * 2 && temp_downsize < 2) { | ||
486 | temp_size >>= 1; | ||
487 | temp_downsize++; | ||
488 | } | ||
489 | *downsize_coeff = temp_downsize; | ||
490 | |||
491 | /* | ||
492 | * compute resizing coefficient using the following formula: | ||
493 | * resize_coeff = M * (SI - 1) / (SO - 1) | ||
494 | * where M = 2^13, SI - input size, SO - output size | ||
495 | */ | ||
496 | *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1); | ||
497 | if (*resize_coeff >= 16384L) { | ||
498 | dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n"); | ||
499 | *resize_coeff = 0x3FFF; | ||
500 | } | ||
501 | |||
502 | dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, " | ||
503 | "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size, | ||
504 | *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0, | ||
505 | ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff); | ||
506 | |||
507 | return 0; | ||
508 | } | ||
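A worked example may help here (the sizes are arbitrary, not taken from the driver): scaling a 640-pixel line down to 240 pixels first takes one 2:1 pre-decimation pass (temp_size = 320, downsize_coeff = 1), after which resize_coeff = 8192 * (320 - 1) / (240 - 1) = 10934, i.e. roughly a further 1.33:1 resize in 1.13 fixed point. A self-contained sketch of the same arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int in = 640, out = 240;	/* arbitrary example sizes */
	unsigned int tmp = in, down = 0, resize;

	while (tmp >= out * 2 && down < 2) {	/* pre-decimation stage */
		tmp >>= 1;
		down++;
	}
	resize = 8192U * (tmp - 1) / (out - 1);

	/* prints: downsize_coeff=1 resize_coeff=10934 (~1.33) */
	printf("downsize_coeff=%u resize_coeff=%u (~%u.%02u)\n",
	       down, resize, resize / 8192, (resize % 8192) * 100 / 8192);
	return 0;
}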
509 | |||
510 | static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt) | ||
511 | { | ||
512 | switch (fmt) { | ||
513 | case IPU_PIX_FMT_RGB565: | ||
514 | case IPU_PIX_FMT_BGR24: | ||
515 | case IPU_PIX_FMT_RGB24: | ||
516 | case IPU_PIX_FMT_BGR32: | ||
517 | case IPU_PIX_FMT_RGB32: | ||
518 | return IPU_COLORSPACE_RGB; | ||
519 | default: | ||
520 | return IPU_COLORSPACE_YCBCR; | ||
521 | } | ||
522 | } | ||
523 | |||
524 | static int ipu_ic_init_prpenc(struct ipu *ipu, | ||
525 | union ipu_channel_param *params, bool src_is_csi) | ||
526 | { | ||
527 | uint32_t reg, ic_conf; | ||
528 | uint32_t downsize_coeff, resize_coeff; | ||
529 | enum ipu_color_space in_fmt, out_fmt; | ||
530 | |||
531 | /* Setup vertical resizing */ | ||
532 | calc_resize_coeffs(params->video.in_height, | ||
533 | params->video.out_height, | ||
534 | &resize_coeff, &downsize_coeff); | ||
535 | reg = (downsize_coeff << 30) | (resize_coeff << 16); | ||
536 | |||
537 | /* Setup horizontal resizing */ | ||
538 | calc_resize_coeffs(params->video.in_width, | ||
539 | params->video.out_width, | ||
540 | &resize_coeff, &downsize_coeff); | ||
541 | reg |= (downsize_coeff << 14) | resize_coeff; | ||
542 | |||
543 | /* Setup color space conversion */ | ||
544 | in_fmt = format_to_colorspace(params->video.in_pixel_fmt); | ||
545 | out_fmt = format_to_colorspace(params->video.out_pixel_fmt); | ||
546 | |||
547 | /* | ||
548 | * Colourspace conversion unsupported yet - see _init_csc() in | ||
549 | * Freescale sources | ||
550 | */ | ||
551 | if (in_fmt != out_fmt) { | ||
552 | dev_err(ipu->dev, "Colourspace conversion unsupported!\n"); | ||
553 | return -EOPNOTSUPP; | ||
554 | } | ||
555 | |||
556 | idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC); | ||
557 | |||
558 | ic_conf = idmac_read_icreg(ipu, IC_CONF); | ||
559 | |||
560 | if (src_is_csi) | ||
561 | ic_conf &= ~IC_CONF_RWS_EN; | ||
562 | else | ||
563 | ic_conf |= IC_CONF_RWS_EN; | ||
564 | |||
565 | idmac_write_icreg(ipu, ic_conf, IC_CONF); | ||
566 | |||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | static uint32_t dma_param_addr(uint32_t dma_ch) | ||
571 | { | ||
572 | /* Channel Parameter Memory */ | ||
573 | return 0x10000 | (dma_ch << 4); | ||
574 | }; | ||
575 | |||
576 | static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel, | ||
577 | bool prio) | ||
578 | { | ||
579 | u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI); | ||
580 | |||
581 | if (prio) | ||
582 | reg |= 1UL << channel; | ||
583 | else | ||
584 | reg &= ~(1UL << channel); | ||
585 | |||
586 | idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI); | ||
587 | |||
588 | dump_idmac_reg(ipu); | ||
589 | } | ||
590 | |||
591 | static uint32_t ipu_channel_conf_mask(enum ipu_channel channel) | ||
592 | { | ||
593 | uint32_t mask; | ||
594 | |||
595 | switch (channel) { | ||
596 | case IDMAC_IC_0: | ||
597 | case IDMAC_IC_7: | ||
598 | mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN; | ||
599 | break; | ||
600 | case IDMAC_SDC_0: | ||
601 | case IDMAC_SDC_1: | ||
602 | mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN; | ||
603 | break; | ||
604 | default: | ||
605 | mask = 0; | ||
606 | break; | ||
607 | } | ||
608 | |||
609 | return mask; | ||
610 | } | ||
611 | |||
612 | /** | ||
613 | * ipu_enable_channel() - enable an IPU channel. | ||
614 |  * @idmac:	IPU DMAC context, @ichan: IDMAC channel to enable. | ||
615 | * @return: 0 on success or negative error code on failure. | ||
616 | */ | ||
617 | static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) | ||
618 | { | ||
619 | struct ipu *ipu = to_ipu(idmac); | ||
620 | enum ipu_channel channel = ichan->dma_chan.chan_id; | ||
621 | uint32_t reg; | ||
622 | unsigned long flags; | ||
623 | |||
624 | spin_lock_irqsave(&ipu->lock, flags); | ||
625 | |||
626 | /* Reset to buffer 0 */ | ||
627 | idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF); | ||
628 | ichan->active_buffer = 0; | ||
629 | ichan->status = IPU_CHANNEL_ENABLED; | ||
630 | |||
631 | switch (channel) { | ||
632 | case IDMAC_SDC_0: | ||
633 | case IDMAC_SDC_1: | ||
634 | case IDMAC_IC_7: | ||
635 | ipu_channel_set_priority(ipu, channel, true); | ||
636 | default: | ||
637 | break; | ||
638 | } | ||
639 | |||
640 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); | ||
641 | |||
642 | idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN); | ||
643 | |||
644 | ipu_ic_enable_task(ipu, channel); | ||
645 | |||
646 | spin_unlock_irqrestore(&ipu->lock, flags); | ||
647 | return 0; | ||
648 | } | ||
649 | |||
650 | /** | ||
651 | * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel. | ||
652 |  * @ichan:	IDMAC channel. | ||
653 | * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code. | ||
654 | * @width: width of buffer in pixels. | ||
655 | * @height: height of buffer in pixels. | ||
656 | * @stride: stride length of buffer in pixels. | ||
657 | * @rot_mode: rotation mode of buffer. A rotation setting other than | ||
658 | * IPU_ROTATE_VERT_FLIP should only be used for input buffers of | ||
659 | * rotation channels. | ||
660 | * @phyaddr_0: buffer 0 physical address. | ||
661 | * @phyaddr_1: buffer 1 physical address. Setting this to a value other than | ||
662 | * NULL enables double buffering mode. | ||
663 | * @return: 0 on success or negative error code on failure. | ||
664 | */ | ||
665 | static int ipu_init_channel_buffer(struct idmac_channel *ichan, | ||
666 | enum pixel_fmt pixel_fmt, | ||
667 | uint16_t width, uint16_t height, | ||
668 | uint32_t stride, | ||
669 | enum ipu_rotate_mode rot_mode, | ||
670 | dma_addr_t phyaddr_0, dma_addr_t phyaddr_1) | ||
671 | { | ||
672 | enum ipu_channel channel = ichan->dma_chan.chan_id; | ||
673 | struct idmac *idmac = to_idmac(ichan->dma_chan.device); | ||
674 | struct ipu *ipu = to_ipu(idmac); | ||
675 | union chan_param_mem params = {}; | ||
676 | unsigned long flags; | ||
677 | uint32_t reg; | ||
678 | uint32_t stride_bytes; | ||
679 | |||
680 | stride_bytes = stride * bytes_per_pixel(pixel_fmt); | ||
681 | |||
682 | if (stride_bytes % 4) { | ||
683 | dev_err(ipu->dev, | ||
684 | "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n", | ||
685 | stride, stride_bytes); | ||
686 | return -EINVAL; | ||
687 | } | ||
688 | |||
689 | /* IC channel's stride must be a multiple of 8 pixels */ | ||
690 | if ((channel <= 13) && (stride % 8)) { | ||
691 | dev_err(ipu->dev, "Stride must be 8 pixel multiple\n"); | ||
692 | return -EINVAL; | ||
693 | } | ||
694 | |||
695 | /* Build parameter memory data for DMA channel */ | ||
696 | ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes); | ||
697 | ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1); | ||
698 | ipu_ch_param_set_rotation(¶ms, rot_mode); | ||
699 | /* Some channels (rotation) have restriction on burst length */ | ||
700 | switch (channel) { | ||
701 | case IDMAC_IC_7: /* Hangs with burst 8, 16, other values | ||
702 | invalid - Table 44-30 */ | ||
703 | /* | ||
704 | ipu_ch_param_set_burst_size(¶ms, 8); | ||
705 | */ | ||
706 | break; | ||
707 | case IDMAC_SDC_0: | ||
708 | case IDMAC_SDC_1: | ||
709 | /* In original code only IPU_PIX_FMT_RGB565 was setting burst */ | ||
710 | ipu_ch_param_set_burst_size(¶ms, 16); | ||
711 | break; | ||
712 | case IDMAC_IC_0: | ||
713 | default: | ||
714 | break; | ||
715 | } | ||
716 | |||
717 | spin_lock_irqsave(&ipu->lock, flags); | ||
718 | |||
719 | ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)¶ms, 10); | ||
720 | |||
721 | reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL); | ||
722 | |||
723 | if (phyaddr_1) | ||
724 | reg |= 1UL << channel; | ||
725 | else | ||
726 | reg &= ~(1UL << channel); | ||
727 | |||
728 | idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL); | ||
729 | |||
730 | ichan->status = IPU_CHANNEL_READY; | ||
731 | |||
732 | spin_unlock_irqrestore(&ipu->lock, flags); | ||
733 | |||
734 | return 0; | ||
735 | } | ||
736 | |||
737 | /** | ||
738 | * ipu_select_buffer() - mark a channel's buffer as ready. | ||
739 | * @channel: channel ID. | ||
740 | * @buffer_n: buffer number to mark ready. | ||
741 | */ | ||
742 | static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) | ||
743 | { | ||
744 | /* No locking - this is a write-one-to-set register, cleared by IPU */ | ||
745 | if (buffer_n == 0) | ||
746 | /* Mark buffer 0 as ready. */ | ||
747 | idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY); | ||
748 | else | ||
749 | /* Mark buffer 1 as ready. */ | ||
750 | idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY); | ||
751 | } | ||
752 | |||
753 | /** | ||
754 | * ipu_update_channel_buffer() - update physical address of a channel buffer. | ||
755 | * @channel: channel ID. | ||
756 | * @buffer_n: buffer number to update. | ||
757 | * 0 or 1 are the only valid values. | ||
758 | * @phyaddr: buffer physical address. | ||
759 | * @return: Returns 0 on success or negative error code on failure. This | ||
760 | * function will fail if the buffer is set to ready. | ||
761 | */ | ||
762 | /* Called under spin_lock(_irqsave)(&ichan->lock) */ | ||
763 | static int ipu_update_channel_buffer(enum ipu_channel channel, | ||
764 | int buffer_n, dma_addr_t phyaddr) | ||
765 | { | ||
766 | uint32_t reg; | ||
767 | unsigned long flags; | ||
768 | |||
769 | spin_lock_irqsave(&ipu_data.lock, flags); | ||
770 | |||
771 | if (buffer_n == 0) { | ||
772 | reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); | ||
773 | if (reg & (1UL << channel)) { | ||
774 | spin_unlock_irqrestore(&ipu_data.lock, flags); | ||
775 | return -EACCES; | ||
776 | } | ||
777 | |||
778 | /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */ | ||
779 | idmac_write_ipureg(&ipu_data, dma_param_addr(channel) + | ||
780 | 0x0008UL, IPU_IMA_ADDR); | ||
781 | idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA); | ||
782 | } else { | ||
783 | reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); | ||
784 | if (reg & (1UL << channel)) { | ||
785 | spin_unlock_irqrestore(&ipu_data.lock, flags); | ||
786 | return -EACCES; | ||
787 | } | ||
788 | |||
789 | /* Check if double-buffering is already enabled */ | ||
790 | reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL); | ||
791 | |||
792 | if (!(reg & (1UL << channel))) | ||
793 | idmac_write_ipureg(&ipu_data, reg | (1UL << channel), | ||
794 | IPU_CHA_DB_MODE_SEL); | ||
795 | |||
796 | /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */ | ||
797 | idmac_write_ipureg(&ipu_data, dma_param_addr(channel) + | ||
798 | 0x0009UL, IPU_IMA_ADDR); | ||
799 | idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA); | ||
800 | } | ||
801 | |||
802 | spin_unlock_irqrestore(&ipu_data.lock, flags); | ||
803 | |||
804 | return 0; | ||
805 | } | ||
806 | |||
807 | /* Called under spin_lock_irqsave(&ichan->lock) */ | ||
808 | static int ipu_submit_channel_buffers(struct idmac_channel *ichan, | ||
809 | struct idmac_tx_desc *desc) | ||
810 | { | ||
811 | struct scatterlist *sg; | ||
812 | int i, ret = 0; | ||
813 | |||
814 | for (i = 0, sg = desc->sg; i < 2 && sg; i++) { | ||
815 | if (!ichan->sg[i]) { | ||
816 | ichan->sg[i] = sg; | ||
817 | |||
818 | /* | ||
819 | * On first invocation this shouldn't be necessary, the | ||
820 | * call to ipu_init_channel_buffer() above will set | ||
821 | * addresses for us, so we could make it conditional | ||
822 | * on status >= IPU_CHANNEL_ENABLED, but doing it again | ||
823 | * shouldn't hurt either. | ||
824 | */ | ||
825 | ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i, | ||
826 | sg_dma_address(sg)); | ||
827 | if (ret < 0) | ||
828 | return ret; | ||
829 | |||
830 | ipu_select_buffer(ichan->dma_chan.chan_id, i); | ||
831 | |||
832 | sg = sg_next(sg); | ||
833 | } | ||
834 | } | ||
835 | |||
836 | return ret; | ||
837 | } | ||
838 | |||
839 | static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) | ||
840 | { | ||
841 | struct idmac_tx_desc *desc = to_tx_desc(tx); | ||
842 | struct idmac_channel *ichan = to_idmac_chan(tx->chan); | ||
843 | struct idmac *idmac = to_idmac(tx->chan->device); | ||
844 | struct ipu *ipu = to_ipu(idmac); | ||
845 | dma_cookie_t cookie; | ||
846 | unsigned long flags; | ||
847 | |||
848 | /* Sanity check */ | ||
849 | if (!list_empty(&desc->list)) { | ||
850 | /* The descriptor doesn't belong to client */ | ||
851 | dev_err(&ichan->dma_chan.dev->device, | ||
852 | "Descriptor %p not prepared!\n", tx); | ||
853 | return -EBUSY; | ||
854 | } | ||
855 | |||
856 | mutex_lock(&ichan->chan_mutex); | ||
857 | |||
858 | if (ichan->status < IPU_CHANNEL_READY) { | ||
859 | struct idmac_video_param *video = &ichan->params.video; | ||
860 | /* | ||
861 | * Initial buffer assignment - the first two sg-entries from | ||
862 | * the descriptor will end up in the IDMAC buffers | ||
863 | */ | ||
864 | dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 : | ||
865 | sg_dma_address(&desc->sg[1]); | ||
866 | |||
867 | WARN_ON(ichan->sg[0] || ichan->sg[1]); | ||
868 | |||
869 | cookie = ipu_init_channel_buffer(ichan, | ||
870 | video->out_pixel_fmt, | ||
871 | video->out_width, | ||
872 | video->out_height, | ||
873 | video->out_stride, | ||
874 | IPU_ROTATE_NONE, | ||
875 | sg_dma_address(&desc->sg[0]), | ||
876 | dma_1); | ||
877 | if (cookie < 0) | ||
878 | goto out; | ||
879 | } | ||
880 | |||
881 | /* ipu->lock can be taken under ichan->lock, but not v.v. */ | ||
882 | spin_lock_irqsave(&ichan->lock, flags); | ||
883 | |||
884 | /* submit_buffers() atomically verifies and fills empty sg slots */ | ||
885 | cookie = ipu_submit_channel_buffers(ichan, desc); | ||
886 | |||
887 | spin_unlock_irqrestore(&ichan->lock, flags); | ||
888 | |||
889 | if (cookie < 0) | ||
890 | goto out; | ||
891 | |||
892 | cookie = ichan->dma_chan.cookie; | ||
893 | |||
894 | if (++cookie < 0) | ||
895 | cookie = 1; | ||
896 | |||
897 | /* from dmaengine.h: "last cookie value returned to client" */ | ||
898 | ichan->dma_chan.cookie = cookie; | ||
899 | tx->cookie = cookie; | ||
900 | spin_lock_irqsave(&ichan->lock, flags); | ||
901 | list_add_tail(&desc->list, &ichan->queue); | ||
902 | spin_unlock_irqrestore(&ichan->lock, flags); | ||
903 | |||
904 | if (ichan->status < IPU_CHANNEL_ENABLED) { | ||
905 | int ret = ipu_enable_channel(idmac, ichan); | ||
906 | if (ret < 0) { | ||
907 | cookie = ret; | ||
908 | spin_lock_irqsave(&ichan->lock, flags); | ||
909 | list_del_init(&desc->list); | ||
910 | spin_unlock_irqrestore(&ichan->lock, flags); | ||
911 | tx->cookie = cookie; | ||
912 | ichan->dma_chan.cookie = cookie; | ||
913 | } | ||
914 | } | ||
915 | |||
916 | dump_idmac_reg(ipu); | ||
917 | |||
918 | out: | ||
919 | mutex_unlock(&ichan->chan_mutex); | ||
920 | |||
921 | return cookie; | ||
922 | } | ||
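Because tx_submit() hands out monotonically increasing cookies in the standard dmaengine fashion, a client can also poll for completion with the generic helpers instead of (or in addition to) the descriptor callback. A hedged client-side sketch, not part of this patch:

/* Busy-wait until the cookie returned by tx_submit() has completed.
 * Illustration only - a real client would normally just use txd->callback. */
static void wait_frame_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS)
		cpu_relax();
}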
923 | |||
924 | /* Called with ichan->chan_mutex held */ | ||
925 | static int idmac_desc_alloc(struct idmac_channel *ichan, int n) | ||
926 | { | ||
927 | struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc)); | ||
928 | struct idmac *idmac = to_idmac(ichan->dma_chan.device); | ||
929 | |||
930 | if (!desc) | ||
931 | return -ENOMEM; | ||
932 | |||
933 | /* No interrupts, just disable the tasklet for a moment */ | ||
934 | tasklet_disable(&to_ipu(idmac)->tasklet); | ||
935 | |||
936 | ichan->n_tx_desc = n; | ||
937 | ichan->desc = desc; | ||
938 | INIT_LIST_HEAD(&ichan->queue); | ||
939 | INIT_LIST_HEAD(&ichan->free_list); | ||
940 | |||
941 | while (n--) { | ||
942 | struct dma_async_tx_descriptor *txd = &desc->txd; | ||
943 | |||
944 | memset(txd, 0, sizeof(*txd)); | ||
945 | dma_async_tx_descriptor_init(txd, &ichan->dma_chan); | ||
946 | txd->tx_submit = idmac_tx_submit; | ||
947 | txd->chan = &ichan->dma_chan; | ||
948 | INIT_LIST_HEAD(&txd->tx_list); | ||
949 | |||
950 | list_add(&desc->list, &ichan->free_list); | ||
951 | |||
952 | desc++; | ||
953 | } | ||
954 | |||
955 | tasklet_enable(&to_ipu(idmac)->tasklet); | ||
956 | |||
957 | return 0; | ||
958 | } | ||
959 | |||
960 | /** | ||
961 | * ipu_init_channel() - initialize an IPU channel. | ||
962 | * @idmac: IPU DMAC context. | ||
963 | * @ichan: pointer to the channel object. | ||
964 | * @return 0 on success or negative error code on failure. | ||
965 | */ | ||
966 | static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan) | ||
967 | { | ||
968 | union ipu_channel_param *params = &ichan->params; | ||
969 | uint32_t ipu_conf; | ||
970 | enum ipu_channel channel = ichan->dma_chan.chan_id; | ||
971 | unsigned long flags; | ||
972 | uint32_t reg; | ||
973 | struct ipu *ipu = to_ipu(idmac); | ||
974 | int ret = 0, n_desc = 0; | ||
975 | |||
976 | dev_dbg(ipu->dev, "init channel = %d\n", channel); | ||
977 | |||
978 | if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 && | ||
979 | channel != IDMAC_IC_7) | ||
980 | return -EINVAL; | ||
981 | |||
982 | spin_lock_irqsave(&ipu->lock, flags); | ||
983 | |||
984 | switch (channel) { | ||
985 | case IDMAC_IC_7: | ||
986 | n_desc = 16; | ||
987 | reg = idmac_read_icreg(ipu, IC_CONF); | ||
988 | idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF); | ||
989 | break; | ||
990 | case IDMAC_IC_0: | ||
991 | n_desc = 16; | ||
992 | reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW); | ||
993 | idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW); | ||
994 | ret = ipu_ic_init_prpenc(ipu, params, true); | ||
995 | break; | ||
996 | case IDMAC_SDC_0: | ||
997 | case IDMAC_SDC_1: | ||
998 | n_desc = 4; | ||
999 | default: | ||
1000 | break; | ||
1001 | } | ||
1002 | |||
1003 | ipu->channel_init_mask |= 1L << channel; | ||
1004 | |||
1005 | /* Enable IPU sub module */ | ||
1006 | ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) | | ||
1007 | ipu_channel_conf_mask(channel); | ||
1008 | idmac_write_ipureg(ipu, ipu_conf, IPU_CONF); | ||
1009 | |||
1010 | spin_unlock_irqrestore(&ipu->lock, flags); | ||
1011 | |||
1012 | if (n_desc && !ichan->desc) | ||
1013 | ret = idmac_desc_alloc(ichan, n_desc); | ||
1014 | |||
1015 | dump_idmac_reg(ipu); | ||
1016 | |||
1017 | return ret; | ||
1018 | } | ||
1019 | |||
1020 | /** | ||
1021 | * ipu_uninit_channel() - uninitialize an IPU channel. | ||
1022 | * @idmac: IPU DMAC context. | ||
1023 | * @ichan: pointer to the channel object. | ||
1024 | */ | ||
1025 | static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan) | ||
1026 | { | ||
1027 | enum ipu_channel channel = ichan->dma_chan.chan_id; | ||
1028 | unsigned long flags; | ||
1029 | uint32_t reg; | ||
1030 | unsigned long chan_mask = 1UL << channel; | ||
1031 | uint32_t ipu_conf; | ||
1032 | struct ipu *ipu = to_ipu(idmac); | ||
1033 | |||
1034 | spin_lock_irqsave(&ipu->lock, flags); | ||
1035 | |||
1036 | if (!(ipu->channel_init_mask & chan_mask)) { | ||
1037 | dev_err(ipu->dev, "Channel already uninitialized %d\n", | ||
1038 | channel); | ||
1039 | spin_unlock_irqrestore(&ipu->lock, flags); | ||
1040 | return; | ||
1041 | } | ||
1042 | |||
1043 | /* Reset the double buffer */ | ||
1044 | reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL); | ||
1045 | idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL); | ||
1046 | |||
1047 | ichan->sec_chan_en = false; | ||
1048 | |||
1049 | switch (channel) { | ||
1050 | case IDMAC_IC_7: | ||
1051 | reg = idmac_read_icreg(ipu, IC_CONF); | ||
1052 | idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN), | ||
1053 | IC_CONF); | ||
1054 | break; | ||
1055 | case IDMAC_IC_0: | ||
1056 | reg = idmac_read_icreg(ipu, IC_CONF); | ||
1057 | idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1), | ||
1058 | IC_CONF); | ||
1059 | break; | ||
1060 | case IDMAC_SDC_0: | ||
1061 | case IDMAC_SDC_1: | ||
1062 | default: | ||
1063 | break; | ||
1064 | } | ||
1065 | |||
1066 | ipu->channel_init_mask &= ~(1L << channel); | ||
1067 | |||
1068 | ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) & | ||
1069 | ~ipu_channel_conf_mask(channel); | ||
1070 | idmac_write_ipureg(ipu, ipu_conf, IPU_CONF); | ||
1071 | |||
1072 | spin_unlock_irqrestore(&ipu->lock, flags); | ||
1073 | |||
1074 | ichan->n_tx_desc = 0; | ||
1075 | vfree(ichan->desc); | ||
1076 | ichan->desc = NULL; | ||
1077 | } | ||
1078 | |||
1079 | /** | ||
1080 | * ipu_disable_channel() - disable an IPU channel. | ||
1081 | * @idmac: IPU DMAC context. | ||
1082 | * @ichan: channel object pointer. | ||
1083 | * @wait_for_stop: flag to set whether to wait for channel end of frame or | ||
1084 | * return immediately. | ||
1085 | * @return: 0 on success or negative error code on failure. | ||
1086 | */ | ||
1087 | static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, | ||
1088 | bool wait_for_stop) | ||
1089 | { | ||
1090 | enum ipu_channel channel = ichan->dma_chan.chan_id; | ||
1091 | struct ipu *ipu = to_ipu(idmac); | ||
1092 | uint32_t reg; | ||
1093 | unsigned long flags; | ||
1094 | unsigned long chan_mask = 1UL << channel; | ||
1095 | unsigned int timeout; | ||
1096 | |||
1097 | if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) { | ||
1098 | timeout = 40; | ||
1099 | /* This waiting always fails. Related to spurious irq problem */ | ||
1100 | while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) || | ||
1101 | (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) { | ||
1102 | timeout--; | ||
1103 | msleep(10); | ||
1104 | |||
1105 | if (!timeout) { | ||
1106 | dev_dbg(ipu->dev, | ||
1107 | "Warning: timeout waiting for channel %u to " | ||
1108 | "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, " | ||
1109 | "busy = 0x%08X, tstat = 0x%08X\n", channel, | ||
1110 | idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY), | ||
1111 | idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY), | ||
1112 | idmac_read_icreg(ipu, IDMAC_CHA_BUSY), | ||
1113 | idmac_read_ipureg(ipu, IPU_TASKS_STAT)); | ||
1114 | break; | ||
1115 | } | ||
1116 | } | ||
1117 | dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout); | ||
1118 | } | ||
1119 | /* SDC BG and FG must be disabled before DMA is disabled */ | ||
1120 | if (wait_for_stop && (channel == IDMAC_SDC_0 || | ||
1121 | channel == IDMAC_SDC_1)) { | ||
1122 | for (timeout = 5; | ||
1123 | timeout && !ipu_irq_status(ichan->eof_irq); timeout--) | ||
1124 | msleep(5); | ||
1125 | } | ||
1126 | |||
1127 | spin_lock_irqsave(&ipu->lock, flags); | ||
1128 | |||
1129 | /* Disable IC task */ | ||
1130 | ipu_ic_disable_task(ipu, channel); | ||
1131 | |||
1132 | /* Disable DMA channel(s) */ | ||
1133 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); | ||
1134 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); | ||
1135 | |||
1136 | /* | ||
1137 | * Problem (observed with channel DMAIC_7): after enabling the channel | ||
1138 | * and initialising buffers, there comes an interrupt with current still | ||
1139 | * pointing at buffer 0, whereas it should use buffer 0 first and only | ||
1140 | * generate an interrupt when it is done, then current should already | ||
1141 | * point to buffer 1. This spurious interrupt also comes on channel | ||
1142 | * DMASDC_0. With DMAIC_7 normally, if we just leave the ISR after the | ||
1143 | * first interrupt, there comes the second with current correctly | ||
1144 | * pointing to buffer 1 this time. But sometimes this second interrupt | ||
1145 | * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling | ||
1146 | * the channel seems to prevent the channel from hanging, but it doesn't | ||
1147 | * prevent the spurious interrupt. This might also be unsafe. Think | ||
1148 | * about the IDMAC controller trying to switch to a buffer, when we | ||
1149 | * clear the ready bit, and re-enable it a moment later. | ||
1150 | */ | ||
1151 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY); | ||
1152 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY); | ||
1153 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY); | ||
1154 | |||
1155 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY); | ||
1156 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY); | ||
1157 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY); | ||
1158 | |||
1159 | spin_unlock_irqrestore(&ipu->lock, flags); | ||
1160 | |||
1161 | return 0; | ||
1162 | } | ||
1163 | |||
1164 | /* | ||
1165 | * We have several possibilities here: | ||
1166 | * current BUF next BUF | ||
1167 | * | ||
1168 | * not last sg next not last sg | ||
1169 | * not last sg next last sg | ||
1170 | * last sg first sg from next descriptor | ||
1171 | * last sg NULL | ||
1172 | * | ||
1173 | * Besides, the descriptor queue might be empty or not. We process all these | ||
1174 | * cases carefully. | ||
1175 | */ | ||
1176 | static irqreturn_t idmac_interrupt(int irq, void *dev_id) | ||
1177 | { | ||
1178 | struct idmac_channel *ichan = dev_id; | ||
1179 | unsigned int chan_id = ichan->dma_chan.chan_id; | ||
1180 | struct scatterlist **sg, *sgnext, *sgnew = NULL; | ||
1181 | /* Next transfer descriptor */ | ||
1182 | struct idmac_tx_desc *desc = NULL, *descnew; | ||
1183 | dma_async_tx_callback callback; | ||
1184 | void *callback_param; | ||
1185 | bool done = false; | ||
1186 | u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY), | ||
1187 | ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY), | ||
1188 | curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); | ||
1189 | |||
1190 | /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ | ||
1191 | |||
1192 | pr_debug("IDMAC irq %d\n", irq); | ||
1193 | /* Other interrupts do not interfere with this channel */ | ||
1194 | spin_lock(&ichan->lock); | ||
1195 | |||
1196 | if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && | ||
1197 | ((curbuf >> chan_id) & 1) == ichan->active_buffer)) { | ||
1198 | int i = 100; | ||
1199 | |||
1200 | /* This doesn't help. See comment in ipu_disable_channel() */ | ||
1201 | while (--i) { | ||
1202 | curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); | ||
1203 | if (((curbuf >> chan_id) & 1) != ichan->active_buffer) | ||
1204 | break; | ||
1205 | cpu_relax(); | ||
1206 | } | ||
1207 | |||
1208 | if (!i) { | ||
1209 | spin_unlock(&ichan->lock); | ||
1210 | dev_dbg(ichan->dma_chan.device->dev, | ||
1211 | "IRQ on active buffer on channel %x, active " | ||
1212 | "%d, ready %x, %x, current %x!\n", chan_id, | ||
1213 | ichan->active_buffer, ready0, ready1, curbuf); | ||
1214 | return IRQ_NONE; | ||
1215 | } | ||
1216 | } | ||
1217 | |||
1218 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || | ||
1219 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) | ||
1220 | )) { | ||
1221 | spin_unlock(&ichan->lock); | ||
1222 | dev_dbg(ichan->dma_chan.device->dev, | ||
1223 | "IRQ with active buffer still ready on channel %x, " | ||
1224 | "active %d, ready %x, %x!\n", chan_id, | ||
1225 | ichan->active_buffer, ready0, ready1); | ||
1226 | return IRQ_NONE; | ||
1227 | } | ||
1228 | |||
1229 | if (unlikely(list_empty(&ichan->queue))) { | ||
1230 | spin_unlock(&ichan->lock); | ||
1231 | dev_err(ichan->dma_chan.device->dev, | ||
1232 | "IRQ without queued buffers on channel %x, active %d, " | ||
1233 | "ready %x, %x!\n", chan_id, | ||
1234 | ichan->active_buffer, ready0, ready1); | ||
1235 | return IRQ_NONE; | ||
1236 | } | ||
1237 | |||
1238 | /* | ||
1239 | * active_buffer is a software flag, it shows which buffer we are | ||
1240 | * currently expecting back from the hardware, IDMAC should be | ||
1241 | * processing the other buffer already | ||
1242 | */ | ||
1243 | sg = &ichan->sg[ichan->active_buffer]; | ||
1244 | sgnext = ichan->sg[!ichan->active_buffer]; | ||
1245 | |||
1246 | /* | ||
1247 | * if sgnext == NULL sg must be the last element in a scatterlist and | ||
1248 | * queue must be empty | ||
1249 | */ | ||
1250 | if (unlikely(!sgnext)) { | ||
1251 | if (unlikely(sg_next(*sg))) { | ||
1252 | dev_err(ichan->dma_chan.device->dev, | ||
1253 | "Broken buffer-update locking on channel %x!\n", | ||
1254 | chan_id); | ||
1255 | /* We'll let the user catch up */ | ||
1256 | } else { | ||
1257 | /* Underrun */ | ||
1258 | ipu_ic_disable_task(&ipu_data, chan_id); | ||
1259 | dev_dbg(ichan->dma_chan.device->dev, | ||
1260 | "Underrun on channel %x\n", chan_id); | ||
1261 | ichan->status = IPU_CHANNEL_READY; | ||
1262 | /* Continue to check for complete descriptor */ | ||
1263 | } | ||
1264 | } | ||
1265 | |||
1266 | desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); | ||
1267 | |||
1268 | /* First calculate and submit the next sg element */ | ||
1269 | if (likely(sgnext)) | ||
1270 | sgnew = sg_next(sgnext); | ||
1271 | |||
1272 | if (unlikely(!sgnew)) { | ||
1273 | /* Start a new scatterlist, if any queued */ | ||
1274 | if (likely(desc->list.next != &ichan->queue)) { | ||
1275 | descnew = list_entry(desc->list.next, | ||
1276 | struct idmac_tx_desc, list); | ||
1277 | sgnew = &descnew->sg[0]; | ||
1278 | } | ||
1279 | } | ||
1280 | |||
1281 | if (unlikely(!sg_next(*sg)) || !sgnext) { | ||
1282 | /* | ||
1283 | * Last element in scatterlist done, remove from the queue, | ||
1284 | * _init for debugging | ||
1285 | */ | ||
1286 | list_del_init(&desc->list); | ||
1287 | done = true; | ||
1288 | } | ||
1289 | |||
1290 | *sg = sgnew; | ||
1291 | |||
1292 | if (likely(sgnew)) { | ||
1293 | int ret; | ||
1294 | |||
1295 | ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer, | ||
1296 | sg_dma_address(*sg)); | ||
1297 | if (ret < 0) | ||
1298 | dev_err(ichan->dma_chan.device->dev, | ||
1299 | "Failed to update buffer on channel %x buffer %d!\n", | ||
1300 | chan_id, ichan->active_buffer); | ||
1301 | else | ||
1302 | ipu_select_buffer(chan_id, ichan->active_buffer); | ||
1303 | } | ||
1304 | |||
1305 | /* Flip the active buffer - even if update above failed */ | ||
1306 | ichan->active_buffer = !ichan->active_buffer; | ||
1307 | if (done) | ||
1308 | ichan->completed = desc->txd.cookie; | ||
1309 | |||
1310 | callback = desc->txd.callback; | ||
1311 | callback_param = desc->txd.callback_param; | ||
1312 | |||
1313 | spin_unlock(&ichan->lock); | ||
1314 | |||
1315 | if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback) | ||
1316 | callback(callback_param); | ||
1317 | |||
1318 | return IRQ_HANDLED; | ||
1319 | } | ||
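
The buffer handling in idmac_interrupt() is easiest to see in isolation: the channel owns two hardware buffer slots, active_buffer names the slot the next EOF is expected from, the completed slot is re-armed with the next scatterlist entry (or left idle on underrun), and the flag is flipped regardless of whether the hardware update succeeded. The following is a minimal stand-alone sketch of that bookkeeping in plain C; it is a toy model, not kernel code, and the names (slot_idx, next_sg, eof_interrupt) are invented for illustration.

#include <stdio.h>
#include <stdbool.h>

#define SG_LEN 5

static unsigned long sg[SG_LEN] = { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000 };
static int slot_idx[2];     /* which sg entry each hardware slot currently holds */
static bool active_buffer;  /* slot the next EOF is expected from */
static int next_sg = 2;     /* next sg entry to hand to the "hardware" */

static void eof_interrupt(void)
{
    /* Re-arm the completed slot with the next sg entry, if one is queued;
     * otherwise this is an underrun and the slot stays idle. */
    if (next_sg < SG_LEN) {
        slot_idx[active_buffer] = next_sg;
        printf("EOF on slot %d, re-armed with 0x%lx\n",
               (int)active_buffer, sg[next_sg]);
        next_sg++;
    } else {
        printf("EOF on slot %d, nothing left to queue (underrun)\n",
               (int)active_buffer);
    }
    /* Flip the expectation, as "ichan->active_buffer = !ichan->active_buffer" does */
    active_buffer = !active_buffer;
}

int main(void)
{
    int i;

    /* Slots 0 and 1 are assumed to have been primed with the first two entries */
    slot_idx[0] = 0;
    slot_idx[1] = 1;
    for (i = 0; i < SG_LEN; i++)
        eof_interrupt();
    return 0;
}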
1320 | |||
1321 | static void ipu_gc_tasklet(unsigned long arg) | ||
1322 | { | ||
1323 | struct ipu *ipu = (struct ipu *)arg; | ||
1324 | int i; | ||
1325 | |||
1326 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { | ||
1327 | struct idmac_channel *ichan = ipu->channel + i; | ||
1328 | struct idmac_tx_desc *desc; | ||
1329 | unsigned long flags; | ||
1330 | int j; | ||
1331 | |||
1332 | for (j = 0; j < ichan->n_tx_desc; j++) { | ||
1333 | desc = ichan->desc + j; | ||
1334 | spin_lock_irqsave(&ichan->lock, flags); | ||
1335 | if (async_tx_test_ack(&desc->txd)) { | ||
1336 | list_move(&desc->list, &ichan->free_list); | ||
1337 | async_tx_clear_ack(&desc->txd); | ||
1338 | } | ||
1339 | spin_unlock_irqrestore(&ichan->lock, flags); | ||
1340 | } | ||
1341 | } | ||
1342 | } | ||
1343 | |||
1344 | /* | ||
1345 | * At the time the .device_alloc_chan_resources() method is called, we cannot | ||
1346 | * know whether the client will accept the channel. Thus we can only check | ||
1347 | * whether we can satisfy the client's request; the only real criterion for | ||
1348 | * verifying that the client has accepted our offer is the client_count. That | ||
1349 | * is why we perform the rest of our allocation tasks on the first call here. | ||
1350 | */ | ||
1351 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, | ||
1352 | struct scatterlist *sgl, unsigned int sg_len, | ||
1353 | enum dma_data_direction direction, unsigned long tx_flags) | ||
1354 | { | ||
1355 | struct idmac_channel *ichan = to_idmac_chan(chan); | ||
1356 | struct idmac_tx_desc *desc = NULL; | ||
1357 | struct dma_async_tx_descriptor *txd = NULL; | ||
1358 | unsigned long flags; | ||
1359 | |||
1360 | /* We can only handle these three channels so far */ | ||
1361 | if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 && | ||
1362 | ichan->dma_chan.chan_id != IDMAC_IC_7) | ||
1363 | return NULL; | ||
1364 | |||
1365 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { | ||
1366 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); | ||
1367 | return NULL; | ||
1368 | } | ||
1369 | |||
1370 | mutex_lock(&ichan->chan_mutex); | ||
1371 | |||
1372 | spin_lock_irqsave(&ichan->lock, flags); | ||
1373 | if (!list_empty(&ichan->free_list)) { | ||
1374 | desc = list_entry(ichan->free_list.next, | ||
1375 | struct idmac_tx_desc, list); | ||
1376 | |||
1377 | list_del_init(&desc->list); | ||
1378 | |||
1379 | desc->sg_len = sg_len; | ||
1380 | desc->sg = sgl; | ||
1381 | txd = &desc->txd; | ||
1382 | txd->flags = tx_flags; | ||
1383 | } | ||
1384 | spin_unlock_irqrestore(&ichan->lock, flags); | ||
1385 | |||
1386 | mutex_unlock(&ichan->chan_mutex); | ||
1387 | |||
1388 | tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet); | ||
1389 | |||
1390 | return txd; | ||
1391 | } | ||
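
For context, this is roughly how a client driver is expected to consume the descriptor prepared here. The sketch below is hedged and not part of this patch: buf_dma, buf_len, done_callback, ctx and hypothetical_submit are made-up names, and a struct dma_chan for one of the three supported channels is assumed to have been obtained already.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/*
 * Hypothetical client helper: queue one contiguous buffer on the channel.
 * The scatterlist must outlive the transfer, because the driver keeps a
 * pointer to it (desc->sg = sgl) rather than copying it.
 */
static int hypothetical_submit(struct dma_chan *chan, struct scatterlist *sg,
                               dma_addr_t buf_dma, size_t buf_len,
                               dma_async_tx_callback done_callback, void *ctx)
{
    struct dma_async_tx_descriptor *txd;

    sg_init_table(sg, 1);
    sg_dma_address(sg) = buf_dma;   /* bus address of the data buffer */
    sg_dma_len(sg) = buf_len;

    txd = chan->device->device_prep_slave_sg(chan, sg, 1, DMA_TO_DEVICE,
                                             DMA_PREP_INTERRUPT);
    if (!txd)
        return -EIO;                /* no free descriptor on this channel */

    txd->callback = done_callback;  /* called from the EOF interrupt */
    txd->callback_param = ctx;

    txd->tx_submit(txd);            /* queue the descriptor */
    dma_async_issue_pending(chan);  /* idmac_issue_pending() re-selects the buffer */

    return 0;
}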
1392 | |||
1393 | /* Re-select the current buffer and re-activate the channel */ | ||
1394 | static void idmac_issue_pending(struct dma_chan *chan) | ||
1395 | { | ||
1396 | struct idmac_channel *ichan = to_idmac_chan(chan); | ||
1397 | struct idmac *idmac = to_idmac(chan->device); | ||
1398 | struct ipu *ipu = to_ipu(idmac); | ||
1399 | unsigned long flags; | ||
1400 | |||
1401 | /* This is not always needed, but doesn't hurt either */ | ||
1402 | spin_lock_irqsave(&ipu->lock, flags); | ||
1403 | ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer); | ||
1404 | spin_unlock_irqrestore(&ipu->lock, flags); | ||
1405 | |||
1406 | /* | ||
1407 | * We might need to perform some parts of the initialisation from | ||
1408 | * ipu_enable_channel(), but not all of it: we do not want to reset to | ||
1409 | * buffer 0, and we don't need to set the priority again, but re-enabling | ||
1410 | * the task and the channel might be a good idea. | ||
1411 | */ | ||
1412 | } | ||
1413 | |||
1414 | static void __idmac_terminate_all(struct dma_chan *chan) | ||
1415 | { | ||
1416 | struct idmac_channel *ichan = to_idmac_chan(chan); | ||
1417 | struct idmac *idmac = to_idmac(chan->device); | ||
1418 | unsigned long flags; | ||
1419 | int i; | ||
1420 | |||
1421 | ipu_disable_channel(idmac, ichan, | ||
1422 | ichan->status >= IPU_CHANNEL_ENABLED); | ||
1423 | |||
1424 | tasklet_disable(&to_ipu(idmac)->tasklet); | ||
1425 | |||
1426 | /* ichan->queue is modified in ISR, have to spinlock */ | ||
1427 | spin_lock_irqsave(&ichan->lock, flags); | ||
1428 | list_splice_init(&ichan->queue, &ichan->free_list); | ||
1429 | |||
1430 | if (ichan->desc) | ||
1431 | for (i = 0; i < ichan->n_tx_desc; i++) { | ||
1432 | struct idmac_tx_desc *desc = ichan->desc + i; | ||
1433 | if (list_empty(&desc->list)) | ||
1434 | /* Descriptor was prepared, but not submitted */ | ||
1435 | list_add(&desc->list, | ||
1436 | &ichan->free_list); | ||
1437 | |||
1438 | async_tx_clear_ack(&desc->txd); | ||
1439 | } | ||
1440 | |||
1441 | ichan->sg[0] = NULL; | ||
1442 | ichan->sg[1] = NULL; | ||
1443 | spin_unlock_irqrestore(&ichan->lock, flags); | ||
1444 | |||
1445 | tasklet_enable(&to_ipu(idmac)->tasklet); | ||
1446 | |||
1447 | ichan->status = IPU_CHANNEL_INITIALIZED; | ||
1448 | } | ||
1449 | |||
1450 | static void idmac_terminate_all(struct dma_chan *chan) | ||
1451 | { | ||
1452 | struct idmac_channel *ichan = to_idmac_chan(chan); | ||
1453 | |||
1454 | mutex_lock(&ichan->chan_mutex); | ||
1455 | |||
1456 | __idmac_terminate_all(chan); | ||
1457 | |||
1458 | mutex_unlock(&ichan->chan_mutex); | ||
1459 | } | ||
1460 | |||
1461 | static int idmac_alloc_chan_resources(struct dma_chan *chan) | ||
1462 | { | ||
1463 | struct idmac_channel *ichan = to_idmac_chan(chan); | ||
1464 | struct idmac *idmac = to_idmac(chan->device); | ||
1465 | int ret; | ||
1466 | |||
1467 | /* dmaengine.c now guarantees to only offer free channels */ | ||
1468 | BUG_ON(chan->client_count > 1); | ||
1469 | WARN_ON(ichan->status != IPU_CHANNEL_FREE); | ||
1470 | |||
1471 | chan->cookie = 1; | ||
1472 | ichan->completed = -ENXIO; | ||
1473 | |||
1474 | ret = ipu_irq_map(ichan->dma_chan.chan_id); | ||
1475 | if (ret < 0) | ||
1476 | goto eimap; | ||
1477 | |||
1478 | ichan->eof_irq = ret; | ||
1479 | ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, | ||
1480 | ichan->eof_name, ichan); | ||
1481 | if (ret < 0) | ||
1482 | goto erirq; | ||
1483 | |||
1484 | ret = ipu_init_channel(idmac, ichan); | ||
1485 | if (ret < 0) | ||
1486 | goto eichan; | ||
1487 | |||
1488 | ichan->status = IPU_CHANNEL_INITIALIZED; | ||
1489 | |||
1490 | dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n", | ||
1491 | ichan->dma_chan.chan_id, ichan->eof_irq); | ||
1492 | |||
1493 | return ret; | ||
1494 | |||
1495 | eichan: | ||
1496 | free_irq(ichan->eof_irq, ichan); | ||
1497 | erirq: | ||
1498 | ipu_irq_unmap(ichan->dma_chan.chan_id); | ||
1499 | eimap: | ||
1500 | return ret; | ||
1501 | } | ||
1502 | |||
1503 | static void idmac_free_chan_resources(struct dma_chan *chan) | ||
1504 | { | ||
1505 | struct idmac_channel *ichan = to_idmac_chan(chan); | ||
1506 | struct idmac *idmac = to_idmac(chan->device); | ||
1507 | |||
1508 | mutex_lock(&ichan->chan_mutex); | ||
1509 | |||
1510 | __idmac_terminate_all(chan); | ||
1511 | |||
1512 | if (ichan->status > IPU_CHANNEL_FREE) { | ||
1513 | free_irq(ichan->eof_irq, ichan); | ||
1514 | ipu_irq_unmap(ichan->dma_chan.chan_id); | ||
1515 | } | ||
1516 | |||
1517 | ichan->status = IPU_CHANNEL_FREE; | ||
1518 | |||
1519 | ipu_uninit_channel(idmac, ichan); | ||
1520 | |||
1521 | mutex_unlock(&ichan->chan_mutex); | ||
1522 | |||
1523 | tasklet_schedule(&to_ipu(idmac)->tasklet); | ||
1524 | } | ||
1525 | |||
1526 | static enum dma_status idmac_is_tx_complete(struct dma_chan *chan, | ||
1527 | dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) | ||
1528 | { | ||
1529 | struct idmac_channel *ichan = to_idmac_chan(chan); | ||
1530 | |||
1531 | if (done) | ||
1532 | *done = ichan->completed; | ||
1533 | if (used) | ||
1534 | *used = chan->cookie; | ||
1535 | if (cookie != chan->cookie) | ||
1536 | return DMA_ERROR; | ||
1537 | return DMA_SUCCESS; | ||
1538 | } | ||
1539 | |||
1540 | static int __init ipu_idmac_init(struct ipu *ipu) | ||
1541 | { | ||
1542 | struct idmac *idmac = &ipu->idmac; | ||
1543 | struct dma_device *dma = &idmac->dma; | ||
1544 | int i; | ||
1545 | |||
1546 | dma_cap_set(DMA_SLAVE, dma->cap_mask); | ||
1547 | dma_cap_set(DMA_PRIVATE, dma->cap_mask); | ||
1548 | |||
1549 | /* Compulsory common fields */ | ||
1550 | dma->dev = ipu->dev; | ||
1551 | dma->device_alloc_chan_resources = idmac_alloc_chan_resources; | ||
1552 | dma->device_free_chan_resources = idmac_free_chan_resources; | ||
1553 | dma->device_is_tx_complete = idmac_is_tx_complete; | ||
1554 | dma->device_issue_pending = idmac_issue_pending; | ||
1555 | |||
1556 | /* Compulsory fields for DMA_SLAVE */ | ||
1557 | dma->device_prep_slave_sg = idmac_prep_slave_sg; | ||
1558 | dma->device_terminate_all = idmac_terminate_all; | ||
1559 | |||
1560 | INIT_LIST_HEAD(&dma->channels); | ||
1561 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { | ||
1562 | struct idmac_channel *ichan = ipu->channel + i; | ||
1563 | struct dma_chan *dma_chan = &ichan->dma_chan; | ||
1564 | |||
1565 | spin_lock_init(&ichan->lock); | ||
1566 | mutex_init(&ichan->chan_mutex); | ||
1567 | |||
1568 | ichan->status = IPU_CHANNEL_FREE; | ||
1569 | ichan->sec_chan_en = false; | ||
1570 | ichan->completed = -ENXIO; | ||
1571 | snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); | ||
1572 | |||
1573 | dma_chan->device = &idmac->dma; | ||
1574 | dma_chan->cookie = 1; | ||
1575 | dma_chan->chan_id = i; | ||
1576 | list_add_tail(&ichan->dma_chan.device_node, &dma->channels); | ||
1577 | } | ||
1578 | |||
1579 | idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF); | ||
1580 | |||
1581 | return dma_async_device_register(&idmac->dma); | ||
1582 | } | ||
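
Since the device advertises both DMA_SLAVE and DMA_PRIVATE, clients are expected to grab a specific channel with dma_request_channel() and a filter matching the wanted chan_id, rather than receiving an arbitrary public channel. A hedged sketch, not part of this patch (sdc0_filter and hypothetical_get_sdc0 are invented names):

#include <linux/dmaengine.h>
#include <mach/ipu.h>

/* Accept only the background synchronous display channel */
static bool sdc0_filter(struct dma_chan *chan, void *arg)
{
    return chan->chan_id == IDMAC_SDC_0;
}

static struct dma_chan *hypothetical_get_sdc0(void)
{
    dma_cap_mask_t mask;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    /* On success, idmac_alloc_chan_resources() has already run */
    return dma_request_channel(mask, sdc0_filter, NULL);
}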
1583 | |||
1584 | static void ipu_idmac_exit(struct ipu *ipu) | ||
1585 | { | ||
1586 | int i; | ||
1587 | struct idmac *idmac = &ipu->idmac; | ||
1588 | |||
1589 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { | ||
1590 | struct idmac_channel *ichan = ipu->channel + i; | ||
1591 | |||
1592 | idmac_terminate_all(&ichan->dma_chan); | ||
1593 | idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0); | ||
1594 | } | ||
1595 | |||
1596 | dma_async_device_unregister(&idmac->dma); | ||
1597 | } | ||
1598 | |||
1599 | /***************************************************************************** | ||
1600 | * IPU common probe / remove | ||
1601 | */ | ||
1602 | |||
1603 | static int ipu_probe(struct platform_device *pdev) | ||
1604 | { | ||
1605 | struct ipu_platform_data *pdata = pdev->dev.platform_data; | ||
1606 | struct resource *mem_ipu, *mem_ic; | ||
1607 | int ret; | ||
1608 | |||
1609 | spin_lock_init(&ipu_data.lock); | ||
1610 | |||
1611 | mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1612 | mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1613 | if (!pdata || !mem_ipu || !mem_ic) | ||
1614 | return -EINVAL; | ||
1615 | |||
1616 | ipu_data.dev = &pdev->dev; | ||
1617 | |||
1618 | platform_set_drvdata(pdev, &ipu_data); | ||
1619 | |||
1620 | ret = platform_get_irq(pdev, 0); | ||
1621 | if (ret < 0) | ||
1622 | goto err_noirq; | ||
1623 | |||
1624 | ipu_data.irq_fn = ret; | ||
1625 | ret = platform_get_irq(pdev, 1); | ||
1626 | if (ret < 0) | ||
1627 | goto err_noirq; | ||
1628 | |||
1629 | ipu_data.irq_err = ret; | ||
1630 | ipu_data.irq_base = pdata->irq_base; | ||
1631 | |||
1632 | dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n", | ||
1633 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); | ||
1634 | |||
1635 | /* Remap IPU common registers */ | ||
1636 | ipu_data.reg_ipu = ioremap(mem_ipu->start, | ||
1637 | mem_ipu->end - mem_ipu->start + 1); | ||
1638 | if (!ipu_data.reg_ipu) { | ||
1639 | ret = -ENOMEM; | ||
1640 | goto err_ioremap_ipu; | ||
1641 | } | ||
1642 | |||
1643 | /* Remap Image Converter and Image DMA Controller registers */ | ||
1644 | ipu_data.reg_ic = ioremap(mem_ic->start, | ||
1645 | mem_ic->end - mem_ic->start + 1); | ||
1646 | if (!ipu_data.reg_ic) { | ||
1647 | ret = -ENOMEM; | ||
1648 | goto err_ioremap_ic; | ||
1649 | } | ||
1650 | |||
1651 | /* Get IPU clock */ | ||
1652 | ipu_data.ipu_clk = clk_get(&pdev->dev, "ipu_clk"); | ||
1653 | if (IS_ERR(ipu_data.ipu_clk)) { | ||
1654 | ret = PTR_ERR(ipu_data.ipu_clk); | ||
1655 | goto err_clk_get; | ||
1656 | } | ||
1657 | |||
1658 | /* Make sure IPU HSP clock is running */ | ||
1659 | clk_enable(ipu_data.ipu_clk); | ||
1660 | |||
1661 | /* Disable all interrupts */ | ||
1662 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1); | ||
1663 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2); | ||
1664 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3); | ||
1665 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4); | ||
1666 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5); | ||
1667 | |||
1668 | dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name, | ||
1669 | (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err); | ||
1670 | |||
1671 | ret = ipu_irq_attach_irq(&ipu_data, pdev); | ||
1672 | if (ret < 0) | ||
1673 | goto err_attach_irq; | ||
1674 | |||
1675 | /* Initialize DMA engine */ | ||
1676 | ret = ipu_idmac_init(&ipu_data); | ||
1677 | if (ret < 0) | ||
1678 | goto err_idmac_init; | ||
1679 | |||
1680 | tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (unsigned long)&ipu_data); | ||
1681 | |||
1682 | ipu_data.dev = &pdev->dev; | ||
1683 | |||
1684 | dev_dbg(ipu_data.dev, "IPU initialized\n"); | ||
1685 | |||
1686 | return 0; | ||
1687 | |||
1688 | err_idmac_init: | ||
1689 | err_attach_irq: | ||
1690 | ipu_irq_detach_irq(&ipu_data, pdev); | ||
1691 | clk_disable(ipu_data.ipu_clk); | ||
1692 | clk_put(ipu_data.ipu_clk); | ||
1693 | err_clk_get: | ||
1694 | iounmap(ipu_data.reg_ic); | ||
1695 | err_ioremap_ic: | ||
1696 | iounmap(ipu_data.reg_ipu); | ||
1697 | err_ioremap_ipu: | ||
1698 | err_noirq: | ||
1699 | dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret); | ||
1700 | return ret; | ||
1701 | } | ||
1702 | |||
1703 | static int ipu_remove(struct platform_device *pdev) | ||
1704 | { | ||
1705 | struct ipu *ipu = platform_get_drvdata(pdev); | ||
1706 | |||
1707 | ipu_idmac_exit(ipu); | ||
1708 | ipu_irq_detach_irq(ipu, pdev); | ||
1709 | clk_disable(ipu->ipu_clk); | ||
1710 | clk_put(ipu->ipu_clk); | ||
1711 | iounmap(ipu->reg_ic); | ||
1712 | iounmap(ipu->reg_ipu); | ||
1713 | tasklet_kill(&ipu->tasklet); | ||
1714 | platform_set_drvdata(pdev, NULL); | ||
1715 | |||
1716 | return 0; | ||
1717 | } | ||
1718 | |||
1719 | /* | ||
1720 | * We need two MEM resources - the IPU-common and the Image Converter register | ||
1721 | * windows (including PF_CONF and the IDMAC_* registers) - and two IRQs: function and error | ||
1722 | */ | ||
1723 | static struct platform_driver ipu_platform_driver = { | ||
1724 | .driver = { | ||
1725 | .name = "ipu-core", | ||
1726 | .owner = THIS_MODULE, | ||
1727 | }, | ||
1728 | .remove = ipu_remove, | ||
1729 | }; | ||
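
A board or SoC support file therefore registers an "ipu-core" platform device carrying exactly those resources, plus the IRQ base in its platform data. The sketch below is only illustrative: the base address, window sizes and IRQ macros (IPU_CTRL_BASE_ADDR, MXC_INT_IPU_SYN, MXC_INT_IPU_ERR, MXC_IPU_IRQ_START) are placeholders assumed from typical i.MX3x support code, and struct ipu_platform_data is assumed to come from <mach/ipu.h>; none of this is taken from the patch itself.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <mach/ipu.h>

static struct resource ipu_resources[] = {
    {
        .start  = IPU_CTRL_BASE_ADDR,           /* IPU common registers */
        .end    = IPU_CTRL_BASE_ADDR + 0x5F,
        .flags  = IORESOURCE_MEM,
    }, {
        .start  = IPU_CTRL_BASE_ADDR + 0x88,    /* IC / PF / IDMAC registers */
        .end    = IPU_CTRL_BASE_ADDR + 0xB3,
        .flags  = IORESOURCE_MEM,
    }, {
        .start  = MXC_INT_IPU_SYN,              /* function IRQ */
        .end    = MXC_INT_IPU_SYN,
        .flags  = IORESOURCE_IRQ,
    }, {
        .start  = MXC_INT_IPU_ERR,              /* error IRQ */
        .end    = MXC_INT_IPU_ERR,
        .flags  = IORESOURCE_IRQ,
    },
};

static struct ipu_platform_data ipu_pdata = {
    .irq_base       = MXC_IPU_IRQ_START,        /* start of the IPU IRQ range */
};

static struct platform_device ipu_device = {
    .name           = "ipu-core",
    .id             = -1,
    .dev            = {
        .platform_data  = &ipu_pdata,
    },
    .resource       = ipu_resources,
    .num_resources  = ARRAY_SIZE(ipu_resources),
};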
1730 | |||
1731 | static int __init ipu_init(void) | ||
1732 | { | ||
1733 | return platform_driver_probe(&ipu_platform_driver, ipu_probe); | ||
1734 | } | ||
1735 | subsys_initcall(ipu_init); | ||
1736 | |||
1737 | MODULE_DESCRIPTION("IPU core driver"); | ||
1738 | MODULE_LICENSE("GPL v2"); | ||
1739 | MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>"); | ||
1740 | MODULE_ALIAS("platform:ipu-core"); | ||
diff --git a/drivers/dma/ipu/ipu_intern.h b/drivers/dma/ipu/ipu_intern.h new file mode 100644 index 000000000000..545cf11a94ab --- /dev/null +++ b/drivers/dma/ipu/ipu_intern.h | |||
@@ -0,0 +1,176 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 | ||
3 | * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> | ||
4 | * | ||
5 | * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef _IPU_INTERN_H_ | ||
13 | #define _IPU_INTERN_H_ | ||
14 | |||
15 | #include <linux/dmaengine.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | |||
19 | /* IPU Common registers */ | ||
20 | #define IPU_CONF 0x00 | ||
21 | #define IPU_CHA_BUF0_RDY 0x04 | ||
22 | #define IPU_CHA_BUF1_RDY 0x08 | ||
23 | #define IPU_CHA_DB_MODE_SEL 0x0C | ||
24 | #define IPU_CHA_CUR_BUF 0x10 | ||
25 | #define IPU_FS_PROC_FLOW 0x14 | ||
26 | #define IPU_FS_DISP_FLOW 0x18 | ||
27 | #define IPU_TASKS_STAT 0x1C | ||
28 | #define IPU_IMA_ADDR 0x20 | ||
29 | #define IPU_IMA_DATA 0x24 | ||
30 | #define IPU_INT_CTRL_1 0x28 | ||
31 | #define IPU_INT_CTRL_2 0x2C | ||
32 | #define IPU_INT_CTRL_3 0x30 | ||
33 | #define IPU_INT_CTRL_4 0x34 | ||
34 | #define IPU_INT_CTRL_5 0x38 | ||
35 | #define IPU_INT_STAT_1 0x3C | ||
36 | #define IPU_INT_STAT_2 0x40 | ||
37 | #define IPU_INT_STAT_3 0x44 | ||
38 | #define IPU_INT_STAT_4 0x48 | ||
39 | #define IPU_INT_STAT_5 0x4C | ||
40 | #define IPU_BRK_CTRL_1 0x50 | ||
41 | #define IPU_BRK_CTRL_2 0x54 | ||
42 | #define IPU_BRK_STAT 0x58 | ||
43 | #define IPU_DIAGB_CTRL 0x5C | ||
44 | |||
45 | /* IPU_CONF Register bits */ | ||
46 | #define IPU_CONF_CSI_EN 0x00000001 | ||
47 | #define IPU_CONF_IC_EN 0x00000002 | ||
48 | #define IPU_CONF_ROT_EN 0x00000004 | ||
49 | #define IPU_CONF_PF_EN 0x00000008 | ||
50 | #define IPU_CONF_SDC_EN 0x00000010 | ||
51 | #define IPU_CONF_ADC_EN 0x00000020 | ||
52 | #define IPU_CONF_DI_EN 0x00000040 | ||
53 | #define IPU_CONF_DU_EN 0x00000080 | ||
54 | #define IPU_CONF_PXL_ENDIAN 0x00000100 | ||
55 | |||
56 | /* Image Converter Registers */ | ||
57 | #define IC_CONF 0x88 | ||
58 | #define IC_PRP_ENC_RSC 0x8C | ||
59 | #define IC_PRP_VF_RSC 0x90 | ||
60 | #define IC_PP_RSC 0x94 | ||
61 | #define IC_CMBP_1 0x98 | ||
62 | #define IC_CMBP_2 0x9C | ||
63 | #define PF_CONF 0xA0 | ||
64 | #define IDMAC_CONF 0xA4 | ||
65 | #define IDMAC_CHA_EN 0xA8 | ||
66 | #define IDMAC_CHA_PRI 0xAC | ||
67 | #define IDMAC_CHA_BUSY 0xB0 | ||
68 | |||
69 | /* Image Converter Register bits */ | ||
70 | #define IC_CONF_PRPENC_EN 0x00000001 | ||
71 | #define IC_CONF_PRPENC_CSC1 0x00000002 | ||
72 | #define IC_CONF_PRPENC_ROT_EN 0x00000004 | ||
73 | #define IC_CONF_PRPVF_EN 0x00000100 | ||
74 | #define IC_CONF_PRPVF_CSC1 0x00000200 | ||
75 | #define IC_CONF_PRPVF_CSC2 0x00000400 | ||
76 | #define IC_CONF_PRPVF_CMB 0x00000800 | ||
77 | #define IC_CONF_PRPVF_ROT_EN 0x00001000 | ||
78 | #define IC_CONF_PP_EN 0x00010000 | ||
79 | #define IC_CONF_PP_CSC1 0x00020000 | ||
80 | #define IC_CONF_PP_CSC2 0x00040000 | ||
81 | #define IC_CONF_PP_CMB 0x00080000 | ||
82 | #define IC_CONF_PP_ROT_EN 0x00100000 | ||
83 | #define IC_CONF_IC_GLB_LOC_A 0x10000000 | ||
84 | #define IC_CONF_KEY_COLOR_EN 0x20000000 | ||
85 | #define IC_CONF_RWS_EN 0x40000000 | ||
86 | #define IC_CONF_CSI_MEM_WR_EN 0x80000000 | ||
87 | |||
88 | #define IDMA_CHAN_INVALID 0x000000FF | ||
89 | #define IDMA_IC_0 0x00000001 | ||
90 | #define IDMA_IC_1 0x00000002 | ||
91 | #define IDMA_IC_2 0x00000004 | ||
92 | #define IDMA_IC_3 0x00000008 | ||
93 | #define IDMA_IC_4 0x00000010 | ||
94 | #define IDMA_IC_5 0x00000020 | ||
95 | #define IDMA_IC_6 0x00000040 | ||
96 | #define IDMA_IC_7 0x00000080 | ||
97 | #define IDMA_IC_8 0x00000100 | ||
98 | #define IDMA_IC_9 0x00000200 | ||
99 | #define IDMA_IC_10 0x00000400 | ||
100 | #define IDMA_IC_11 0x00000800 | ||
101 | #define IDMA_IC_12 0x00001000 | ||
102 | #define IDMA_IC_13 0x00002000 | ||
103 | #define IDMA_SDC_BG 0x00004000 | ||
104 | #define IDMA_SDC_FG 0x00008000 | ||
105 | #define IDMA_SDC_MASK 0x00010000 | ||
106 | #define IDMA_SDC_PARTIAL 0x00020000 | ||
107 | #define IDMA_ADC_SYS1_WR 0x00040000 | ||
108 | #define IDMA_ADC_SYS2_WR 0x00080000 | ||
109 | #define IDMA_ADC_SYS1_CMD 0x00100000 | ||
110 | #define IDMA_ADC_SYS2_CMD 0x00200000 | ||
111 | #define IDMA_ADC_SYS1_RD 0x00400000 | ||
112 | #define IDMA_ADC_SYS2_RD 0x00800000 | ||
113 | #define IDMA_PF_QP 0x01000000 | ||
114 | #define IDMA_PF_BSP 0x02000000 | ||
115 | #define IDMA_PF_Y_IN 0x04000000 | ||
116 | #define IDMA_PF_U_IN 0x08000000 | ||
117 | #define IDMA_PF_V_IN 0x10000000 | ||
118 | #define IDMA_PF_Y_OUT 0x20000000 | ||
119 | #define IDMA_PF_U_OUT 0x40000000 | ||
120 | #define IDMA_PF_V_OUT 0x80000000 | ||
121 | |||
122 | #define TSTAT_PF_H264_PAUSE 0x00000001 | ||
123 | #define TSTAT_CSI2MEM_MASK 0x0000000C | ||
124 | #define TSTAT_CSI2MEM_OFFSET 2 | ||
125 | #define TSTAT_VF_MASK 0x00000600 | ||
126 | #define TSTAT_VF_OFFSET 9 | ||
127 | #define TSTAT_VF_ROT_MASK 0x000C0000 | ||
128 | #define TSTAT_VF_ROT_OFFSET 18 | ||
129 | #define TSTAT_ENC_MASK 0x00000180 | ||
130 | #define TSTAT_ENC_OFFSET 7 | ||
131 | #define TSTAT_ENC_ROT_MASK 0x00030000 | ||
132 | #define TSTAT_ENC_ROT_OFFSET 16 | ||
133 | #define TSTAT_PP_MASK 0x00001800 | ||
134 | #define TSTAT_PP_OFFSET 11 | ||
135 | #define TSTAT_PP_ROT_MASK 0x00300000 | ||
136 | #define TSTAT_PP_ROT_OFFSET 20 | ||
137 | #define TSTAT_PF_MASK 0x00C00000 | ||
138 | #define TSTAT_PF_OFFSET 22 | ||
139 | #define TSTAT_ADCSYS1_MASK 0x03000000 | ||
140 | #define TSTAT_ADCSYS1_OFFSET 24 | ||
141 | #define TSTAT_ADCSYS2_MASK 0x0C000000 | ||
142 | #define TSTAT_ADCSYS2_OFFSET 26 | ||
143 | |||
144 | #define TASK_STAT_IDLE 0 | ||
145 | #define TASK_STAT_ACTIVE 1 | ||
146 | #define TASK_STAT_WAIT4READY 2 | ||
147 | |||
148 | struct idmac { | ||
149 | struct dma_device dma; | ||
150 | }; | ||
151 | |||
152 | struct ipu { | ||
153 | void __iomem *reg_ipu; | ||
154 | void __iomem *reg_ic; | ||
155 | unsigned int irq_fn; /* IPU Function IRQ to the CPU */ | ||
156 | unsigned int irq_err; /* IPU Error IRQ to the CPU */ | ||
157 | unsigned int irq_base; /* Beginning of the IPU IRQ range */ | ||
158 | unsigned long channel_init_mask; | ||
159 | spinlock_t lock; | ||
160 | struct clk *ipu_clk; | ||
161 | struct device *dev; | ||
162 | struct idmac idmac; | ||
163 | struct idmac_channel channel[IPU_CHANNELS_NUM]; | ||
164 | struct tasklet_struct tasklet; | ||
165 | }; | ||
166 | |||
167 | #define to_idmac(d) container_of(d, struct idmac, dma) | ||
168 | |||
169 | extern int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev); | ||
170 | extern void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev); | ||
171 | |||
172 | extern bool ipu_irq_status(unsigned int irq); | ||
173 | extern int ipu_irq_map(unsigned int source); | ||
174 | extern int ipu_irq_unmap(unsigned int source); | ||
175 | |||
176 | #endif | ||
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c new file mode 100644 index 000000000000..83f532cc767f --- /dev/null +++ b/drivers/dma/ipu/ipu_irq.c | |||
@@ -0,0 +1,413 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 | ||
3 | * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/init.h> | ||
11 | #include <linux/err.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/irq.h> | ||
16 | #include <linux/io.h> | ||
17 | |||
18 | #include <mach/ipu.h> | ||
19 | |||
20 | #include "ipu_intern.h" | ||
21 | |||
22 | /* | ||
23 | * Register read / write - shall be inlined by the compiler | ||
24 | */ | ||
25 | static u32 ipu_read_reg(struct ipu *ipu, unsigned long reg) | ||
26 | { | ||
27 | return __raw_readl(ipu->reg_ipu + reg); | ||
28 | } | ||
29 | |||
30 | static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg) | ||
31 | { | ||
32 | __raw_writel(value, ipu->reg_ipu + reg); | ||
33 | } | ||
34 | |||
35 | |||
36 | /* | ||
37 | * IPU IRQ chip driver | ||
38 | */ | ||
39 | |||
40 | #define IPU_IRQ_NR_FN_BANKS 3 | ||
41 | #define IPU_IRQ_NR_ERR_BANKS 2 | ||
42 | #define IPU_IRQ_NR_BANKS (IPU_IRQ_NR_FN_BANKS + IPU_IRQ_NR_ERR_BANKS) | ||
43 | |||
44 | struct ipu_irq_bank { | ||
45 | unsigned int control; | ||
46 | unsigned int status; | ||
47 | spinlock_t lock; | ||
48 | struct ipu *ipu; | ||
49 | }; | ||
50 | |||
51 | static struct ipu_irq_bank irq_bank[IPU_IRQ_NR_BANKS] = { | ||
52 | /* 3 groups of functional interrupts */ | ||
53 | { | ||
54 | .control = IPU_INT_CTRL_1, | ||
55 | .status = IPU_INT_STAT_1, | ||
56 | }, { | ||
57 | .control = IPU_INT_CTRL_2, | ||
58 | .status = IPU_INT_STAT_2, | ||
59 | }, { | ||
60 | .control = IPU_INT_CTRL_3, | ||
61 | .status = IPU_INT_STAT_3, | ||
62 | }, | ||
63 | /* 2 groups of error interrupts */ | ||
64 | { | ||
65 | .control = IPU_INT_CTRL_4, | ||
66 | .status = IPU_INT_STAT_4, | ||
67 | }, { | ||
68 | .control = IPU_INT_CTRL_5, | ||
69 | .status = IPU_INT_STAT_5, | ||
70 | }, | ||
71 | }; | ||
72 | |||
73 | struct ipu_irq_map { | ||
74 | unsigned int irq; | ||
75 | int source; | ||
76 | struct ipu_irq_bank *bank; | ||
77 | struct ipu *ipu; | ||
78 | }; | ||
79 | |||
80 | static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS]; | ||
81 | /* Protects allocations from the above array of maps */ | ||
82 | static DEFINE_MUTEX(map_lock); | ||
83 | /* Protects register accesses and individual mappings */ | ||
84 | static DEFINE_SPINLOCK(bank_lock); | ||
85 | |||
86 | static struct ipu_irq_map *src2map(unsigned int src) | ||
87 | { | ||
88 | int i; | ||
89 | |||
90 | for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) | ||
91 | if (irq_map[i].source == src) | ||
92 | return irq_map + i; | ||
93 | |||
94 | return NULL; | ||
95 | } | ||
96 | |||
97 | static void ipu_irq_unmask(unsigned int irq) | ||
98 | { | ||
99 | struct ipu_irq_map *map = get_irq_chip_data(irq); | ||
100 | struct ipu_irq_bank *bank; | ||
101 | uint32_t reg; | ||
102 | unsigned long lock_flags; | ||
103 | |||
104 | spin_lock_irqsave(&bank_lock, lock_flags); | ||
105 | |||
106 | bank = map->bank; | ||
107 | if (!bank) { | ||
108 | spin_unlock_irqrestore(&bank_lock, lock_flags); | ||
109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | ||
110 | return; | ||
111 | } | ||
112 | |||
113 | reg = ipu_read_reg(bank->ipu, bank->control); | ||
114 | reg |= (1UL << (map->source & 31)); | ||
115 | ipu_write_reg(bank->ipu, reg, bank->control); | ||
116 | |||
117 | spin_unlock_irqrestore(&bank_lock, lock_flags); | ||
118 | } | ||
119 | |||
120 | static void ipu_irq_mask(unsigned int irq) | ||
121 | { | ||
122 | struct ipu_irq_map *map = get_irq_chip_data(irq); | ||
123 | struct ipu_irq_bank *bank; | ||
124 | uint32_t reg; | ||
125 | unsigned long lock_flags; | ||
126 | |||
127 | spin_lock_irqsave(&bank_lock, lock_flags); | ||
128 | |||
129 | bank = map->bank; | ||
130 | if (!bank) { | ||
131 | spin_unlock_irqrestore(&bank_lock, lock_flags); | ||
132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | ||
133 | return; | ||
134 | } | ||
135 | |||
136 | reg = ipu_read_reg(bank->ipu, bank->control); | ||
137 | reg &= ~(1UL << (map->source & 31)); | ||
138 | ipu_write_reg(bank->ipu, reg, bank->control); | ||
139 | |||
140 | spin_unlock_irqrestore(&bank_lock, lock_flags); | ||
141 | } | ||
142 | |||
143 | static void ipu_irq_ack(unsigned int irq) | ||
144 | { | ||
145 | struct ipu_irq_map *map = get_irq_chip_data(irq); | ||
146 | struct ipu_irq_bank *bank; | ||
147 | unsigned long lock_flags; | ||
148 | |||
149 | spin_lock_irqsave(&bank_lock, lock_flags); | ||
150 | |||
151 | bank = map->bank; | ||
152 | if (!bank) { | ||
153 | spin_unlock_irqrestore(&bank_lock, lock_flags); | ||
154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | ||
155 | return; | ||
156 | } | ||
157 | |||
158 | ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status); | ||
159 | spin_unlock_irqrestore(&bank_lock, lock_flags); | ||
160 | } | ||
161 | |||
162 | /** | ||
163 | * ipu_irq_status() - returns the current interrupt status of the specified IRQ. | ||
164 | * @irq: interrupt line to get status for. | ||
165 | * @return: true if the interrupt is pending/asserted or false if the | ||
166 | * interrupt is not pending. | ||
167 | */ | ||
168 | bool ipu_irq_status(unsigned int irq) | ||
169 | { | ||
170 | struct ipu_irq_map *map = get_irq_chip_data(irq); | ||
171 | struct ipu_irq_bank *bank; | ||
172 | unsigned long lock_flags; | ||
173 | bool ret; | ||
174 | |||
175 | spin_lock_irqsave(&bank_lock, lock_flags); | ||
176 | bank = map->bank; | ||
177 | ret = bank && ipu_read_reg(bank->ipu, bank->status) & | ||
178 | (1UL << (map->source & 31)); | ||
179 | spin_unlock_irqrestore(&bank_lock, lock_flags); | ||
180 | |||
181 | return ret; | ||
182 | } | ||
183 | |||
184 | /** | ||
185 | * ipu_irq_map() - map an IPU interrupt source to an IRQ number | ||
186 | * @source: interrupt source bit position (see below) | ||
187 | * @return: mapped IRQ number or negative error code | ||
188 | * | ||
189 | * The source parameter needs some explanation. On i.MX31 the IPU has 137 IRQ | ||
190 | * sources, spread over 5 32-bit registers as 32, 32, 24, 32 and 17 sources. | ||
191 | * However, the source argument of this function is not the sequential number | ||
192 | * of the IRQ, but rather its bit position. So the first interrupt in the | ||
193 | * fourth register has source number 96, not 88. This makes calculations easier | ||
194 | * and also provides forward compatibility with future IPU implementations, | ||
195 | * whatever their interrupt bit assignments (a worked example follows the function). | ||
196 | */ | ||
197 | int ipu_irq_map(unsigned int source) | ||
198 | { | ||
199 | int i, ret = -ENOMEM; | ||
200 | struct ipu_irq_map *map; | ||
201 | |||
202 | might_sleep(); | ||
203 | |||
204 | mutex_lock(&map_lock); | ||
205 | map = src2map(source); | ||
206 | if (map) { | ||
207 | pr_err("IPU: Source %u already mapped to IRQ %u\n", source, map->irq); | ||
208 | ret = -EBUSY; | ||
209 | goto out; | ||
210 | } | ||
211 | |||
212 | for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) { | ||
213 | if (irq_map[i].source < 0) { | ||
214 | unsigned long lock_flags; | ||
215 | |||
216 | spin_lock_irqsave(&bank_lock, lock_flags); | ||
217 | irq_map[i].source = source; | ||
218 | irq_map[i].bank = irq_bank + source / 32; | ||
219 | spin_unlock_irqrestore(&bank_lock, lock_flags); | ||
220 | |||
221 | ret = irq_map[i].irq; | ||
222 | pr_debug("IPU: mapped source %u to IRQ %u\n", | ||
223 | source, ret); | ||
224 | break; | ||
225 | } | ||
226 | } | ||
227 | out: | ||
228 | mutex_unlock(&map_lock); | ||
229 | |||
230 | if (ret < 0) | ||
231 | pr_err("IPU: couldn't map source %u: %d\n", source, ret); | ||
232 | |||
233 | return ret; | ||
234 | } | ||
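
To make the bit-position numbering concrete, here is a small stand-alone illustration (plain C, not kernel code) of the arithmetic that ipu_irq_map() and the mask/unmask/ack helpers rely on: bank = source / 32 and bit = source & 31, so the first interrupt of the fourth register is source 96, even though only 88 sources exist in the three registers before it.

#include <stdio.h>

int main(void)
{
    /* sources per status/control register on i.MX31: 32, 32, 24, 32, 17 */
    unsigned int sources[] = { 0, 31, 63, 87, 96, 144 };
    int i;

    for (i = 0; i < (int)(sizeof(sources) / sizeof(sources[0])); i++) {
        unsigned int src = sources[i];

        printf("source %3u -> bank %u, bit %2u\n",
               src, src / 32, src & 31);
    }
    return 0;
}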
235 | |||
236 | /** | ||
237 | * ipu_irq_unmap() - unmap an IPU interrupt source | ||
238 | * @source: interrupt source bit position (see ipu_irq_map()) | ||
239 | * @return: 0 or negative error code | ||
240 | */ | ||
241 | int ipu_irq_unmap(unsigned int source) | ||
242 | { | ||
243 | int i, ret = -EINVAL; | ||
244 | |||
245 | might_sleep(); | ||
246 | |||
247 | mutex_lock(&map_lock); | ||
248 | for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) { | ||
249 | if (irq_map[i].source == source) { | ||
250 | unsigned long lock_flags; | ||
251 | |||
252 | pr_debug("IPU: unmapped source %u from IRQ %u\n", | ||
253 | source, irq_map[i].irq); | ||
254 | |||
255 | spin_lock_irqsave(&bank_lock, lock_flags); | ||
256 | irq_map[i].source = -EINVAL; | ||
257 | irq_map[i].bank = NULL; | ||
258 | spin_unlock_irqrestore(&bank_lock, lock_flags); | ||
259 | |||
260 | ret = 0; | ||
261 | break; | ||
262 | } | ||
263 | } | ||
264 | mutex_unlock(&map_lock); | ||
265 | |||
266 | return ret; | ||
267 | } | ||
268 | |||
269 | /* Chained IRQ handler for IPU error interrupt */ | ||
270 | static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | ||
271 | { | ||
272 | struct ipu *ipu = get_irq_data(irq); | ||
273 | u32 status; | ||
274 | int i, line; | ||
275 | |||
276 | for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) { | ||
277 | struct ipu_irq_bank *bank = irq_bank + i; | ||
278 | |||
279 | spin_lock(&bank_lock); | ||
280 | status = ipu_read_reg(ipu, bank->status); | ||
281 | /* | ||
282 | * We don't think we have to clear all interrupts here; they will | ||
283 | * be acked by ->handle_irq() (handle_level_irq). However, we | ||
284 | * might want to clear unhandled interrupts after the loop... | ||
285 | */ | ||
286 | status &= ipu_read_reg(ipu, bank->control); | ||
287 | spin_unlock(&bank_lock); | ||
288 | while ((line = ffs(status))) { | ||
289 | struct ipu_irq_map *map; | ||
290 | |||
291 | line--; | ||
292 | status &= ~(1UL << line); | ||
293 | |||
294 | spin_lock(&bank_lock); | ||
295 | map = src2map(32 * i + line); | ||
296 | if (map) | ||
297 | irq = map->irq; | ||
298 | spin_unlock(&bank_lock); | ||
299 | |||
300 | if (!map) { | ||
301 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", | ||
302 | line, i); | ||
303 | continue; | ||
304 | } | ||
305 | generic_handle_irq(irq); | ||
306 | } | ||
307 | } | ||
308 | } | ||
309 | |||
310 | /* Chained IRQ handler for IPU function interrupt */ | ||
311 | static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) | ||
312 | { | ||
313 | struct ipu *ipu = get_irq_data(irq); | ||
314 | u32 status; | ||
315 | int i, line; | ||
316 | |||
317 | for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) { | ||
318 | struct ipu_irq_bank *bank = irq_bank + i; | ||
319 | |||
320 | spin_lock(&bank_lock); | ||
321 | status = ipu_read_reg(ipu, bank->status); | ||
322 | /* Not clearing all interrupts, see above */ | ||
323 | status &= ipu_read_reg(ipu, bank->control); | ||
324 | spin_unlock(&bank_lock); | ||
325 | while ((line = ffs(status))) { | ||
326 | struct ipu_irq_map *map; | ||
327 | |||
328 | line--; | ||
329 | status &= ~(1UL << line); | ||
330 | |||
331 | spin_lock(&bank_lock); | ||
332 | map = src2map(32 * i + line); | ||
333 | if (map) | ||
334 | irq = map->irq; | ||
335 | spin_unlock(&bank_lock); | ||
336 | |||
337 | if (!map) { | ||
338 | pr_err("IPU: Interrupt on unmapped source %u bank %d\n", | ||
339 | line, i); | ||
340 | continue; | ||
341 | } | ||
342 | generic_handle_irq(irq); | ||
343 | } | ||
344 | } | ||
345 | } | ||
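
Both chained handlers above share the same demux pattern: read the status register, AND it with the control (enable) register so that masked sources are ignored, then dispatch each remaining bit lowest-first using ffs(). A stand-alone model of just that loop (plain C; the status and control values are hypothetical):

#include <stdio.h>
#include <strings.h>    /* ffs() */

static void dispatch(int line)
{
    printf("dispatch source bit %d\n", line);
}

int main(void)
{
    unsigned int status  = 0x0000A011;  /* pending bits (hypothetical) */
    unsigned int control = 0x0000F001;  /* enabled bits (hypothetical) */
    int line;

    status &= control;                  /* ignore masked sources */
    while ((line = ffs(status))) {
        line--;                         /* ffs() is 1-based */
        status &= ~(1UL << line);
        dispatch(line);
    }
    return 0;
}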
346 | |||
347 | static struct irq_chip ipu_irq_chip = { | ||
348 | .name = "ipu_irq", | ||
349 | .ack = ipu_irq_ack, | ||
350 | .mask = ipu_irq_mask, | ||
351 | .unmask = ipu_irq_unmask, | ||
352 | }; | ||
353 | |||
354 | /* Install the IRQ handler */ | ||
355 | int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) | ||
356 | { | ||
357 | struct ipu_platform_data *pdata = dev->dev.platform_data; | ||
358 | unsigned int irq, irq_base, i; | ||
359 | |||
360 | irq_base = pdata->irq_base; | ||
361 | |||
362 | for (i = 0; i < IPU_IRQ_NR_BANKS; i++) | ||
363 | irq_bank[i].ipu = ipu; | ||
364 | |||
365 | for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) { | ||
366 | int ret; | ||
367 | |||
368 | irq = irq_base + i; | ||
369 | ret = set_irq_chip(irq, &ipu_irq_chip); | ||
370 | if (ret < 0) | ||
371 | return ret; | ||
372 | ret = set_irq_chip_data(irq, irq_map + i); | ||
373 | if (ret < 0) | ||
374 | return ret; | ||
375 | irq_map[i].ipu = ipu; | ||
376 | irq_map[i].irq = irq; | ||
377 | irq_map[i].source = -EINVAL; | ||
378 | set_irq_handler(irq, handle_level_irq); | ||
379 | #ifdef CONFIG_ARM | ||
380 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | ||
381 | #endif | ||
382 | } | ||
383 | |||
384 | set_irq_data(ipu->irq_fn, ipu); | ||
385 | set_irq_chained_handler(ipu->irq_fn, ipu_irq_fn); | ||
386 | |||
387 | set_irq_data(ipu->irq_err, ipu); | ||
388 | set_irq_chained_handler(ipu->irq_err, ipu_irq_err); | ||
389 | |||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev) | ||
394 | { | ||
395 | struct ipu_platform_data *pdata = dev->dev.platform_data; | ||
396 | unsigned int irq, irq_base; | ||
397 | |||
398 | irq_base = pdata->irq_base; | ||
399 | |||
400 | set_irq_chained_handler(ipu->irq_fn, NULL); | ||
401 | set_irq_data(ipu->irq_fn, NULL); | ||
402 | |||
403 | set_irq_chained_handler(ipu->irq_err, NULL); | ||
404 | set_irq_data(ipu->irq_err, NULL); | ||
405 | |||
406 | for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) { | ||
407 | #ifdef CONFIG_ARM | ||
408 | set_irq_flags(irq, 0); | ||
409 | #endif | ||
410 | set_irq_chip(irq, NULL); | ||
411 | set_irq_chip_data(irq, NULL); | ||
412 | } | ||
413 | } | ||