Diffstat (limited to 'arch/sh/drivers/dma/dma-sh.c')
-rw-r--r-- | arch/sh/drivers/dma/dma-sh.c | 169
1 file changed, 111 insertions, 58 deletions
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 50887a592dd0..37fb5b8bbc3f 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -17,28 +17,16 @@
 #include <mach-dreamcast/mach/dma.h>
 #include <asm/dma.h>
 #include <asm/io.h>
-#include "dma-sh.h"
+#include <asm/dma-sh.h>
 
-static int dmte_irq_map[] = {
-	DMTE0_IRQ,
-	DMTE1_IRQ,
-	DMTE2_IRQ,
-	DMTE3_IRQ,
-#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7721) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7760) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7709) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7780)
-	DMTE4_IRQ,
-	DMTE5_IRQ,
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7760) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7780)
-	DMTE6_IRQ,
-	DMTE7_IRQ,
+#if defined(DMAE1_IRQ)
+#define NR_DMAE		2
+#else
+#define NR_DMAE		1
 #endif
+
+static const char *dmae_name[] = {
+	"DMAC Address Error0", "DMAC Address Error1"
 };
 
 static inline unsigned int get_dmte_irq(unsigned int chan)
@@ -46,7 +34,14 @@ static inline unsigned int get_dmte_irq(unsigned int chan)
 	unsigned int irq = 0;
 	if (chan < ARRAY_SIZE(dmte_irq_map))
 		irq = dmte_irq_map[chan];
+
+#if defined(CONFIG_SH_DMA_IRQ_MULTI)
+	if (irq > DMTE6_IRQ)
+		return DMTE6_IRQ;
+	return DMTE0_IRQ;
+#else
 	return irq;
+#endif
 }
 
 /*
@@ -59,7 +54,7 @@ static inline unsigned int get_dmte_irq(unsigned int chan)
  */
 static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
 {
-	u32 chcr = ctrl_inl(CHCR[chan->chan]);
+	u32 chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
 
 	return ts_shift[(chcr & CHCR_TS_MASK)>>CHCR_TS_SHIFT];
 }
@@ -75,13 +70,13 @@ static irqreturn_t dma_tei(int irq, void *dev_id)
 	struct dma_channel *chan = dev_id;
 	u32 chcr;
 
-	chcr = ctrl_inl(CHCR[chan->chan]);
+	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
 
 	if (!(chcr & CHCR_TE))
 		return IRQ_NONE;
 
 	chcr &= ~(CHCR_IE | CHCR_DE);
-	ctrl_outl(chcr, CHCR[chan->chan]);
+	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));
 
 	wake_up(&chan->wait_queue);
 
@@ -94,7 +89,12 @@ static int sh_dmac_request_dma(struct dma_channel *chan)
 		return 0;
 
 	return request_irq(get_dmte_irq(chan->chan), dma_tei,
-			   IRQF_DISABLED, chan->dev_id, chan);
+#if defined(CONFIG_SH_DMA_IRQ_MULTI)
+				IRQF_SHARED,
+#else
+				IRQF_DISABLED,
+#endif
+				chan->dev_id, chan);
 }
 
 static void sh_dmac_free_dma(struct dma_channel *chan)
@@ -115,7 +115,7 @@ sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
 		chan->flags &= ~DMA_TEI_CAPABLE;
 	}
 
-	ctrl_outl(chcr, CHCR[chan->chan]);
+	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));
 
 	chan->flags |= DMA_CONFIGURED;
 	return 0;
@@ -126,13 +126,13 @@ static void sh_dmac_enable_dma(struct dma_channel *chan)
 	int irq;
 	u32 chcr;
 
-	chcr = ctrl_inl(CHCR[chan->chan]);
+	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
 	chcr |= CHCR_DE;
 
 	if (chan->flags & DMA_TEI_CAPABLE)
 		chcr |= CHCR_IE;
 
-	ctrl_outl(chcr, CHCR[chan->chan]);
+	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));
 
 	if (chan->flags & DMA_TEI_CAPABLE) {
 		irq = get_dmte_irq(chan->chan);
@@ -150,9 +150,9 @@ static void sh_dmac_disable_dma(struct dma_channel *chan)
 		disable_irq(irq);
 	}
 
-	chcr = ctrl_inl(CHCR[chan->chan]);
+	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
 	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
-	ctrl_outl(chcr, CHCR[chan->chan]);
+	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));
 }
 
 static int sh_dmac_xfer_dma(struct dma_channel *chan)
@@ -183,12 +183,13 @@ static int sh_dmac_xfer_dma(struct dma_channel *chan)
 	 */
 	if (chan->sar || (mach_is_dreamcast() &&
 			  chan->chan == PVR2_CASCADE_CHAN))
-		ctrl_outl(chan->sar, SAR[chan->chan]);
+		ctrl_outl(chan->sar, (dma_base_addr[chan->chan]+SAR));
 	if (chan->dar || (mach_is_dreamcast() &&
 			  chan->chan == PVR2_CASCADE_CHAN))
-		ctrl_outl(chan->dar, DAR[chan->chan]);
+		ctrl_outl(chan->dar, (dma_base_addr[chan->chan] + DAR));
 
-	ctrl_outl(chan->count >> calc_xmit_shift(chan), DMATCR[chan->chan]);
+	ctrl_outl(chan->count >> calc_xmit_shift(chan),
+		(dma_base_addr[chan->chan] + TCR));
 
 	sh_dmac_enable_dma(chan);
 
@@ -197,36 +198,26 @@ static int sh_dmac_xfer_dma(struct dma_channel *chan)
 
 static int sh_dmac_get_dma_residue(struct dma_channel *chan)
 {
-	if (!(ctrl_inl(CHCR[chan->chan]) & CHCR_DE))
+	if (!(ctrl_inl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE))
 		return 0;
 
-	return ctrl_inl(DMATCR[chan->chan]) << calc_xmit_shift(chan);
+	return ctrl_inl(dma_base_addr[chan->chan] + TCR)
+		<< calc_xmit_shift(chan);
 }
 
-#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7721) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7780) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7709)
-#define dmaor_read_reg()	ctrl_inw(DMAOR)
-#define dmaor_write_reg(data)	ctrl_outw(data, DMAOR)
-#else
-#define dmaor_read_reg()	ctrl_inl(DMAOR)
-#define dmaor_write_reg(data)	ctrl_outl(data, DMAOR)
-#endif
-
-static inline int dmaor_reset(void)
+static inline int dmaor_reset(int no)
 {
-	unsigned long dmaor = dmaor_read_reg();
+	unsigned long dmaor = dmaor_read_reg(no);
 
 	/* Try to clear the error flags first, incase they are set */
 	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
-	dmaor_write_reg(dmaor);
+	dmaor_write_reg(no, dmaor);
 
 	dmaor |= DMAOR_INIT;
-	dmaor_write_reg(dmaor);
+	dmaor_write_reg(no, dmaor);
 
 	/* See if we got an error again */
-	if ((dmaor_read_reg() & (DMAOR_AE | DMAOR_NMIF))) {
+	if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) {
 		printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
 		return -EINVAL;
 	}
@@ -237,10 +228,33 @@ static inline int dmaor_reset(void)
 #if defined(CONFIG_CPU_SH4)
 static irqreturn_t dma_err(int irq, void *dummy)
 {
-	dmaor_reset();
+#if defined(CONFIG_SH_DMA_IRQ_MULTI)
+	int cnt = 0;
+	switch (irq) {
+#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
+	case DMTE6_IRQ:
+		cnt++;
+#endif
+	case DMTE0_IRQ:
+		if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
+			disable_irq(irq);
+			/* DMA multi and error IRQ */
+			return IRQ_HANDLED;
+		}
+	default:
+		return IRQ_NONE;
+	}
+#else
+	dmaor_reset(0);
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
+	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+	defined(CONFIG_CPU_SUBTYPE_SH7785)
+	dmaor_reset(1);
+#endif
 	disable_irq(irq);
 
 	return IRQ_HANDLED;
+#endif
 }
 #endif
 
@@ -259,24 +273,59 @@ static struct dma_info sh_dmac_info = {
 	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
 };
 
+#ifdef CONFIG_CPU_SH4
+static unsigned int get_dma_error_irq(int n)
+{
+#if defined(CONFIG_SH_DMA_IRQ_MULTI)
+	return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6);
+#else
+	return (n == 0) ? DMAE0_IRQ :
+#if defined(DMAE1_IRQ)
+				DMAE1_IRQ;
+#else
+				-1;
+#endif
+#endif
+}
+#endif
+
 static int __init sh_dmac_init(void)
 {
 	struct dma_info *info = &sh_dmac_info;
 	int i;
 
 #ifdef CONFIG_CPU_SH4
-	i = request_irq(DMAE_IRQ, dma_err, IRQF_DISABLED, "DMAC Address Error", 0);
-	if (unlikely(i < 0))
-		return i;
+	int n;
+
+	for (n = 0; n < NR_DMAE; n++) {
+		i = request_irq(get_dma_error_irq(n), dma_err,
+#if defined(CONFIG_SH_DMA_IRQ_MULTI)
+				IRQF_SHARED,
+#else
+				IRQF_DISABLED,
 #endif
+				dmae_name[n], (void *)dmae_name[n]);
+		if (unlikely(i < 0)) {
+			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
+			return i;
+		}
+	}
+#endif /* CONFIG_CPU_SH4 */
 
 	/*
 	 * Initialize DMAOR, and clean up any error flags that may have
 	 * been set.
 	 */
-	i = dmaor_reset();
+	i = dmaor_reset(0);
+	if (unlikely(i != 0))
+		return i;
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
+	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+	defined(CONFIG_CPU_SUBTYPE_SH7785)
+	i = dmaor_reset(1);
 	if (unlikely(i != 0))
 		return i;
+#endif
 
 	return register_dmac(info);
 }
@@ -284,8 +333,12 @@ static int __init sh_dmac_init(void)
 static void __exit sh_dmac_exit(void)
 {
 #ifdef CONFIG_CPU_SH4
-	free_irq(DMAE_IRQ, 0);
-#endif
+	int n;
+
+	for (n = 0; n < NR_DMAE; n++) {
+		free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
+	}
+#endif /* CONFIG_CPU_SH4 */
 	unregister_dmac(&sh_dmac_info);
 }
 
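
The recurring pattern in this patch is the replacement of per-channel register arrays (CHCR[chan], SAR[chan], DAR[chan], DMATCR[chan]) with a per-channel base address plus fixed register offsets, so the same code path can serve every channel of every DMAC block. A minimal userspace sketch of that addressing arithmetic follows; the base addresses and offsets here are illustrative stand-ins, not the real dma_base_addr[] and register layout from <asm/dma-sh.h>.

#include <stdio.h>

/* Illustrative per-channel base addresses; the real dma_base_addr[] table
 * lives in <asm/dma-sh.h> and its values differ per CPU subtype. */
static const unsigned long dma_base_addr[] = {
	0xffa00000, 0xffa00010, 0xffa00020, 0xffa00030,	/* e.g. DMAC0 ch 0-3 */
};

/* Fixed offsets of the per-channel registers from the channel base
 * (layout assumed for this sketch). */
enum { SAR = 0x00, DAR = 0x04, TCR = 0x08, CHCR = 0x0c };

/* Same shape as the patch's "dma_base_addr[chan->chan] + CHCR" accesses. */
static unsigned long chan_reg(unsigned int chan, unsigned long offset)
{
	return dma_base_addr[chan] + offset;
}

int main(void)
{
	unsigned int chan;

	for (chan = 0; chan < 4; chan++)
		printf("ch%u: SAR=%#lx DAR=%#lx TCR=%#lx CHCR=%#lx\n", chan,
		       chan_reg(chan, SAR), chan_reg(chan, DAR),
		       chan_reg(chan, TCR), chan_reg(chan, CHCR));
	return 0;
}

In the driver itself the computed addresses are used with ctrl_inl()/ctrl_outl(), which is why the diff can also make DMAOR handling per-controller (dmaor_reset(0)/dmaor_reset(1)) instead of relying on a single hard-coded register set.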