Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/drivers/dma/dma-sh.c        | 290
-rw-r--r--  arch/sh/include/asm/dma-sh.h        |  87
-rw-r--r--  arch/sh/include/cpu-sh4a/cpu/dma.h  |   8
3 files changed, 177 insertions(+), 208 deletions(-)
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index a60da6dd4d17..4c171f13b0e8 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -14,35 +14,72 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/io.h>
 #include <mach-dreamcast/mach/dma.h>
 #include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/dma-sh.h>
+#include <asm/dma-register.h>
+#include <cpu/dma-register.h>
+#include <cpu/dma.h>
 
-#if defined(DMAE1_IRQ)
-#define NR_DMAE		2
-#else
-#define NR_DMAE		1
+/*
+ * Define the default configuration for dual address memory-memory transfer.
+ * The 0x400 value represents auto-request, external->external.
+ */
+#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT))
+
+static unsigned long dma_find_base(unsigned int chan)
+{
+	unsigned long base = SH_DMAC_BASE0;
+
+#ifdef SH_DMAC_BASE1
+	if (chan >= 6)
+		base = SH_DMAC_BASE1;
 #endif
 
-static const char *dmae_name[] = {
-	"DMAC Address Error0", "DMAC Address Error1"
-};
+	return base;
+}
+
+static unsigned long dma_base_addr(unsigned int chan)
+{
+	unsigned long base = dma_find_base(chan);
+
+	/* Normalize offset calculation */
+	if (chan >= 6)
+		chan -= 6;
+	if (chan >= 4)
+		base += 0x10;
+
+	return base + (chan * 0x10);
+}
 
+#ifdef CONFIG_SH_DMA_IRQ_MULTI
 static inline unsigned int get_dmte_irq(unsigned int chan)
 {
-	unsigned int irq = 0;
-	if (chan < ARRAY_SIZE(dmte_irq_map))
-		irq = dmte_irq_map[chan];
-
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-	if (irq > DMTE6_IRQ)
-		return DMTE6_IRQ;
-	return DMTE0_IRQ;
+	return chan >= 6 ? DMTE6_IRQ : DMTE0_IRQ;
+}
 #else
-	return irq;
+
+static unsigned int dmte_irq_map[] = {
+	DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3,
+
+#ifdef DMTE4_IRQ
+	DMTE4_IRQ, DMTE4_IRQ + 1,
+#endif
+
+#ifdef DMTE6_IRQ
+	DMTE6_IRQ, DMTE6_IRQ + 1,
+#endif
+
+#ifdef DMTE8_IRQ
+	DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, DMTE11_IRQ,
 #endif
+};
+
+static inline unsigned int get_dmte_irq(unsigned int chan)
+{
+	return dmte_irq_map[chan];
 }
+#endif
 
 /*
  * We determine the correct shift size based off of the CHCR transmit size
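The dma_find_base()/dma_base_addr() pair above replaces the static dma_base_addr[] lookup table that goes away with asm/dma-sh.h (the table is still visible in the deletion further down). A stand-alone sketch of the same arithmetic, assuming the SH7780 base values from cpu-sh4a/cpu/dma.h, shows the per-group offsets 0x00/0x10/0x20/0x30/0x50/0x60 that the old table encoded; the extra 0x10 step skips the slot that DMAOR appears to occupy inside the channel register space:

#include <stdio.h>

#define SH_DMAC_BASE0	0xFC808020	/* SH7780 DMAC0, from cpu/dma.h */
#define SH_DMAC_BASE1	0xFC818020	/* SH7780 DMAC1, from cpu/dma.h */

/* Mirrors dma_find_base() + dma_base_addr() from the hunk above. */
static unsigned long base_addr(unsigned int chan)
{
	unsigned long base = (chan >= 6) ? SH_DMAC_BASE1 : SH_DMAC_BASE0;

	if (chan >= 6)
		chan -= 6;	/* normalize to an index within the group */
	if (chan >= 4)
		base += 0x10;	/* step over the DMAOR slot */

	return base + (chan * 0x10);
}

int main(void)
{
	unsigned int chan;

	for (chan = 0; chan < 12; chan++)
		printf("channel %2u -> 0x%08lx\n", chan, base_addr(chan));

	return 0;
}

Channels 0-3 print SH_DMAC_BASE0 + 0x00 through + 0x30, channels 4 and 5 print + 0x50 and + 0x60, and channels 6-11 repeat the same pattern relative to SH_DMAC_BASE1, matching the removed table entry for entry.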
@@ -53,9 +90,10 @@ static inline unsigned int get_dmte_irq(unsigned int chan)
  * iterations to complete the transfer.
  */
 static unsigned int ts_shift[] = TS_SHIFT;
+
 static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
 {
-	u32 chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	u32 chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
 		  ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
 
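calc_xmit_shift() above turns the CHCR transmit-size field into an index into ts_shift[], and every count programmed into TCR is expressed in transfer units rather than bytes. A hedged worked example, assuming a 32-bit transfer size whose ts_shift[] entry is 2:

/*
 * Illustrative only: byte count -> TCR transfer count.
 * Assumes TS selects 32-bit units, i.e. a shift of 2.
 */
static unsigned int bytes_to_transfers(unsigned int nbytes, unsigned int shift)
{
	return nbytes >> shift;	/* e.g. 4096 >> 2 == 1024 transfers */
}

The same shift is applied in the opposite direction when the residue is read back out of TCR, as the sh_dmac_get_dma_residue() hunk below shows.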
@@ -73,13 +111,13 @@ static irqreturn_t dma_tei(int irq, void *dev_id)
 	struct dma_channel *chan = dev_id;
 	u32 chcr;
 
-	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 
 	if (!(chcr & CHCR_TE))
 		return IRQ_NONE;
 
 	chcr &= ~(CHCR_IE | CHCR_DE);
-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
 
 	wake_up(&chan->wait_queue);
 
@@ -91,13 +129,8 @@ static int sh_dmac_request_dma(struct dma_channel *chan)
 	if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
 		return 0;
 
-	return request_irq(get_dmte_irq(chan->chan), dma_tei,
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-			   IRQF_SHARED,
-#else
-			   0,
-#endif
-			   chan->dev_id, chan);
+	return request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED,
+			   chan->dev_id, chan);
 }
 
 static void sh_dmac_free_dma(struct dma_channel *chan)
@@ -118,7 +151,7 @@ sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
 		chan->flags &= ~DMA_TEI_CAPABLE;
 	}
 
-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
 
 	chan->flags |= DMA_CONFIGURED;
 	return 0;
@@ -129,13 +162,13 @@ static void sh_dmac_enable_dma(struct dma_channel *chan)
 	int irq;
 	u32 chcr;
 
-	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 	chcr |= CHCR_DE;
 
 	if (chan->flags & DMA_TEI_CAPABLE)
 		chcr |= CHCR_IE;
 
-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
 
 	if (chan->flags & DMA_TEI_CAPABLE) {
 		irq = get_dmte_irq(chan->chan);
@@ -153,9 +186,9 @@ static void sh_dmac_disable_dma(struct dma_channel *chan)
 		disable_irq(irq);
 	}
 
-	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
 }
 
 static int sh_dmac_xfer_dma(struct dma_channel *chan)
@@ -186,13 +219,13 @@ static int sh_dmac_xfer_dma(struct dma_channel *chan)
 	 */
 	if (chan->sar || (mach_is_dreamcast() &&
 			  chan->chan == PVR2_CASCADE_CHAN))
-		__raw_writel(chan->sar, (dma_base_addr[chan->chan] + SAR));
+		__raw_writel(chan->sar, (dma_base_addr(chan->chan) + SAR));
 	if (chan->dar || (mach_is_dreamcast() &&
 			  chan->chan == PVR2_CASCADE_CHAN))
-		__raw_writel(chan->dar, (dma_base_addr[chan->chan] + DAR));
+		__raw_writel(chan->dar, (dma_base_addr(chan->chan) + DAR));
 
 	__raw_writel(chan->count >> calc_xmit_shift(chan),
-		     (dma_base_addr[chan->chan] + TCR));
+		     (dma_base_addr(chan->chan) + TCR));
 
 	sh_dmac_enable_dma(chan);
 
@@ -201,13 +234,32 @@ static int sh_dmac_xfer_dma(struct dma_channel *chan)
 
 static int sh_dmac_get_dma_residue(struct dma_channel *chan)
 {
-	if (!(__raw_readl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE))
+	if (!(__raw_readl(dma_base_addr(chan->chan) + CHCR) & CHCR_DE))
 		return 0;
 
-	return __raw_readl(dma_base_addr[chan->chan] + TCR)
+	return __raw_readl(dma_base_addr(chan->chan) + TCR)
 		<< calc_xmit_shift(chan);
 }
 
+/*
+ * DMAOR handling
+ */
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7724) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+    defined(CONFIG_CPU_SUBTYPE_SH7785)
+#define NR_DMAOR	2
+#else
+#define NR_DMAOR	1
+#endif
+
+/*
+ * DMAOR bases are broken out amongst channel groups. DMAOR0 manages
+ * channels 0 - 5, DMAOR1 6 - 11 (optional).
+ */
+#define dmaor_read_reg(n)		__raw_readw(dma_find_base((n) * 6))
+#define dmaor_write_reg(n, data)	__raw_writew(data, dma_find_base((n) * 6))
+
 static inline int dmaor_reset(int no)
 {
 	unsigned long dmaor = dmaor_read_reg(no);
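Because dma_find_base() keys off a channel number, passing (n) * 6 to it maps DMAOR index 0 to channel 0 (SH_DMAC_BASE0) and index 1 to channel 6 (SH_DMAC_BASE1, when defined). A hypothetical self-check mirroring that relationship, for illustration only:

/* Hypothetical sanity check of the DMAOR index -> base mapping. */
static void __maybe_unused check_dmaor_bases(void)
{
	BUG_ON(dma_find_base(0 * 6) != SH_DMAC_BASE0);
#ifdef SH_DMAC_BASE1
	BUG_ON(dma_find_base(1 * 6) != SH_DMAC_BASE1);
#endif
}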
@@ -228,36 +280,86 @@ static inline int dmaor_reset(int no)
 	return 0;
 }
 
-#if defined(CONFIG_CPU_SH4)
-static irqreturn_t dma_err(int irq, void *dummy)
-{
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-	int cnt = 0;
-	switch (irq) {
-#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
-	case DMTE6_IRQ:
-		cnt++;
-#endif
-	case DMTE0_IRQ:
-		if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
-			disable_irq(irq);
-			/* DMA multi and error IRQ */
-			return IRQ_HANDLED;
-		}
-	default:
-		return IRQ_NONE;
-	}
+/*
+ * DMAE handling
+ */
+#ifdef CONFIG_CPU_SH4
+
+#if defined(DMAE1_IRQ)
+#define NR_DMAE		2
+#else
+#define NR_DMAE		1
+#endif
+
+static const char *dmae_name[] = {
+	"DMAC Address Error0",
+	"DMAC Address Error1"
+};
+
+#ifdef CONFIG_SH_DMA_IRQ_MULTI
+static inline unsigned int get_dma_error_irq(int n)
+{
+	return get_dmte_irq(n * 6);
+}
 #else
-	dmaor_reset(0);
-#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7785)
-	dmaor_reset(1);
+
+static unsigned int dmae_irq_map[] = {
+	DMAE0_IRQ,
+
+#ifdef DMAE1_IRQ
+	DMAE1_IRQ,
+#endif
+};
+
+static inline unsigned int get_dma_error_irq(int n)
+{
+	return dmae_irq_map[n];
+}
 #endif
+
+static irqreturn_t dma_err(int irq, void *dummy)
+{
+	int i;
+
+	for (i = 0; i < NR_DMAOR; i++)
+		dmaor_reset(i);
+
 	disable_irq(irq);
 
 	return IRQ_HANDLED;
-#endif
+}
+
+static int dmae_irq_init(void)
+{
+	int n;
+
+	for (n = 0; n < NR_DMAE; n++) {
+		int i = request_irq(get_dma_error_irq(n), dma_err,
+				    IRQF_SHARED, dmae_name[n],
				    (void *)dmae_name[n]);
+		if (unlikely(i < 0)) {
+			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
+			return i;
+		}
+	}
+
+	return 0;
+}
+
+static void dmae_irq_free(void)
+{
+	int n;
+
+	for (n = 0; n < NR_DMAE; n++)
+		free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
+}
+#else
+static inline int dmae_irq_init(void)
+{
+	return 0;
+}
+
+static void dmae_irq_free(void)
+{
 }
 #endif
 
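One subtlety in dmae_irq_init() above: request_irq() with IRQF_SHARED requires a non-NULL dev_id cookie (the IRQ core rejects a shared registration without one, and free_irq() uses the same cookie to select which handler to remove), which is why the error IRQs are registered and freed with the dmae_name[] pointer as the cookie. A minimal sketch of the pattern, with hypothetical names:

/* Hypothetical driver following the shared-IRQ pattern used above. */
static irqreturn_t example_err(int irq, void *dev_id)
{
	/* A shared handler must check whether its device really fired. */
	return IRQ_NONE;
}

static int example_init(unsigned int irq)
{
	static const char name[] = "example-dma-error";
	int ret;

	ret = request_irq(irq, example_err, IRQF_SHARED, name, (void *)name);
	if (unlikely(ret < 0))
		return ret;

	free_irq(irq, (void *)name);	/* tear down with the same cookie */
	return 0;
}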
@@ -276,72 +378,34 @@ static struct dma_info sh_dmac_info = {
 	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
 };
 
-#ifdef CONFIG_CPU_SH4
-static unsigned int get_dma_error_irq(int n)
-{
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-	return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6);
-#else
-	return (n == 0) ? DMAE0_IRQ :
-#if defined(DMAE1_IRQ)
-			  DMAE1_IRQ;
-#else
-			  -1;
-#endif
-#endif
-}
-#endif
-
 static int __init sh_dmac_init(void)
 {
 	struct dma_info *info = &sh_dmac_info;
-	int i;
-
-#ifdef CONFIG_CPU_SH4
-	int n;
+	int i, rc;
 
-	for (n = 0; n < NR_DMAE; n++) {
-		i = request_irq(get_dma_error_irq(n), dma_err,
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-				IRQF_SHARED,
-#else
-				0,
-#endif
-				dmae_name[n], (void *)dmae_name[n]);
-		if (unlikely(i < 0)) {
-			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
-			return i;
-		}
-	}
-#endif /* CONFIG_CPU_SH4 */
+	/*
+	 * Initialize DMAE, for parts that support it.
+	 */
+	rc = dmae_irq_init();
+	if (unlikely(rc != 0))
+		return rc;
 
 	/*
 	 * Initialize DMAOR, and clean up any error flags that may have
 	 * been set.
 	 */
-	i = dmaor_reset(0);
-	if (unlikely(i != 0))
-		return i;
-#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7785)
-	i = dmaor_reset(1);
-	if (unlikely(i != 0))
-		return i;
-#endif
+	for (i = 0; i < NR_DMAOR; i++) {
+		rc = dmaor_reset(i);
+		if (unlikely(rc != 0))
+			return rc;
+	}
 
 	return register_dmac(info);
 }
 
 static void __exit sh_dmac_exit(void)
 {
-#ifdef CONFIG_CPU_SH4
-	int n;
-
-	for (n = 0; n < NR_DMAE; n++) {
-		free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
-	}
-#endif /* CONFIG_CPU_SH4 */
+	dmae_irq_free();
 	unregister_dmac(&sh_dmac_info);
 }
 
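For context, consumers reach this driver through the legacy arch/sh dma-api layer rather than by calling the sh_dmac_* hooks directly. A heavily hedged usage sketch, assuming the request_dma()/dma_configure_channel()/dma_write()/dma_wait_for_completion() entry points declared in asm/dma.h and an illustrative channel number:

/* Hypothetical dma-api client, illustrative only. */
static int example_memcpy_dma(unsigned long src, unsigned long dst, size_t len)
{
	unsigned int chan = 0;	/* illustrative channel */
	int ret;

	ret = request_dma(chan, "example");
	if (unlikely(ret))
		return ret;

	dma_configure_channel(chan, RS_DUAL);	/* auto-request, mem->mem */
	dma_write(chan, src, dst, len);
	dma_wait_for_completion(chan);

	free_dma(chan);
	return 0;
}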
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h
deleted file mode 100644
index 9a4875a89636..000000000000
--- a/arch/sh/include/asm/dma-sh.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * arch/sh/include/asm/dma-sh.h
- *
- * Copyright (C) 2000  Takashi YOSHII
- * Copyright (C) 2003  Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __DMA_SH_H
-#define __DMA_SH_H
-
-#include <asm/dma-register.h>
-#include <cpu/dma-register.h>
-#include <cpu/dma.h>
-
-/* DMAOR control: the DMAOR access size differs by CPU. */
-#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7724) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7780) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7785)
-#define dmaor_read_reg(n) \
-	(n ? __raw_readw(SH_DMAC_BASE1 + DMAOR) \
-	   : __raw_readw(SH_DMAC_BASE0 + DMAOR))
-#define dmaor_write_reg(n, data) \
-	(n ? __raw_writew(data, SH_DMAC_BASE1 + DMAOR) \
-	   : __raw_writew(data, SH_DMAC_BASE0 + DMAOR))
-#else /* Other CPU */
-#define dmaor_read_reg(n)		__raw_readw(SH_DMAC_BASE0 + DMAOR)
-#define dmaor_write_reg(n, data)	__raw_writew(data, SH_DMAC_BASE0 + DMAOR)
-#endif
-
-static int dmte_irq_map[] __maybe_unused = {
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 4)
-	DMTE0_IRQ,
-	DMTE0_IRQ + 1,
-	DMTE0_IRQ + 2,
-	DMTE0_IRQ + 3,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 6)
-	DMTE4_IRQ,
-	DMTE4_IRQ + 1,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 8)
-	DMTE6_IRQ,
-	DMTE6_IRQ + 1,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 12)
-	DMTE8_IRQ,
-	DMTE9_IRQ,
-	DMTE10_IRQ,
-	DMTE11_IRQ,
-#endif
-};
-
-/*
- * Define the default configuration for dual address memory-memory transfer.
- * The 0x400 value represents auto-request, external->external.
- */
-#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT))
-
-/* DMA base address */
-static u32 dma_base_addr[] __maybe_unused = {
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 4)
-	SH_DMAC_BASE0 + 0x00,	/* channel 0 */
-	SH_DMAC_BASE0 + 0x10,
-	SH_DMAC_BASE0 + 0x20,
-	SH_DMAC_BASE0 + 0x30,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 6)
-	SH_DMAC_BASE0 + 0x50,
-	SH_DMAC_BASE0 + 0x60,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 8)
-	SH_DMAC_BASE1 + 0x00,
-	SH_DMAC_BASE1 + 0x10,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 12)
-	SH_DMAC_BASE1 + 0x20,
-	SH_DMAC_BASE1 + 0x30,
-	SH_DMAC_BASE1 + 0x50,
-	SH_DMAC_BASE1 + 0x60,	/* channel 11 */
-#endif
-};
-
-#endif /* __DMA_SH_H */
diff --git a/arch/sh/include/cpu-sh4a/cpu/dma.h b/arch/sh/include/cpu-sh4a/cpu/dma.h
index c276313104c7..89afb650ce25 100644
--- a/arch/sh/include/cpu-sh4a/cpu/dma.h
+++ b/arch/sh/include/cpu-sh4a/cpu/dma.h
@@ -9,20 +9,17 @@
 #define DMTE4_IRQ	evt2irq(0xb80)
 #define DMAE0_IRQ	evt2irq(0xbc0)	/* DMA Error IRQ */
 #define SH_DMAC_BASE0	0xFE008020
-#define SH_DMARS_BASE0	0xFE009000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7722)
 #define DMTE0_IRQ	evt2irq(0x800)
 #define DMTE4_IRQ	evt2irq(0xb80)
 #define DMAE0_IRQ	evt2irq(0xbc0)	/* DMA Error IRQ */
 #define SH_DMAC_BASE0	0xFE008020
-#define SH_DMARS_BASE0	0xFE009000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
       defined(CONFIG_CPU_SUBTYPE_SH7764)
 #define DMTE0_IRQ	evt2irq(0x640)
 #define DMTE4_IRQ	evt2irq(0x780)
 #define DMAE0_IRQ	evt2irq(0x6c0)
 #define SH_DMAC_BASE0	0xFF608020
-#define SH_DMARS_BASE0	0xFF609000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7723)
 #define DMTE0_IRQ	evt2irq(0x800)	/* DMAC0A */
 #define DMTE4_IRQ	evt2irq(0xb80)	/* DMAC0B */
@@ -35,7 +32,6 @@
 #define DMAE1_IRQ	evt2irq(0xb40)	/* DMA Error IRQ */
 #define SH_DMAC_BASE0	0xFE008020
 #define SH_DMAC_BASE1	0xFDC08020
-#define SH_DMARS_BASE0	0xFDC09000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7724)
 #define DMTE0_IRQ	evt2irq(0x800)	/* DMAC0A */
 #define DMTE4_IRQ	evt2irq(0xb80)	/* DMAC0B */
@@ -48,8 +44,6 @@
 #define DMAE1_IRQ	evt2irq(0xb40)	/* DMA Error IRQ */
 #define SH_DMAC_BASE0	0xFE008020
 #define SH_DMAC_BASE1	0xFDC08020
-#define SH_DMARS_BASE0	0xFE009000
-#define SH_DMARS_BASE1	0xFDC09000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
 #define DMTE0_IRQ	evt2irq(0x640)
 #define DMTE4_IRQ	evt2irq(0x780)
@@ -61,7 +55,6 @@
 #define DMAE0_IRQ	evt2irq(0x6c0)	/* DMA Error IRQ */
 #define SH_DMAC_BASE0	0xFC808020
 #define SH_DMAC_BASE1	0xFC818020
-#define SH_DMARS_BASE0	0xFC809000
 #else /* SH7785 */
 #define DMTE0_IRQ	evt2irq(0x620)
 #define DMTE4_IRQ	evt2irq(0x6a0)
@@ -74,7 +67,6 @@
 #define DMAE1_IRQ	evt2irq(0x940)	/* DMA Error IRQ1 */
 #define SH_DMAC_BASE0	0xFC808020
 #define SH_DMAC_BASE1	0xFCC08020
-#define SH_DMARS_BASE0	0xFC809000
 #endif
 
 #endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */
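The IRQ values in this header are INTEVT vector codes wrapped in evt2irq(). Assuming the usual sh definition of evt2irq() from asm/irq.h, the conversion is a shift and an offset; a worked example for the SH7722 DMA error vector:

/* Assumed definition, for illustration: */
#define evt2irq(evt)	(((evt) >> 5) - 16)

/* DMAE0_IRQ = evt2irq(0xbc0) = (0xbc0 >> 5) - 16 = 94 - 16 = 78 */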