path: root/arch/arm
author     Russell King <rmk@dyn-67.arm.linux.org.uk>  2009-02-21 16:42:50 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2009-02-21 16:42:50 -0500
commit     22b61a11fd4e6d7a48d694ce350331bebc0394ed (patch)
tree       f4be46c8154f5094c248fcd9fdf644b236f591b3 /arch/arm
parent     423145a5d4def58cff760809d48cfb21316d59a9 (diff)
parent     fa4e998999322bc1b11d2c8b19b9fa2016fd1548 (diff)
Merge branch 'dma' into devel
Conflicts:
	arch/arm/plat-mxc/dma-mx1-mx2.c
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/dma.h                |  46
-rw-r--r--  arch/arm/include/asm/mach/dma.h           |  35
-rw-r--r--  arch/arm/kernel/dma-isa.c                 |  67
-rw-r--r--  arch/arm/kernel/dma.c                     | 119
-rw-r--r--  arch/arm/mach-footbridge/dma.c            |  12
-rw-r--r--  arch/arm/mach-rpc/dma.c                   | 213
-rw-r--r--  arch/arm/mach-rpc/include/mach/isa-dma.h  |   2
-rw-r--r--  arch/arm/mach-shark/dma.c                 |   6
-rw-r--r--  arch/arm/plat-mxc/dma-mx1-mx2.c           |   2
9 files changed, 283 insertions(+), 219 deletions(-)
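
The common thread in the diff below is an API change: the dmach_t and dmamode_t typedefs become plain unsigned int, per-channel state moves out of a statically sized dma_t array, and machine code registers each channel with isa_dma_add() instead of filling an array from arch_dma_init(). The sketch that follows shows how a machine class would hook one channel into the reworked interface; it is illustrative only. The my_* names and the channel number are invented here, while struct dma_struct, struct dma_ops, isa_dma_add() and the core_initcall() registration style come from the patch itself.

/* Hedged sketch: registering one channel with the reworked ARM DMA layer.
 * Only isa_dma_add(), dma_t/struct dma_struct and struct dma_ops are taken
 * from the patch below; everything named my_* is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/dma.h>
#include <asm/mach/dma.h>

struct my_dma {
	struct dma_struct dma;		/* generic channel, registered below */
	unsigned long base;		/* controller-private state */
};

static void my_dma_enable(unsigned int chan, dma_t *dma)
{
	struct my_dma *mdma = container_of(dma, struct my_dma, dma);
	/* program the controller at mdma->base from dma->sg / dma->dma_mode */
}

static void my_dma_disable(unsigned int chan, dma_t *dma)
{
	/* stop the transfer */
}

static const struct dma_ops my_dma_ops = {
	.type		= "MYDMA",
	.enable		= my_dma_enable,	/* mandatory */
	.disable	= my_dma_disable,	/* mandatory */
};

static struct my_dma my_dma_chan0;

static int __init my_dma_init(void)
{
	my_dma_chan0.dma.d_ops = &my_dma_ops;
	return isa_dma_add(0, &my_dma_chan0.dma);	/* channel 0 is arbitrary */
}
core_initcall(my_dma_init);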
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index df5638f3643a..7edf3536df24 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -19,21 +19,17 @@
 #include <asm/system.h>
 #include <asm/scatterlist.h>
 
-typedef unsigned int dmach_t;
-
 #include <mach/isa-dma.h>
 
 /*
- * DMA modes
+ * The DMA modes reflect the settings for the ISA DMA controller
  */
-typedef unsigned int dmamode_t;
-
-#define DMA_MODE_MASK	3
+#define DMA_MODE_MASK	0xcc
 
-#define DMA_MODE_READ	0
-#define DMA_MODE_WRITE	1
-#define DMA_MODE_CASCADE 2
-#define DMA_AUTOINIT	4
+#define DMA_MODE_READ	0x44
+#define DMA_MODE_WRITE	0x48
+#define DMA_MODE_CASCADE 0xc0
+#define DMA_AUTOINIT	0x10
 
 extern spinlock_t dma_spin_lock;
 
@@ -52,44 +48,44 @@ static inline void release_dma_lock(unsigned long flags)
 /* Clear the 'DMA Pointer Flip Flop'.
  * Write 0 for LSB/MSB, 1 for MSB/LSB access.
  */
-#define clear_dma_ff(channel)
+#define clear_dma_ff(chan)
 
 /* Set only the page register bits of the transfer address.
  *
  * NOTE: This is an architecture specific function, and should
  * be hidden from the drivers
  */
-extern void set_dma_page(dmach_t channel, char pagenr);
+extern void set_dma_page(unsigned int chan, char pagenr);
 
 /* Request a DMA channel
 *
 * Some architectures may need to do allocate an interrupt
 */
-extern int request_dma(dmach_t channel, const char * device_id);
+extern int request_dma(unsigned int chan, const char * device_id);
 
 /* Free a DMA channel
 *
 * Some architectures may need to do free an interrupt
 */
-extern void free_dma(dmach_t channel);
+extern void free_dma(unsigned int chan);
 
 /* Enable DMA for this channel
 *
 * On some architectures, this may have other side effects like
 * enabling an interrupt and setting the DMA registers.
 */
-extern void enable_dma(dmach_t channel);
+extern void enable_dma(unsigned int chan);
 
 /* Disable DMA for this channel
 *
 * On some architectures, this may have other side effects like
 * disabling an interrupt or whatever.
 */
-extern void disable_dma(dmach_t channel);
+extern void disable_dma(unsigned int chan);
 
 /* Test whether the specified channel has an active DMA transfer
 */
-extern int dma_channel_active(dmach_t channel);
+extern int dma_channel_active(unsigned int chan);
 
 /* Set the DMA scatter gather list for this channel
 *
@@ -97,7 +93,7 @@ extern int dma_channel_active(dmach_t channel);
 * especially since some DMA architectures don't update the
 * DMA address immediately, but defer it to the enable_dma().
 */
-extern void set_dma_sg(dmach_t channel, struct scatterlist *sg, int nr_sg);
+extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);
 
 /* Set the DMA address for this channel
 *
@@ -105,9 +101,9 @@ extern void set_dma_sg(dmach_t channel, struct scatterlist *sg, int nr_sg);
 * especially since some DMA architectures don't update the
 * DMA address immediately, but defer it to the enable_dma().
 */
-extern void __set_dma_addr(dmach_t channel, void *addr);
-#define set_dma_addr(channel, addr) \
-	__set_dma_addr(channel, bus_to_virt(addr))
+extern void __set_dma_addr(unsigned int chan, void *addr);
+#define set_dma_addr(chan, addr) \
+	__set_dma_addr(chan, bus_to_virt(addr))
 
 /* Set the DMA byte count for this channel
 *
@@ -115,7 +111,7 @@ extern void __set_dma_addr(dmach_t channel, void *addr);
 * especially since some DMA architectures don't update the
 * DMA count immediately, but defer it to the enable_dma().
 */
-extern void set_dma_count(dmach_t channel, unsigned long count);
+extern void set_dma_count(unsigned int chan, unsigned long count);
 
 /* Set the transfer direction for this channel
 *
@@ -124,11 +120,11 @@ extern void set_dma_count(dmach_t channel, unsigned long count);
 * DMA transfer direction immediately, but defer it to the
 * enable_dma().
 */
-extern void set_dma_mode(dmach_t channel, dmamode_t mode);
+extern void set_dma_mode(unsigned int chan, unsigned int mode);
 
 /* Set the transfer speed for this channel
 */
-extern void set_dma_speed(dmach_t channel, int cycle_ns);
+extern void set_dma_speed(unsigned int chan, int cycle_ns);
 
 /* Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
@@ -136,7 +132,7 @@ extern void set_dma_speed(dmach_t channel, int cycle_ns);
 * If called before the channel has been used, it may return 1.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 */
-extern int get_dma_residue(dmach_t channel);
+extern int get_dma_residue(unsigned int chan);
 
 #ifndef NO_DMA
 #define NO_DMA	255
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index fc7278ea7146..9e614a18e680 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -15,13 +15,13 @@ struct dma_struct;
 typedef struct dma_struct dma_t;
 
 struct dma_ops {
-	int	(*request)(dmach_t, dma_t *);		/* optional */
-	void	(*free)(dmach_t, dma_t *);		/* optional */
-	void	(*enable)(dmach_t, dma_t *);		/* mandatory */
-	void	(*disable)(dmach_t, dma_t *);		/* mandatory */
-	int	(*residue)(dmach_t, dma_t *);		/* optional */
-	int	(*setspeed)(dmach_t, dma_t *, int);	/* optional */
-	char	*type;
+	int	(*request)(unsigned int, dma_t *);	/* optional */
+	void	(*free)(unsigned int, dma_t *);		/* optional */
+	void	(*enable)(unsigned int, dma_t *);	/* mandatory */
+	void	(*disable)(unsigned int, dma_t *);	/* mandatory */
+	int	(*residue)(unsigned int, dma_t *);	/* optional */
+	int	(*setspeed)(unsigned int, dma_t *, int); /* optional */
+	const char *type;
 };
 
 struct dma_struct {
@@ -34,24 +34,21 @@ struct dma_struct {
 	unsigned int	active:1;	/* Transfer active		*/
 	unsigned int	invalid:1;	/* Address/Count changed	*/
 
-	dmamode_t	dma_mode;	/* DMA mode			*/
+	unsigned int	dma_mode;	/* DMA mode			*/
 	int		speed;		/* DMA speed			*/
 
 	unsigned int	lock;		/* Device is allocated		*/
 	const char	*device_id;	/* Device name			*/
 
-	unsigned int	dma_base;	/* Controller base address	*/
-	int		dma_irq;	/* Controller IRQ		*/
-	struct scatterlist cur_sg;	/* Current controller buffer	*/
-	unsigned int	state;
-
-	struct dma_ops	*d_ops;
+	const struct dma_ops *d_ops;
 };
 
-/* Prototype: void arch_dma_init(dma)
- * Purpose : Initialise architecture specific DMA
- * Params : dma - pointer to array of DMA structures
+/*
+ * isa_dma_add - add an ISA-style DMA channel
  */
-extern void arch_dma_init(dma_t *dma);
+extern int isa_dma_add(unsigned int, dma_t *dma);
 
-extern void isa_init_dma(dma_t *dma);
+/*
+ * Add the ISA DMA controller. Always takes channels 0-7.
+ */
+extern void isa_init_dma(void);
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
index 4a3a50495c60..0e88e46fc732 100644
--- a/arch/arm/kernel/dma-isa.c
+++ b/arch/arm/kernel/dma-isa.c
@@ -24,11 +24,6 @@
 #include <asm/dma.h>
 #include <asm/mach/dma.h>
 
-#define ISA_DMA_MODE_READ	0x44
-#define ISA_DMA_MODE_WRITE	0x48
-#define ISA_DMA_MODE_CASCADE	0xc0
-#define ISA_DMA_AUTOINIT	0x10
-
 #define ISA_DMA_MASK		0
 #define ISA_DMA_MODE		1
 #define ISA_DMA_CLRFF		2
@@ -49,38 +44,35 @@ static unsigned int isa_dma_port[8][7] = {
 	{ 0xd4, 0xd6, 0xd8, 0x48a, 0x08a, 0xcc, 0xce }
 };
 
-static int isa_get_dma_residue(dmach_t channel, dma_t *dma)
+static int isa_get_dma_residue(unsigned int chan, dma_t *dma)
 {
-	unsigned int io_port = isa_dma_port[channel][ISA_DMA_COUNT];
+	unsigned int io_port = isa_dma_port[chan][ISA_DMA_COUNT];
 	int count;
 
 	count = 1 + inb(io_port);
 	count |= inb(io_port) << 8;
 
-	return channel < 4 ? count : (count << 1);
+	return chan < 4 ? count : (count << 1);
 }
 
-static void isa_enable_dma(dmach_t channel, dma_t *dma)
+static void isa_enable_dma(unsigned int chan, dma_t *dma)
 {
 	if (dma->invalid) {
 		unsigned long address, length;
 		unsigned int mode;
 		enum dma_data_direction direction;
 
-		mode = channel & 3;
+		mode = (chan & 3) | dma->dma_mode;
 		switch (dma->dma_mode & DMA_MODE_MASK) {
 		case DMA_MODE_READ:
-			mode |= ISA_DMA_MODE_READ;
 			direction = DMA_FROM_DEVICE;
 			break;
 
 		case DMA_MODE_WRITE:
-			mode |= ISA_DMA_MODE_WRITE;
 			direction = DMA_TO_DEVICE;
 			break;
 
 		case DMA_MODE_CASCADE:
-			mode |= ISA_DMA_MODE_CASCADE;
 			direction = DMA_BIDIRECTIONAL;
 			break;
 
@@ -105,34 +97,31 @@ static void isa_enable_dma(dmach_t channel, dma_t *dma)
 		address = dma->buf.dma_address;
 		length  = dma->buf.length - 1;
 
-		outb(address >> 16, isa_dma_port[channel][ISA_DMA_PGLO]);
-		outb(address >> 24, isa_dma_port[channel][ISA_DMA_PGHI]);
+		outb(address >> 16, isa_dma_port[chan][ISA_DMA_PGLO]);
+		outb(address >> 24, isa_dma_port[chan][ISA_DMA_PGHI]);
 
-		if (channel >= 4) {
+		if (chan >= 4) {
 			address >>= 1;
 			length >>= 1;
 		}
 
-		outb(0, isa_dma_port[channel][ISA_DMA_CLRFF]);
-
-		outb(address, isa_dma_port[channel][ISA_DMA_ADDR]);
-		outb(address >> 8, isa_dma_port[channel][ISA_DMA_ADDR]);
+		outb(0, isa_dma_port[chan][ISA_DMA_CLRFF]);
 
-		outb(length, isa_dma_port[channel][ISA_DMA_COUNT]);
-		outb(length >> 8, isa_dma_port[channel][ISA_DMA_COUNT]);
+		outb(address, isa_dma_port[chan][ISA_DMA_ADDR]);
+		outb(address >> 8, isa_dma_port[chan][ISA_DMA_ADDR]);
 
-		if (dma->dma_mode & DMA_AUTOINIT)
-			mode |= ISA_DMA_AUTOINIT;
+		outb(length, isa_dma_port[chan][ISA_DMA_COUNT]);
+		outb(length >> 8, isa_dma_port[chan][ISA_DMA_COUNT]);
 
-		outb(mode, isa_dma_port[channel][ISA_DMA_MODE]);
+		outb(mode, isa_dma_port[chan][ISA_DMA_MODE]);
 		dma->invalid = 0;
 	}
-	outb(channel & 3, isa_dma_port[channel][ISA_DMA_MASK]);
+	outb(chan & 3, isa_dma_port[chan][ISA_DMA_MASK]);
 }
 
-static void isa_disable_dma(dmach_t channel, dma_t *dma)
+static void isa_disable_dma(unsigned int chan, dma_t *dma)
 {
-	outb(channel | 4, isa_dma_port[channel][ISA_DMA_MASK]);
+	outb(chan | 4, isa_dma_port[chan][ISA_DMA_MASK]);
 }
 
 static struct dma_ops isa_dma_ops = {
@@ -160,7 +149,12 @@ static struct resource dma_resources[] = { {
 	.end	= 0x048f
 } };
 
-void __init isa_init_dma(dma_t *dma)
+static dma_t isa_dma[8];
+
+/*
+ * ISA DMA always starts at channel 0
+ */
+void __init isa_init_dma(void)
 {
 	/*
 	 * Try to autodetect presence of an ISA DMA controller.
@@ -178,11 +172,11 @@ void __init isa_init_dma(dma_t *dma)
 	outb(0xaa, 0x00);
 
 	if (inb(0) == 0x55 && inb(0) == 0xaa) {
-		int channel, i;
+		unsigned int chan, i;
 
-		for (channel = 0; channel < 8; channel++) {
-			dma[channel].d_ops = &isa_dma_ops;
-			isa_disable_dma(channel, NULL);
+		for (chan = 0; chan < 8; chan++) {
+			isa_dma[chan].d_ops = &isa_dma_ops;
+			isa_disable_dma(chan, NULL);
 		}
 
 		outb(0x40, 0x0b);
@@ -217,5 +211,12 @@ void __init isa_init_dma(dma_t *dma)
 
 		for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
 			request_resource(&ioport_resource, dma_resources + i);
+
+		for (chan = 0; chan < 8; chan++) {
+			int ret = isa_dma_add(chan, &isa_dma[chan]);
+			if (ret)
+				printk(KERN_ERR "ISADMA%u: unable to register: %d\n",
+				       chan, ret);
+		}
 	}
 }
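
One detail worth noting from the two files above: the generic DMA_MODE_READ/WRITE/CASCADE and DMA_AUTOINIT values in <asm/dma.h> now carry the raw Intel 8237 mode-register encoding (0x44, 0x48, 0xc0, 0x10), which is why isa_enable_dma() can simply OR dma->dma_mode into the channel bits instead of translating through the old ISA_DMA_MODE_* constants. A hedged sketch of that encoding follows; the helper function is purely illustrative.

/* Illustrative only: how the 8237 mode byte is composed after this change.
 * The constants mirror <asm/dma.h> from the patch; isa_mode_byte() is a
 * made-up helper, the real code builds the value inline in isa_enable_dma().
 */
#define DMA_MODE_MASK		0xcc	/* mode + transfer-type bits */
#define DMA_MODE_READ		0x44	/* single mode, device-to-memory */
#define DMA_MODE_WRITE		0x48	/* single mode, memory-to-device */
#define DMA_MODE_CASCADE	0xc0	/* cascade mode */
#define DMA_AUTOINIT		0x10	/* auto-initialise after terminal count */

static unsigned char isa_mode_byte(unsigned int chan, unsigned int dma_mode)
{
	/* bits 0-1 select the channel within the 8237; the rest is the mode */
	return (chan & 3) | dma_mode;
}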
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index d006085ed7e7..7d5b9fb01e71 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
+#include <linux/scatterlist.h>
 
 #include <asm/dma.h>
 
@@ -23,19 +24,40 @@
 DEFINE_SPINLOCK(dma_spin_lock);
 EXPORT_SYMBOL(dma_spin_lock);
 
-static dma_t dma_chan[MAX_DMA_CHANNELS];
+static dma_t *dma_chan[MAX_DMA_CHANNELS];
+
+static inline dma_t *dma_channel(unsigned int chan)
+{
+	if (chan >= MAX_DMA_CHANNELS)
+		return NULL;
+
+	return dma_chan[chan];
+}
+
+int __init isa_dma_add(unsigned int chan, dma_t *dma)
+{
+	if (!dma->d_ops)
+		return -EINVAL;
+
+	sg_init_table(&dma->buf, 1);
+
+	if (dma_chan[chan])
+		return -EBUSY;
+	dma_chan[chan] = dma;
+	return 0;
+}
 
 /*
  * Request DMA channel
  *
  * On certain platforms, we have to allocate an interrupt as well...
  */
-int request_dma(dmach_t channel, const char *device_id)
+int request_dma(unsigned int chan, const char *device_id)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 	int ret;
 
-	if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
+	if (!dma)
 		goto bad_dma;
 
 	if (xchg(&dma->lock, 1) != 0)
@@ -47,7 +69,7 @@ int request_dma(dmach_t channel, const char *device_id)
 
 	ret = 0;
 	if (dma->d_ops->request)
-		ret = dma->d_ops->request(channel, dma);
+		ret = dma->d_ops->request(chan, dma);
 
 	if (ret)
 		xchg(&dma->lock, 0);
@@ -55,7 +77,7 @@ int request_dma(dmach_t channel, const char *device_id)
 	return ret;
 
 bad_dma:
-	printk(KERN_ERR "dma: trying to allocate DMA%d\n", channel);
+	printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan);
 	return -EINVAL;
 
 busy:
@@ -68,42 +90,42 @@ EXPORT_SYMBOL(request_dma);
 *
 * On certain platforms, we have to free interrupt as well...
 */
-void free_dma(dmach_t channel)
+void free_dma(unsigned int chan)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
-	if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
+	if (!dma)
 		goto bad_dma;
 
 	if (dma->active) {
-		printk(KERN_ERR "dma%d: freeing active DMA\n", channel);
-		dma->d_ops->disable(channel, dma);
+		printk(KERN_ERR "dma%d: freeing active DMA\n", chan);
+		dma->d_ops->disable(chan, dma);
 		dma->active = 0;
 	}
 
 	if (xchg(&dma->lock, 0) != 0) {
 		if (dma->d_ops->free)
-			dma->d_ops->free(channel, dma);
+			dma->d_ops->free(chan, dma);
 		return;
 	}
 
-	printk(KERN_ERR "dma%d: trying to free free DMA\n", channel);
+	printk(KERN_ERR "dma%d: trying to free free DMA\n", chan);
 	return;
 
 bad_dma:
-	printk(KERN_ERR "dma: trying to free DMA%d\n", channel);
+	printk(KERN_ERR "dma: trying to free DMA%d\n", chan);
 }
 EXPORT_SYMBOL(free_dma);
 
 /* Set DMA Scatter-Gather list
 */
-void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg)
+void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (dma->active)
 		printk(KERN_ERR "dma%d: altering DMA SG while "
-			"DMA active\n", channel);
+			"DMA active\n", chan);
 
 	dma->sg = sg;
 	dma->sgcount = nr_sg;
@@ -115,13 +137,13 @@ EXPORT_SYMBOL(set_dma_sg);
 *
 * Copy address to the structure, and set the invalid bit
 */
-void __set_dma_addr (dmach_t channel, void *addr)
+void __set_dma_addr (unsigned int chan, void *addr)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (dma->active)
 		printk(KERN_ERR "dma%d: altering DMA address while "
-			"DMA active\n", channel);
+			"DMA active\n", chan);
 
 	dma->sg = NULL;
 	dma->addr = addr;
@@ -133,13 +155,13 @@ EXPORT_SYMBOL(__set_dma_addr);
 *
 * Copy address to the structure, and set the invalid bit
 */
-void set_dma_count (dmach_t channel, unsigned long count)
+void set_dma_count (unsigned int chan, unsigned long count)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (dma->active)
 		printk(KERN_ERR "dma%d: altering DMA count while "
-			"DMA active\n", channel);
+			"DMA active\n", chan);
 
 	dma->sg = NULL;
 	dma->count = count;
@@ -149,13 +171,13 @@ EXPORT_SYMBOL(set_dma_count);
 
 /* Set DMA direction mode
 */
-void set_dma_mode (dmach_t channel, dmamode_t mode)
+void set_dma_mode (unsigned int chan, unsigned int mode)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (dma->active)
 		printk(KERN_ERR "dma%d: altering DMA mode while "
-			"DMA active\n", channel);
+			"DMA active\n", chan);
 
 	dma->dma_mode = mode;
 	dma->invalid = 1;
@@ -164,42 +186,42 @@ EXPORT_SYMBOL(set_dma_mode);
 
 /* Enable DMA channel
 */
-void enable_dma (dmach_t channel)
+void enable_dma (unsigned int chan)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (!dma->lock)
 		goto free_dma;
 
 	if (dma->active == 0) {
 		dma->active = 1;
-		dma->d_ops->enable(channel, dma);
+		dma->d_ops->enable(chan, dma);
 	}
 	return;
 
free_dma:
-	printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel);
+	printk(KERN_ERR "dma%d: trying to enable free DMA\n", chan);
 	BUG();
 }
 EXPORT_SYMBOL(enable_dma);
 
 /* Disable DMA channel
 */
-void disable_dma (dmach_t channel)
+void disable_dma (unsigned int chan)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (!dma->lock)
 		goto free_dma;
 
 	if (dma->active == 1) {
 		dma->active = 0;
-		dma->d_ops->disable(channel, dma);
+		dma->d_ops->disable(chan, dma);
 	}
 	return;
 
free_dma:
-	printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel);
+	printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan);
 	BUG();
 }
 EXPORT_SYMBOL(disable_dma);
@@ -207,45 +229,38 @@ EXPORT_SYMBOL(disable_dma);
 /*
 * Is the specified DMA channel active?
 */
-int dma_channel_active(dmach_t channel)
+int dma_channel_active(unsigned int chan)
 {
-	return dma_chan[channel].active;
+	dma_t *dma = dma_channel(chan);
+	return dma->active;
 }
 EXPORT_SYMBOL(dma_channel_active);
 
-void set_dma_page(dmach_t channel, char pagenr)
+void set_dma_page(unsigned int chan, char pagenr)
 {
-	printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel);
+	printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan);
 }
 EXPORT_SYMBOL(set_dma_page);
 
-void set_dma_speed(dmach_t channel, int cycle_ns)
+void set_dma_speed(unsigned int chan, int cycle_ns)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 	int ret = 0;
 
 	if (dma->d_ops->setspeed)
-		ret = dma->d_ops->setspeed(channel, dma, cycle_ns);
+		ret = dma->d_ops->setspeed(chan, dma, cycle_ns);
 	dma->speed = ret;
 }
 EXPORT_SYMBOL(set_dma_speed);
 
-int get_dma_residue(dmach_t channel)
+int get_dma_residue(unsigned int chan)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 	int ret = 0;
 
 	if (dma->d_ops->residue)
-		ret = dma->d_ops->residue(channel, dma);
+		ret = dma->d_ops->residue(chan, dma);
 
 	return ret;
 }
 EXPORT_SYMBOL(get_dma_residue);
-
-static int __init init_dma(void)
-{
-	arch_dma_init(dma_chan);
-	return 0;
-}
-
-core_initcall(init_dma);
diff --git a/arch/arm/mach-footbridge/dma.c b/arch/arm/mach-footbridge/dma.c
index 4f3506346969..e2e0df8bcee2 100644
--- a/arch/arm/mach-footbridge/dma.c
+++ b/arch/arm/mach-footbridge/dma.c
@@ -21,16 +21,16 @@
 #include <asm/hardware/dec21285.h>
 
 #if 0
-static int fb_dma_request(dmach_t channel, dma_t *dma)
+static int fb_dma_request(unsigned int chan, dma_t *dma)
 {
 	return -EINVAL;
 }
 
-static void fb_dma_enable(dmach_t channel, dma_t *dma)
+static void fb_dma_enable(unsigned int chan, dma_t *dma)
 {
 }
 
-static void fb_dma_disable(dmach_t channel, dma_t *dma)
+static void fb_dma_disable(unsigned int chan, dma_t *dma)
 {
 }
 
@@ -42,7 +42,7 @@ static struct dma_ops fb_dma_ops = {
 };
 #endif
 
-void __init arch_dma_init(dma_t *dma)
+static int __init fb_dma_init(void)
 {
 #if 0
 	dma[_DC21285_DMA(0)].d_ops = &fb_dma_ops;
@@ -50,6 +50,8 @@ void __init arch_dma_init(dma_t *dma)
 #endif
 #ifdef CONFIG_ISA_DMA
 	if (footbridge_cfn_mode())
-		isa_init_dma(dma + _ISA_DMA(0));
+		isa_init_dma();
 #endif
+	return 0;
 }
+core_initcall(fb_dma_init);
diff --git a/arch/arm/mach-rpc/dma.c b/arch/arm/mach-rpc/dma.c
index 7958a30f8932..c47d974d52bd 100644
--- a/arch/arm/mach-rpc/dma.c
+++ b/arch/arm/mach-rpc/dma.c
@@ -26,6 +26,16 @@
 #include <asm/mach/dma.h>
 #include <asm/hardware/iomd.h>
 
+struct iomd_dma {
+	struct dma_struct	dma;
+	unsigned int		state;
+	unsigned long		base;		/* Controller base address */
+	int			irq;		/* Controller IRQ */
+	struct scatterlist	cur_sg;		/* Current controller buffer */
+	dma_addr_t		dma_addr;
+	unsigned int		dma_len;
+};
+
 #if 0
 typedef enum {
 	dma_size_8 = 1,
@@ -44,15 +54,15 @@ typedef enum {
 #define CR	(IOMD_IO0CR - IOMD_IO0CURA)
 #define ST	(IOMD_IO0ST - IOMD_IO0CURA)
 
-static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
+static void iomd_get_next_sg(struct scatterlist *sg, struct iomd_dma *idma)
 {
 	unsigned long end, offset, flags = 0;
 
-	if (dma->sg) {
-		sg->dma_address = dma->sg->dma_address;
+	if (idma->dma.sg) {
+		sg->dma_address = idma->dma_addr;
 		offset = sg->dma_address & ~PAGE_MASK;
 
-		end = offset + dma->sg->length;
+		end = offset + idma->dma_len;
 
 		if (end > PAGE_SIZE)
 			end = PAGE_SIZE;
@@ -62,15 +72,17 @@ static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
 
 		sg->length = end - TRANSFER_SIZE;
 
-		dma->sg->length -= end - offset;
-		dma->sg->dma_address += end - offset;
+		idma->dma_len -= end - offset;
+		idma->dma_addr += end - offset;
 
-		if (dma->sg->length == 0) {
-			if (dma->sgcount > 1) {
-				dma->sg++;
-				dma->sgcount--;
+		if (idma->dma_len == 0) {
+			if (idma->dma.sgcount > 1) {
+				idma->dma.sg = sg_next(idma->dma.sg);
+				idma->dma_addr = idma->dma.sg->dma_address;
+				idma->dma_len = idma->dma.sg->length;
+				idma->dma.sgcount--;
 			} else {
-				dma->sg = NULL;
+				idma->dma.sg = NULL;
 				flags |= DMA_END_S;
 			}
 		}
@@ -85,8 +97,8 @@ static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
 
 static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
 {
-	dma_t *dma = (dma_t *)dev_id;
-	unsigned long base = dma->dma_base;
+	struct iomd_dma *idma = dev_id;
+	unsigned long base = idma->base;
 
 	do {
 		unsigned int status;
@@ -95,93 +107,99 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
 		if (!(status & DMA_ST_INT))
 			return IRQ_HANDLED;
 
-		if ((dma->state ^ status) & DMA_ST_AB)
-			iomd_get_next_sg(&dma->cur_sg, dma);
+		if ((idma->state ^ status) & DMA_ST_AB)
+			iomd_get_next_sg(&idma->cur_sg, idma);
 
 		switch (status & (DMA_ST_OFL | DMA_ST_AB)) {
 		case DMA_ST_OFL:			/* OIA */
 		case DMA_ST_AB:				/* .IB */
-			iomd_writel(dma->cur_sg.dma_address, base + CURA);
-			iomd_writel(dma->cur_sg.length, base + ENDA);
-			dma->state = DMA_ST_AB;
+			iomd_writel(idma->cur_sg.dma_address, base + CURA);
+			iomd_writel(idma->cur_sg.length, base + ENDA);
+			idma->state = DMA_ST_AB;
 			break;
 
 		case DMA_ST_OFL | DMA_ST_AB:		/* OIB */
 		case 0:					/* .IA */
-			iomd_writel(dma->cur_sg.dma_address, base + CURB);
-			iomd_writel(dma->cur_sg.length, base + ENDB);
-			dma->state = 0;
+			iomd_writel(idma->cur_sg.dma_address, base + CURB);
+			iomd_writel(idma->cur_sg.length, base + ENDB);
+			idma->state = 0;
 			break;
 		}
 
 		if (status & DMA_ST_OFL &&
-		    dma->cur_sg.length == (DMA_END_S|DMA_END_L))
+		    idma->cur_sg.length == (DMA_END_S|DMA_END_L))
 			break;
 	} while (1);
 
-	dma->state = ~DMA_ST_AB;
+	idma->state = ~DMA_ST_AB;
 	disable_irq(irq);
 
 	return IRQ_HANDLED;
 }
 
-static int iomd_request_dma(dmach_t channel, dma_t *dma)
+static int iomd_request_dma(unsigned int chan, dma_t *dma)
 {
-	return request_irq(dma->dma_irq, iomd_dma_handle,
-			   IRQF_DISABLED, dma->device_id, dma);
+	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
+
+	return request_irq(idma->irq, iomd_dma_handle,
+			   IRQF_DISABLED, idma->dma.device_id, idma);
 }
 
-static void iomd_free_dma(dmach_t channel, dma_t *dma)
+static void iomd_free_dma(unsigned int chan, dma_t *dma)
 {
-	free_irq(dma->dma_irq, dma);
+	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
+
+	free_irq(idma->irq, idma);
 }
 
-static void iomd_enable_dma(dmach_t channel, dma_t *dma)
+static void iomd_enable_dma(unsigned int chan, dma_t *dma)
 {
-	unsigned long dma_base = dma->dma_base;
+	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
+	unsigned long dma_base = idma->base;
 	unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;
 
-	if (dma->invalid) {
-		dma->invalid = 0;
+	if (idma->dma.invalid) {
+		idma->dma.invalid = 0;
 
 		/*
 		 * Cope with ISA-style drivers which expect cache
 		 * coherence.
 		 */
-		if (!dma->sg) {
-			dma->sg = &dma->buf;
-			dma->sgcount = 1;
-			dma->buf.length = dma->count;
-			dma->buf.dma_address = dma_map_single(NULL,
-				dma->addr, dma->count,
-				dma->dma_mode == DMA_MODE_READ ?
+		if (!idma->dma.sg) {
+			idma->dma.sg = &idma->dma.buf;
+			idma->dma.sgcount = 1;
+			idma->dma.buf.length = idma->dma.count;
+			idma->dma.buf.dma_address = dma_map_single(NULL,
+				idma->dma.addr, idma->dma.count,
+				idma->dma.dma_mode == DMA_MODE_READ ?
				DMA_FROM_DEVICE : DMA_TO_DEVICE);
 		}
 
 		iomd_writeb(DMA_CR_C, dma_base + CR);
-		dma->state = DMA_ST_AB;
+		idma->state = DMA_ST_AB;
 	}
 
-	if (dma->dma_mode == DMA_MODE_READ)
+	if (idma->dma.dma_mode == DMA_MODE_READ)
 		ctrl |= DMA_CR_D;
 
 	iomd_writeb(ctrl, dma_base + CR);
-	enable_irq(dma->dma_irq);
+	enable_irq(idma->irq);
 }
 
-static void iomd_disable_dma(dmach_t channel, dma_t *dma)
+static void iomd_disable_dma(unsigned int chan, dma_t *dma)
 {
-	unsigned long dma_base = dma->dma_base;
+	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
+	unsigned long dma_base = idma->base;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	if (dma->state != ~DMA_ST_AB)
-		disable_irq(dma->dma_irq);
+	if (idma->state != ~DMA_ST_AB)
+		disable_irq(idma->irq);
 	iomd_writeb(0, dma_base + CR);
 	local_irq_restore(flags);
 }
 
-static int iomd_set_dma_speed(dmach_t channel, dma_t *dma, int cycle)
+static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle)
 {
 	int tcr, speed;
 
@@ -197,7 +215,7 @@ static int iomd_set_dma_speed(dmach_t channel, dma_t *dma, int cycle)
 	tcr = iomd_readb(IOMD_DMATCR);
 	speed &= 3;
 
-	switch (channel) {
+	switch (chan) {
 	case DMA_0:
 		tcr = (tcr & ~0x03) | speed;
 		break;
@@ -236,16 +254,22 @@ static struct fiq_handler fh = {
 	.name	= "floppydma"
 };
 
-static void floppy_enable_dma(dmach_t channel, dma_t *dma)
+struct floppy_dma {
+	struct dma_struct	dma;
+	unsigned int		fiq;
+};
+
+static void floppy_enable_dma(unsigned int chan, dma_t *dma)
 {
+	struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
 	void *fiqhandler_start;
 	unsigned int fiqhandler_length;
 	struct pt_regs regs;
 
-	if (dma->sg)
+	if (fdma->dma.sg)
 		BUG();
 
-	if (dma->dma_mode == DMA_MODE_READ) {
+	if (fdma->dma.dma_mode == DMA_MODE_READ) {
 		extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
 		fiqhandler_start = &floppy_fiqin_start;
 		fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
@@ -255,8 +279,8 @@ static void floppy_enable_dma(dmach_t channel, dma_t *dma)
 		fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
 	}
 
-	regs.ARM_r9 = dma->count;
-	regs.ARM_r10 = (unsigned long)dma->addr;
+	regs.ARM_r9 = fdma->dma.count;
+	regs.ARM_r10 = (unsigned long)fdma->dma.addr;
 	regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE;
 
 	if (claim_fiq(&fh)) {
@@ -266,16 +290,17 @@ static void floppy_enable_dma(dmach_t channel, dma_t *dma)
 
 	set_fiq_handler(fiqhandler_start, fiqhandler_length);
 	set_fiq_regs(&regs);
-	enable_fiq(dma->dma_irq);
+	enable_fiq(fdma->fiq);
 }
 
-static void floppy_disable_dma(dmach_t channel, dma_t *dma)
+static void floppy_disable_dma(unsigned int chan, dma_t *dma)
 {
-	disable_fiq(dma->dma_irq);
+	struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
+	disable_fiq(fdma->fiq);
 	release_fiq(&fh);
 }
 
-static int floppy_get_residue(dmach_t channel, dma_t *dma)
+static int floppy_get_residue(unsigned int chan, dma_t *dma)
 {
 	struct pt_regs regs;
 	get_fiq_regs(&regs);
@@ -292,7 +317,7 @@ static struct dma_ops floppy_dma_ops = {
 /*
 * This is virtual DMA - we don't need anything here.
 */
-static void sound_enable_disable_dma(dmach_t channel, dma_t *dma)
+static void sound_enable_disable_dma(unsigned int chan, dma_t *dma)
 {
 }
 
@@ -302,8 +327,24 @@
 	.disable	= sound_enable_disable_dma,
 };
 
-void __init arch_dma_init(dma_t *dma)
+static struct iomd_dma iomd_dma[6];
+
+static struct floppy_dma floppy_dma = {
+	.dma		= {
+		.d_ops	= &floppy_dma_ops,
+	},
+	.fiq		= FIQ_FLOPPYDATA,
+};
+
+static dma_t sound_dma = {
+	.d_ops		= &sound_dma_ops,
+};
+
+static int __init rpc_dma_init(void)
 {
+	unsigned int i;
+	int ret;
+
 	iomd_writeb(0, IOMD_IO0CR);
 	iomd_writeb(0, IOMD_IO1CR);
 	iomd_writeb(0, IOMD_IO2CR);
@@ -311,31 +352,39 @@
 
 	iomd_writeb(0xa0, IOMD_DMATCR);
 
-	dma[DMA_0].dma_base		= IOMD_IO0CURA;
-	dma[DMA_0].dma_irq		= IRQ_DMA0;
-	dma[DMA_0].d_ops		= &iomd_dma_ops;
-	dma[DMA_1].dma_base		= IOMD_IO1CURA;
-	dma[DMA_1].dma_irq		= IRQ_DMA1;
-	dma[DMA_1].d_ops		= &iomd_dma_ops;
-	dma[DMA_2].dma_base		= IOMD_IO2CURA;
-	dma[DMA_2].dma_irq		= IRQ_DMA2;
-	dma[DMA_2].d_ops		= &iomd_dma_ops;
-	dma[DMA_3].dma_base		= IOMD_IO3CURA;
-	dma[DMA_3].dma_irq		= IRQ_DMA3;
-	dma[DMA_3].d_ops		= &iomd_dma_ops;
-	dma[DMA_S0].dma_base		= IOMD_SD0CURA;
-	dma[DMA_S0].dma_irq		= IRQ_DMAS0;
-	dma[DMA_S0].d_ops		= &iomd_dma_ops;
-	dma[DMA_S1].dma_base		= IOMD_SD1CURA;
-	dma[DMA_S1].dma_irq		= IRQ_DMAS1;
-	dma[DMA_S1].d_ops		= &iomd_dma_ops;
-	dma[DMA_VIRTUAL_FLOPPY].dma_irq = FIQ_FLOPPYDATA;
-	dma[DMA_VIRTUAL_FLOPPY].d_ops	= &floppy_dma_ops;
-	dma[DMA_VIRTUAL_SOUND].d_ops	= &sound_dma_ops;
-
 	/*
 	 * Setup DMA channels 2,3 to be for podules
 	 * and channels 0,1 for internal devices
 	 */
 	iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);
+
+	iomd_dma[DMA_0].base	= IOMD_IO0CURA;
+	iomd_dma[DMA_0].irq	= IRQ_DMA0;
+	iomd_dma[DMA_1].base	= IOMD_IO1CURA;
+	iomd_dma[DMA_1].irq	= IRQ_DMA1;
+	iomd_dma[DMA_2].base	= IOMD_IO2CURA;
+	iomd_dma[DMA_2].irq	= IRQ_DMA2;
+	iomd_dma[DMA_3].base	= IOMD_IO3CURA;
+	iomd_dma[DMA_3].irq	= IRQ_DMA3;
+	iomd_dma[DMA_S0].base	= IOMD_SD0CURA;
+	iomd_dma[DMA_S0].irq	= IRQ_DMAS0;
+	iomd_dma[DMA_S1].base	= IOMD_SD1CURA;
+	iomd_dma[DMA_S1].irq	= IRQ_DMAS1;
+
+	for (i = DMA_0; i <= DMA_S1; i++) {
+		iomd_dma[i].dma.d_ops = &iomd_dma_ops;
+
+		ret = isa_dma_add(i, &iomd_dma[i].dma);
+		if (ret)
+			printk("IOMDDMA%u: unable to register: %d\n", i, ret);
+	}
+
+	ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma);
+	if (ret)
+		printk("IOMDFLOPPY: unable to register: %d\n", ret);
+	ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma);
+	if (ret)
+		printk("IOMDSOUND: unable to register: %d\n", ret);
+	return 0;
 }
+core_initcall(rpc_dma_init);
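
Another change buried in the mach-rpc hunks: the scatter-gather walk no longer advances with plain pointer arithmetic (dma->sg++) but with sg_next(), matching the sg_init_table() call added in arch/arm/kernel/dma.c, so chained scatterlists are traversed correctly. A hedged sketch of the difference follows; walk_sg() is an invented helper, while sg_next() and sg_init_table() are the kernel scatterlist API the patch relies on.

#include <linux/scatterlist.h>

/* Illustrative only: iterating a scatterlist the way the patched code does.
 * walk_sg() is a made-up helper; the point is sg_next() instead of sg++.
 */
static unsigned long walk_sg(struct scatterlist *sg, int nents)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < nents; i++, sg = sg_next(sg))	/* not sg++ */
		total += sg->length;

	return total;
}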
diff --git a/arch/arm/mach-rpc/include/mach/isa-dma.h b/arch/arm/mach-rpc/include/mach/isa-dma.h
index bad720548587..67bfc6719c34 100644
--- a/arch/arm/mach-rpc/include/mach/isa-dma.h
+++ b/arch/arm/mach-rpc/include/mach/isa-dma.h
@@ -23,5 +23,7 @@
 
 #define DMA_FLOPPY		DMA_VIRTUAL_FLOPPY
 
+#define IOMD_DMA_BOUNDARY	(PAGE_SIZE - 1)
+
 #endif /* _ASM_ARCH_DMA_H */
 
diff --git a/arch/arm/mach-shark/dma.c b/arch/arm/mach-shark/dma.c
index 6774b8d5d13d..10b5b8b3272a 100644
--- a/arch/arm/mach-shark/dma.c
+++ b/arch/arm/mach-shark/dma.c
@@ -13,9 +13,11 @@
 #include <asm/dma.h>
 #include <asm/mach/dma.h>
 
-void __init arch_dma_init(dma_t *dma)
+static int __init shark_dma_init(void)
 {
 #ifdef CONFIG_ISA_DMA
-	isa_init_dma(dma);
+	isa_init_dma();
 #endif
+	return 0;
 }
+core_initcall(shark_dma_init);
diff --git a/arch/arm/plat-mxc/dma-mx1-mx2.c b/arch/arm/plat-mxc/dma-mx1-mx2.c
index 2905ec758758..a9bab15f0fd0 100644
--- a/arch/arm/plat-mxc/dma-mx1-mx2.c
+++ b/arch/arm/plat-mxc/dma-mx1-mx2.c
@@ -113,7 +113,7 @@ struct imx_dma_channel {
 	void (*err_handler) (int, void *, int errcode);
 	void (*prog_handler) (int, void *, struct scatterlist *);
 	void *data;
-	unsigned int  dma_mode;
+	unsigned int dma_mode;
 	struct scatterlist *sg;
 	unsigned int resbytes;
 	int dma_num;