author     Russell King <rmk@dyn-67.arm.linux.org.uk>     2009-02-21 16:42:50 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>     2009-02-21 16:42:50 -0500
commit     22b61a11fd4e6d7a48d694ce350331bebc0394ed (patch)
tree       f4be46c8154f5094c248fcd9fdf644b236f591b3
parent     423145a5d4def58cff760809d48cfb21316d59a9 (diff)
parent     fa4e998999322bc1b11d2c8b19b9fa2016fd1548 (diff)
Merge branch 'dma' into devel
Conflicts:
arch/arm/plat-mxc/dma-mx1-mx2.c
-rw-r--r--  arch/arm/include/asm/dma.h                |  46
-rw-r--r--  arch/arm/include/asm/mach/dma.h           |  35
-rw-r--r--  arch/arm/kernel/dma-isa.c                 |  67
-rw-r--r--  arch/arm/kernel/dma.c                     | 119
-rw-r--r--  arch/arm/mach-footbridge/dma.c            |  12
-rw-r--r--  arch/arm/mach-rpc/dma.c                   | 213
-rw-r--r--  arch/arm/mach-rpc/include/mach/isa-dma.h  |   2
-rw-r--r--  arch/arm/mach-shark/dma.c                 |   6
-rw-r--r--  arch/arm/plat-mxc/dma-mx1-mx2.c           |   2
-rw-r--r--  drivers/ata/pata_icside.c                 |  19
-rw-r--r--  drivers/scsi/arm/cumana_2.c               |   3
-rw-r--r--  drivers/scsi/arm/eesox.c                  |   3
-rw-r--r--  drivers/scsi/arm/powertec.c               |   3
13 files changed, 292 insertions(+), 238 deletions(-)
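
The bulk of this merge drops the ARM-private dmach_t/dmamode_t types in favour of plain unsigned int, moves controller-specific state (base address, IRQ, current sg) out of struct dma_struct into per-driver wrapper structs, and replaces the old arch_dma_init()/static dma_chan[] scheme with per-channel registration through the new isa_dma_add(). As a rough illustration only (not part of this commit; all foo_* names are made up), a platform using the reworked interface would embed a dma_t in its own channel structure, point d_ops at its handlers and register each channel from a core_initcall, along the lines of the rpc_dma_init() and fb_dma_init() changes below:

/* Hypothetical sketch of a machine-level user of the new registration API. */
#include <linux/init.h>
#include <linux/kernel.h>

#include <asm/dma.h>
#include <asm/mach/dma.h>

struct foo_dma {
	struct dma_struct dma;		/* embedded dma_t handed to isa_dma_add() */
	unsigned long base;		/* imaginary controller base address */
};

static void foo_enable_dma(unsigned int chan, dma_t *dma)
{
	/*
	 * Fetch our state with container_of(dma, struct foo_dma, dma)
	 * and program the controller from dma->sg / dma->dma_mode here.
	 */
}

static void foo_disable_dma(unsigned int chan, dma_t *dma)
{
}

static struct dma_ops foo_dma_ops = {
	.type		= "foo",
	.enable		= foo_enable_dma,	/* mandatory */
	.disable	= foo_disable_dma,	/* mandatory */
};

static struct foo_dma foo_dma[2];

static int __init foo_dma_init(void)
{
	unsigned int chan;

	for (chan = 0; chan < ARRAY_SIZE(foo_dma); chan++) {
		int ret;

		foo_dma[chan].dma.d_ops = &foo_dma_ops;
		ret = isa_dma_add(chan, &foo_dma[chan].dma);
		if (ret)
			printk(KERN_ERR "FOODMA%u: unable to register: %d\n",
			       chan, ret);
	}
	return 0;
}
core_initcall(foo_dma_init);

With channels registered this way, the generic code in arch/arm/kernel/dma.c can bounds-check and look up the dma_t at request time instead of walking a build-time array.
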
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index df5638f3643a..7edf3536df24 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -19,21 +19,17 @@
 #include <asm/system.h>
 #include <asm/scatterlist.h>
 
-typedef unsigned int dmach_t;
-
 #include <mach/isa-dma.h>
 
 /*
- * DMA modes
+ * The DMA modes reflect the settings for the ISA DMA controller
  */
-typedef unsigned int dmamode_t;
-
-#define DMA_MODE_MASK 3
+#define DMA_MODE_MASK 0xcc
 
-#define DMA_MODE_READ 0
-#define DMA_MODE_WRITE 1
-#define DMA_MODE_CASCADE 2
-#define DMA_AUTOINIT 4
+#define DMA_MODE_READ 0x44
+#define DMA_MODE_WRITE 0x48
+#define DMA_MODE_CASCADE 0xc0
+#define DMA_AUTOINIT 0x10
 
 extern spinlock_t dma_spin_lock;
 
@@ -52,44 +48,44 @@ static inline void release_dma_lock(unsigned long flags)
 /* Clear the 'DMA Pointer Flip Flop'.
  * Write 0 for LSB/MSB, 1 for MSB/LSB access.
  */
-#define clear_dma_ff(channel)
+#define clear_dma_ff(chan)
 
 /* Set only the page register bits of the transfer address.
  *
  * NOTE: This is an architecture specific function, and should
  * be hidden from the drivers
  */
-extern void set_dma_page(dmach_t channel, char pagenr);
+extern void set_dma_page(unsigned int chan, char pagenr);
 
 /* Request a DMA channel
  *
  * Some architectures may need to do allocate an interrupt
  */
-extern int request_dma(dmach_t channel, const char * device_id);
+extern int request_dma(unsigned int chan, const char * device_id);
 
 /* Free a DMA channel
  *
  * Some architectures may need to do free an interrupt
  */
-extern void free_dma(dmach_t channel);
+extern void free_dma(unsigned int chan);
 
 /* Enable DMA for this channel
  *
  * On some architectures, this may have other side effects like
  * enabling an interrupt and setting the DMA registers.
  */
-extern void enable_dma(dmach_t channel);
+extern void enable_dma(unsigned int chan);
 
 /* Disable DMA for this channel
  *
  * On some architectures, this may have other side effects like
  * disabling an interrupt or whatever.
  */
-extern void disable_dma(dmach_t channel);
+extern void disable_dma(unsigned int chan);
 
 /* Test whether the specified channel has an active DMA transfer
  */
-extern int dma_channel_active(dmach_t channel);
+extern int dma_channel_active(unsigned int chan);
 
 /* Set the DMA scatter gather list for this channel
  *
@@ -97,7 +93,7 @@ extern int dma_channel_active(dmach_t channel);
  * especially since some DMA architectures don't update the
  * DMA address immediately, but defer it to the enable_dma().
  */
-extern void set_dma_sg(dmach_t channel, struct scatterlist *sg, int nr_sg);
+extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);
 
 /* Set the DMA address for this channel
  *
@@ -105,9 +101,9 @@ extern void set_dma_sg(dmach_t channel, struct scatterlist *sg, int nr_sg);
  * especially since some DMA architectures don't update the
  * DMA address immediately, but defer it to the enable_dma().
  */
-extern void __set_dma_addr(dmach_t channel, void *addr);
-#define set_dma_addr(channel, addr) \
-	__set_dma_addr(channel, bus_to_virt(addr))
+extern void __set_dma_addr(unsigned int chan, void *addr);
+#define set_dma_addr(chan, addr) \
+	__set_dma_addr(chan, bus_to_virt(addr))
 
 /* Set the DMA byte count for this channel
  *
@@ -115,7 +111,7 @@ extern void __set_dma_addr(dmach_t channel, void *addr);
  * especially since some DMA architectures don't update the
  * DMA count immediately, but defer it to the enable_dma().
  */
-extern void set_dma_count(dmach_t channel, unsigned long count);
+extern void set_dma_count(unsigned int chan, unsigned long count);
 
 /* Set the transfer direction for this channel
  *
@@ -124,11 +120,11 @@ extern void set_dma_count(dmach_t channel, unsigned long count);
  * DMA transfer direction immediately, but defer it to the
  * enable_dma().
  */
-extern void set_dma_mode(dmach_t channel, dmamode_t mode);
+extern void set_dma_mode(unsigned int chan, unsigned int mode);
 
 /* Set the transfer speed for this channel
  */
-extern void set_dma_speed(dmach_t channel, int cycle_ns);
+extern void set_dma_speed(unsigned int chan, int cycle_ns);
 
 /* Get DMA residue count. After a DMA transfer, this
  * should return zero. Reading this while a DMA transfer is
@@ -136,7 +132,7 @@ extern void set_dma_speed(dmach_t channel, int cycle_ns);
  * If called before the channel has been used, it may return 1.
  * Otherwise, it returns the number of _bytes_ left to transfer.
  */
-extern int get_dma_residue(dmach_t channel);
+extern int get_dma_residue(unsigned int chan);
 
 #ifndef NO_DMA
 #define NO_DMA 255
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index fc7278ea7146..9e614a18e680 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -15,13 +15,13 @@ struct dma_struct;
 typedef struct dma_struct dma_t;
 
 struct dma_ops {
-	int (*request)(dmach_t, dma_t *); /* optional */
-	void (*free)(dmach_t, dma_t *); /* optional */
-	void (*enable)(dmach_t, dma_t *); /* mandatory */
-	void (*disable)(dmach_t, dma_t *); /* mandatory */
-	int (*residue)(dmach_t, dma_t *); /* optional */
-	int (*setspeed)(dmach_t, dma_t *, int); /* optional */
-	char *type;
+	int (*request)(unsigned int, dma_t *); /* optional */
+	void (*free)(unsigned int, dma_t *); /* optional */
+	void (*enable)(unsigned int, dma_t *); /* mandatory */
+	void (*disable)(unsigned int, dma_t *); /* mandatory */
+	int (*residue)(unsigned int, dma_t *); /* optional */
+	int (*setspeed)(unsigned int, dma_t *, int); /* optional */
+	const char *type;
 };
 
 struct dma_struct {
@@ -34,24 +34,21 @@ struct dma_struct {
 	unsigned int active:1; /* Transfer active */
 	unsigned int invalid:1; /* Address/Count changed */
 
-	dmamode_t dma_mode; /* DMA mode */
+	unsigned int dma_mode; /* DMA mode */
 	int speed; /* DMA speed */
 
 	unsigned int lock; /* Device is allocated */
 	const char *device_id; /* Device name */
 
-	unsigned int dma_base; /* Controller base address */
-	int dma_irq; /* Controller IRQ */
-	struct scatterlist cur_sg; /* Current controller buffer */
-	unsigned int state;
-
-	struct dma_ops *d_ops;
+	const struct dma_ops *d_ops;
 };
 
-/* Prototype: void arch_dma_init(dma)
- * Purpose : Initialise architecture specific DMA
- * Params : dma - pointer to array of DMA structures
+/*
+ * isa_dma_add - add an ISA-style DMA channel
  */
-extern void arch_dma_init(dma_t *dma);
+extern int isa_dma_add(unsigned int, dma_t *dma);
 
-extern void isa_init_dma(dma_t *dma);
+/*
+ * Add the ISA DMA controller. Always takes channels 0-7.
+ */
+extern void isa_init_dma(void);
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
index 4a3a50495c60..0e88e46fc732 100644
--- a/arch/arm/kernel/dma-isa.c
+++ b/arch/arm/kernel/dma-isa.c
@@ -24,11 +24,6 @@
 #include <asm/dma.h>
 #include <asm/mach/dma.h>
 
-#define ISA_DMA_MODE_READ 0x44
-#define ISA_DMA_MODE_WRITE 0x48
-#define ISA_DMA_MODE_CASCADE 0xc0
-#define ISA_DMA_AUTOINIT 0x10
-
 #define ISA_DMA_MASK 0
 #define ISA_DMA_MODE 1
 #define ISA_DMA_CLRFF 2
@@ -49,38 +44,35 @@ static unsigned int isa_dma_port[8][7] = {
 	{ 0xd4, 0xd6, 0xd8, 0x48a, 0x08a, 0xcc, 0xce }
 };
 
-static int isa_get_dma_residue(dmach_t channel, dma_t *dma)
+static int isa_get_dma_residue(unsigned int chan, dma_t *dma)
 {
-	unsigned int io_port = isa_dma_port[channel][ISA_DMA_COUNT];
+	unsigned int io_port = isa_dma_port[chan][ISA_DMA_COUNT];
 	int count;
 
 	count = 1 + inb(io_port);
 	count |= inb(io_port) << 8;
 
-	return channel < 4 ? count : (count << 1);
+	return chan < 4 ? count : (count << 1);
 }
 
-static void isa_enable_dma(dmach_t channel, dma_t *dma)
+static void isa_enable_dma(unsigned int chan, dma_t *dma)
 {
 	if (dma->invalid) {
 		unsigned long address, length;
 		unsigned int mode;
 		enum dma_data_direction direction;
 
-		mode = channel & 3;
+		mode = (chan & 3) | dma->dma_mode;
 		switch (dma->dma_mode & DMA_MODE_MASK) {
 		case DMA_MODE_READ:
-			mode |= ISA_DMA_MODE_READ;
 			direction = DMA_FROM_DEVICE;
 			break;
 
 		case DMA_MODE_WRITE:
-			mode |= ISA_DMA_MODE_WRITE;
 			direction = DMA_TO_DEVICE;
 			break;
 
 		case DMA_MODE_CASCADE:
-			mode |= ISA_DMA_MODE_CASCADE;
 			direction = DMA_BIDIRECTIONAL;
 			break;
 
@@ -105,34 +97,31 @@ static void isa_enable_dma(dmach_t channel, dma_t *dma)
 		address = dma->buf.dma_address;
 		length = dma->buf.length - 1;
 
-		outb(address >> 16, isa_dma_port[channel][ISA_DMA_PGLO]);
-		outb(address >> 24, isa_dma_port[channel][ISA_DMA_PGHI]);
+		outb(address >> 16, isa_dma_port[chan][ISA_DMA_PGLO]);
+		outb(address >> 24, isa_dma_port[chan][ISA_DMA_PGHI]);
 
-		if (channel >= 4) {
+		if (chan >= 4) {
 			address >>= 1;
 			length >>= 1;
 		}
 
-		outb(0, isa_dma_port[channel][ISA_DMA_CLRFF]);
-
-		outb(address, isa_dma_port[channel][ISA_DMA_ADDR]);
-		outb(address >> 8, isa_dma_port[channel][ISA_DMA_ADDR]);
+		outb(0, isa_dma_port[chan][ISA_DMA_CLRFF]);
 
-		outb(length, isa_dma_port[channel][ISA_DMA_COUNT]);
-		outb(length >> 8, isa_dma_port[channel][ISA_DMA_COUNT]);
+		outb(address, isa_dma_port[chan][ISA_DMA_ADDR]);
+		outb(address >> 8, isa_dma_port[chan][ISA_DMA_ADDR]);
 
-		if (dma->dma_mode & DMA_AUTOINIT)
-			mode |= ISA_DMA_AUTOINIT;
+		outb(length, isa_dma_port[chan][ISA_DMA_COUNT]);
+		outb(length >> 8, isa_dma_port[chan][ISA_DMA_COUNT]);
 
-		outb(mode, isa_dma_port[channel][ISA_DMA_MODE]);
+		outb(mode, isa_dma_port[chan][ISA_DMA_MODE]);
 		dma->invalid = 0;
 	}
-	outb(channel & 3, isa_dma_port[channel][ISA_DMA_MASK]);
+	outb(chan & 3, isa_dma_port[chan][ISA_DMA_MASK]);
 }
 
-static void isa_disable_dma(dmach_t channel, dma_t *dma)
+static void isa_disable_dma(unsigned int chan, dma_t *dma)
 {
-	outb(channel | 4, isa_dma_port[channel][ISA_DMA_MASK]);
+	outb(chan | 4, isa_dma_port[chan][ISA_DMA_MASK]);
 }
 
 static struct dma_ops isa_dma_ops = {
@@ -160,7 +149,12 @@ static struct resource dma_resources[] = { {
 	.end = 0x048f
 } };
 
-void __init isa_init_dma(dma_t *dma)
+static dma_t isa_dma[8];
+
+/*
+ * ISA DMA always starts at channel 0
+ */
+void __init isa_init_dma(void)
 {
 	/*
 	 * Try to autodetect presence of an ISA DMA controller.
@@ -178,11 +172,11 @@ void __init isa_init_dma(dma_t *dma)
 	outb(0xaa, 0x00);
 
 	if (inb(0) == 0x55 && inb(0) == 0xaa) {
-		int channel, i;
+		unsigned int chan, i;
 
-		for (channel = 0; channel < 8; channel++) {
-			dma[channel].d_ops = &isa_dma_ops;
-			isa_disable_dma(channel, NULL);
+		for (chan = 0; chan < 8; chan++) {
+			isa_dma[chan].d_ops = &isa_dma_ops;
+			isa_disable_dma(chan, NULL);
 		}
 
 		outb(0x40, 0x0b);
@@ -217,5 +211,12 @@ void __init isa_init_dma(dma_t *dma)
 
 		for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
 			request_resource(&ioport_resource, dma_resources + i);
+
+		for (chan = 0; chan < 8; chan++) {
+			int ret = isa_dma_add(chan, &isa_dma[chan]);
+			if (ret)
+				printk(KERN_ERR "ISADMA%u: unable to register: %d\n",
+				       chan, ret);
+		}
 	}
 }
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index d006085ed7e7..7d5b9fb01e71 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
+#include <linux/scatterlist.h>
 
 #include <asm/dma.h>
 
@@ -23,19 +24,40 @@
 DEFINE_SPINLOCK(dma_spin_lock);
 EXPORT_SYMBOL(dma_spin_lock);
 
-static dma_t dma_chan[MAX_DMA_CHANNELS];
+static dma_t *dma_chan[MAX_DMA_CHANNELS];
+
+static inline dma_t *dma_channel(unsigned int chan)
+{
+	if (chan >= MAX_DMA_CHANNELS)
+		return NULL;
+
+	return dma_chan[chan];
+}
+
+int __init isa_dma_add(unsigned int chan, dma_t *dma)
+{
+	if (!dma->d_ops)
+		return -EINVAL;
+
+	sg_init_table(&dma->buf, 1);
+
+	if (dma_chan[chan])
+		return -EBUSY;
+	dma_chan[chan] = dma;
+	return 0;
+}
 
 /*
  * Request DMA channel
  *
  * On certain platforms, we have to allocate an interrupt as well...
  */
-int request_dma(dmach_t channel, const char *device_id)
+int request_dma(unsigned int chan, const char *device_id)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 	int ret;
 
-	if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
+	if (!dma)
 		goto bad_dma;
 
 	if (xchg(&dma->lock, 1) != 0)
@@ -47,7 +69,7 @@ int request_dma(dmach_t channel, const char *device_id)
 
 	ret = 0;
 	if (dma->d_ops->request)
-		ret = dma->d_ops->request(channel, dma);
+		ret = dma->d_ops->request(chan, dma);
 
 	if (ret)
 		xchg(&dma->lock, 0);
@@ -55,7 +77,7 @@ int request_dma(dmach_t channel, const char *device_id)
 	return ret;
 
 bad_dma:
-	printk(KERN_ERR "dma: trying to allocate DMA%d\n", channel);
+	printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan);
 	return -EINVAL;
 
 busy:
@@ -68,42 +90,42 @@ EXPORT_SYMBOL(request_dma);
  *
  * On certain platforms, we have to free interrupt as well...
  */
-void free_dma(dmach_t channel)
+void free_dma(unsigned int chan)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
-	if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
+	if (!dma)
 		goto bad_dma;
 
 	if (dma->active) {
-		printk(KERN_ERR "dma%d: freeing active DMA\n", channel);
-		dma->d_ops->disable(channel, dma);
+		printk(KERN_ERR "dma%d: freeing active DMA\n", chan);
+		dma->d_ops->disable(chan, dma);
 		dma->active = 0;
 	}
 
 	if (xchg(&dma->lock, 0) != 0) {
 		if (dma->d_ops->free)
-			dma->d_ops->free(channel, dma);
+			dma->d_ops->free(chan, dma);
 		return;
 	}
 
-	printk(KERN_ERR "dma%d: trying to free free DMA\n", channel);
+	printk(KERN_ERR "dma%d: trying to free free DMA\n", chan);
 	return;
 
 bad_dma:
-	printk(KERN_ERR "dma: trying to free DMA%d\n", channel);
+	printk(KERN_ERR "dma: trying to free DMA%d\n", chan);
 }
 EXPORT_SYMBOL(free_dma);
 
 /* Set DMA Scatter-Gather list
  */
-void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg)
+void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (dma->active)
 		printk(KERN_ERR "dma%d: altering DMA SG while "
-			"DMA active\n", channel);
+			"DMA active\n", chan);
 
 	dma->sg = sg;
 	dma->sgcount = nr_sg;
@@ -115,13 +137,13 @@ EXPORT_SYMBOL(set_dma_sg);
 *
 * Copy address to the structure, and set the invalid bit
 */
-void __set_dma_addr (dmach_t channel, void *addr)
+void __set_dma_addr (unsigned int chan, void *addr)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (dma->active)
 		printk(KERN_ERR "dma%d: altering DMA address while "
-			"DMA active\n", channel);
+			"DMA active\n", chan);
 
 	dma->sg = NULL;
 	dma->addr = addr;
@@ -133,13 +155,13 @@ EXPORT_SYMBOL(__set_dma_addr);
 *
 * Copy address to the structure, and set the invalid bit
 */
-void set_dma_count (dmach_t channel, unsigned long count)
+void set_dma_count (unsigned int chan, unsigned long count)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (dma->active)
 		printk(KERN_ERR "dma%d: altering DMA count while "
-			"DMA active\n", channel);
+			"DMA active\n", chan);
 
 	dma->sg = NULL;
 	dma->count = count;
@@ -149,13 +171,13 @@ EXPORT_SYMBOL(set_dma_count);
 
 /* Set DMA direction mode
  */
-void set_dma_mode (dmach_t channel, dmamode_t mode)
+void set_dma_mode (unsigned int chan, unsigned int mode)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (dma->active)
 		printk(KERN_ERR "dma%d: altering DMA mode while "
-			"DMA active\n", channel);
+			"DMA active\n", chan);
 
 	dma->dma_mode = mode;
 	dma->invalid = 1;
@@ -164,42 +186,42 @@ EXPORT_SYMBOL(set_dma_mode);
 
 /* Enable DMA channel
  */
-void enable_dma (dmach_t channel)
+void enable_dma (unsigned int chan)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (!dma->lock)
 		goto free_dma;
 
 	if (dma->active == 0) {
 		dma->active = 1;
-		dma->d_ops->enable(channel, dma);
+		dma->d_ops->enable(chan, dma);
 	}
 	return;
 
 free_dma:
-	printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel);
+	printk(KERN_ERR "dma%d: trying to enable free DMA\n", chan);
 	BUG();
 }
 EXPORT_SYMBOL(enable_dma);
 
 /* Disable DMA channel
  */
-void disable_dma (dmach_t channel)
+void disable_dma (unsigned int chan)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 
 	if (!dma->lock)
 		goto free_dma;
 
 	if (dma->active == 1) {
 		dma->active = 0;
-		dma->d_ops->disable(channel, dma);
+		dma->d_ops->disable(chan, dma);
 	}
 	return;
 
 free_dma:
-	printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel);
+	printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan);
 	BUG();
 }
 EXPORT_SYMBOL(disable_dma);
@@ -207,45 +229,38 @@ EXPORT_SYMBOL(disable_dma);
 /*
  * Is the specified DMA channel active?
  */
-int dma_channel_active(dmach_t channel)
+int dma_channel_active(unsigned int chan)
 {
-	return dma_chan[channel].active;
+	dma_t *dma = dma_channel(chan);
+	return dma->active;
 }
 EXPORT_SYMBOL(dma_channel_active);
 
-void set_dma_page(dmach_t channel, char pagenr)
+void set_dma_page(unsigned int chan, char pagenr)
 {
-	printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel);
+	printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan);
 }
 EXPORT_SYMBOL(set_dma_page);
 
-void set_dma_speed(dmach_t channel, int cycle_ns)
+void set_dma_speed(unsigned int chan, int cycle_ns)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 	int ret = 0;
 
 	if (dma->d_ops->setspeed)
-		ret = dma->d_ops->setspeed(channel, dma, cycle_ns);
+		ret = dma->d_ops->setspeed(chan, dma, cycle_ns);
 	dma->speed = ret;
 }
 EXPORT_SYMBOL(set_dma_speed);
 
-int get_dma_residue(dmach_t channel)
+int get_dma_residue(unsigned int chan)
 {
-	dma_t *dma = dma_chan + channel;
+	dma_t *dma = dma_channel(chan);
 	int ret = 0;
 
 	if (dma->d_ops->residue)
-		ret = dma->d_ops->residue(channel, dma);
+		ret = dma->d_ops->residue(chan, dma);
 
 	return ret;
 }
 EXPORT_SYMBOL(get_dma_residue);
-
-static int __init init_dma(void)
-{
-	arch_dma_init(dma_chan);
-	return 0;
-}
-
-core_initcall(init_dma);
diff --git a/arch/arm/mach-footbridge/dma.c b/arch/arm/mach-footbridge/dma.c
index 4f3506346969..e2e0df8bcee2 100644
--- a/arch/arm/mach-footbridge/dma.c
+++ b/arch/arm/mach-footbridge/dma.c
@@ -21,16 +21,16 @@
 #include <asm/hardware/dec21285.h>
 
 #if 0
-static int fb_dma_request(dmach_t channel, dma_t *dma)
+static int fb_dma_request(unsigned int chan, dma_t *dma)
 {
 	return -EINVAL;
 }
 
-static void fb_dma_enable(dmach_t channel, dma_t *dma)
+static void fb_dma_enable(unsigned int chan, dma_t *dma)
 {
 }
 
-static void fb_dma_disable(dmach_t channel, dma_t *dma)
+static void fb_dma_disable(unsigned int chan, dma_t *dma)
 {
 }
 
@@ -42,7 +42,7 @@ static struct dma_ops fb_dma_ops = {
 };
 #endif
 
-void __init arch_dma_init(dma_t *dma)
+static int __init fb_dma_init(void)
 {
 #if 0
 	dma[_DC21285_DMA(0)].d_ops = &fb_dma_ops;
@@ -50,6 +50,8 @@ void __init arch_dma_init(dma_t *dma)
 #endif
 #ifdef CONFIG_ISA_DMA
 	if (footbridge_cfn_mode())
-		isa_init_dma(dma + _ISA_DMA(0));
+		isa_init_dma();
 #endif
+	return 0;
 }
+core_initcall(fb_dma_init);
diff --git a/arch/arm/mach-rpc/dma.c b/arch/arm/mach-rpc/dma.c
index 7958a30f8932..c47d974d52bd 100644
--- a/arch/arm/mach-rpc/dma.c
+++ b/arch/arm/mach-rpc/dma.c
@@ -26,6 +26,16 @@
 #include <asm/mach/dma.h>
 #include <asm/hardware/iomd.h>
 
+struct iomd_dma {
+	struct dma_struct dma;
+	unsigned int state;
+	unsigned long base; /* Controller base address */
+	int irq; /* Controller IRQ */
+	struct scatterlist cur_sg; /* Current controller buffer */
+	dma_addr_t dma_addr;
+	unsigned int dma_len;
+};
+
 #if 0
 typedef enum {
 	dma_size_8 = 1,
@@ -44,15 +54,15 @@ typedef enum {
 #define CR (IOMD_IO0CR - IOMD_IO0CURA)
 #define ST (IOMD_IO0ST - IOMD_IO0CURA)
 
-static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
+static void iomd_get_next_sg(struct scatterlist *sg, struct iomd_dma *idma)
 {
 	unsigned long end, offset, flags = 0;
 
-	if (dma->sg) {
-		sg->dma_address = dma->sg->dma_address;
+	if (idma->dma.sg) {
+		sg->dma_address = idma->dma_addr;
 		offset = sg->dma_address & ~PAGE_MASK;
 
-		end = offset + dma->sg->length;
+		end = offset + idma->dma_len;
 
 		if (end > PAGE_SIZE)
 			end = PAGE_SIZE;
@@ -62,15 +72,17 @@ static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
 
 		sg->length = end - TRANSFER_SIZE;
 
-		dma->sg->length -= end - offset;
-		dma->sg->dma_address += end - offset;
+		idma->dma_len -= end - offset;
+		idma->dma_addr += end - offset;
 
-		if (dma->sg->length == 0) {
-			if (dma->sgcount > 1) {
-				dma->sg++;
-				dma->sgcount--;
+		if (idma->dma_len == 0) {
+			if (idma->dma.sgcount > 1) {
+				idma->dma.sg = sg_next(idma->dma.sg);
+				idma->dma_addr = idma->dma.sg->dma_address;
+				idma->dma_len = idma->dma.sg->length;
+				idma->dma.sgcount--;
 			} else {
-				dma->sg = NULL;
+				idma->dma.sg = NULL;
 				flags |= DMA_END_S;
 			}
 		}
@@ -85,8 +97,8 @@ static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
 
 static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
 {
-	dma_t *dma = (dma_t *)dev_id;
-	unsigned long base = dma->dma_base;
+	struct iomd_dma *idma = dev_id;
+	unsigned long base = idma->base;
 
 	do {
 		unsigned int status;
@@ -95,93 +107,99 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
 		if (!(status & DMA_ST_INT))
 			return IRQ_HANDLED;
 
-		if ((dma->state ^ status) & DMA_ST_AB)
-			iomd_get_next_sg(&dma->cur_sg, dma);
+		if ((idma->state ^ status) & DMA_ST_AB)
+			iomd_get_next_sg(&idma->cur_sg, idma);
 
 		switch (status & (DMA_ST_OFL | DMA_ST_AB)) {
 		case DMA_ST_OFL: /* OIA */
 		case DMA_ST_AB: /* .IB */
-			iomd_writel(dma->cur_sg.dma_address, base + CURA);
-			iomd_writel(dma->cur_sg.length, base + ENDA);
-			dma->state = DMA_ST_AB;
+			iomd_writel(idma->cur_sg.dma_address, base + CURA);
+			iomd_writel(idma->cur_sg.length, base + ENDA);
+			idma->state = DMA_ST_AB;
 			break;
 
 		case DMA_ST_OFL | DMA_ST_AB: /* OIB */
 		case 0: /* .IA */
-			iomd_writel(dma->cur_sg.dma_address, base + CURB);
-			iomd_writel(dma->cur_sg.length, base + ENDB);
-			dma->state = 0;
+			iomd_writel(idma->cur_sg.dma_address, base + CURB);
+			iomd_writel(idma->cur_sg.length, base + ENDB);
+			idma->state = 0;
 			break;
 		}
 
 		if (status & DMA_ST_OFL &&
-		    dma->cur_sg.length == (DMA_END_S|DMA_END_L))
+		    idma->cur_sg.length == (DMA_END_S|DMA_END_L))
 			break;
 	} while (1);
 
-	dma->state = ~DMA_ST_AB;
+	idma->state = ~DMA_ST_AB;
 	disable_irq(irq);
 
 	return IRQ_HANDLED;
 }
 
-static int iomd_request_dma(dmach_t channel, dma_t *dma)
+static int iomd_request_dma(unsigned int chan, dma_t *dma)
 {
-	return request_irq(dma->dma_irq, iomd_dma_handle,
-			   IRQF_DISABLED, dma->device_id, dma);
+	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
+
+	return request_irq(idma->irq, iomd_dma_handle,
+			   IRQF_DISABLED, idma->dma.device_id, idma);
 }
 
-static void iomd_free_dma(dmach_t channel, dma_t *dma)
+static void iomd_free_dma(unsigned int chan, dma_t *dma)
 {
-	free_irq(dma->dma_irq, dma);
+	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
+
+	free_irq(idma->irq, idma);
 }
 
-static void iomd_enable_dma(dmach_t channel, dma_t *dma)
+static void iomd_enable_dma(unsigned int chan, dma_t *dma)
 {
-	unsigned long dma_base = dma->dma_base;
+	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
+	unsigned long dma_base = idma->base;
 	unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;
 
-	if (dma->invalid) {
-		dma->invalid = 0;
+	if (idma->dma.invalid) {
+		idma->dma.invalid = 0;
 
 		/*
 		 * Cope with ISA-style drivers which expect cache
 		 * coherence.
 		 */
-		if (!dma->sg) {
-			dma->sg = &dma->buf;
-			dma->sgcount = 1;
-			dma->buf.length = dma->count;
-			dma->buf.dma_address = dma_map_single(NULL,
-				dma->addr, dma->count,
-				dma->dma_mode == DMA_MODE_READ ?
+		if (!idma->dma.sg) {
+			idma->dma.sg = &idma->dma.buf;
+			idma->dma.sgcount = 1;
+			idma->dma.buf.length = idma->dma.count;
+			idma->dma.buf.dma_address = dma_map_single(NULL,
+				idma->dma.addr, idma->dma.count,
+				idma->dma.dma_mode == DMA_MODE_READ ?
 				DMA_FROM_DEVICE : DMA_TO_DEVICE);
 		}
 
 		iomd_writeb(DMA_CR_C, dma_base + CR);
-		dma->state = DMA_ST_AB;
+		idma->state = DMA_ST_AB;
 	}
 
-	if (dma->dma_mode == DMA_MODE_READ)
+	if (idma->dma.dma_mode == DMA_MODE_READ)
 		ctrl |= DMA_CR_D;
 
 	iomd_writeb(ctrl, dma_base + CR);
-	enable_irq(dma->dma_irq);
+	enable_irq(idma->irq);
 }
 
-static void iomd_disable_dma(dmach_t channel, dma_t *dma)
+static void iomd_disable_dma(unsigned int chan, dma_t *dma)
 {
-	unsigned long dma_base = dma->dma_base;
+	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
+	unsigned long dma_base = idma->base;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	if (dma->state != ~DMA_ST_AB)
-		disable_irq(dma->dma_irq);
+	if (idma->state != ~DMA_ST_AB)
+		disable_irq(idma->irq);
 	iomd_writeb(0, dma_base + CR);
 	local_irq_restore(flags);
 }
 
-static int iomd_set_dma_speed(dmach_t channel, dma_t *dma, int cycle)
+static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle)
 {
 	int tcr, speed;
 
@@ -197,7 +215,7 @@ static int iomd_set_dma_speed(dmach_t channel, dma_t *dma, int cycle)
 	tcr = iomd_readb(IOMD_DMATCR);
 	speed &= 3;
 
-	switch (channel) {
+	switch (chan) {
 	case DMA_0:
 		tcr = (tcr & ~0x03) | speed;
 		break;
@@ -236,16 +254,22 @@ static struct fiq_handler fh = {
 	.name = "floppydma"
 };
 
-static void floppy_enable_dma(dmach_t channel, dma_t *dma)
+struct floppy_dma {
+	struct dma_struct dma;
+	unsigned int fiq;
+};
+
+static void floppy_enable_dma(unsigned int chan, dma_t *dma)
 {
+	struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
 	void *fiqhandler_start;
 	unsigned int fiqhandler_length;
 	struct pt_regs regs;
 
-	if (dma->sg)
+	if (fdma->dma.sg)
 		BUG();
 
-	if (dma->dma_mode == DMA_MODE_READ) {
+	if (fdma->dma.dma_mode == DMA_MODE_READ) {
 		extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
 		fiqhandler_start = &floppy_fiqin_start;
 		fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
@@ -255,8 +279,8 @@ static void floppy_enable_dma(dmach_t channel, dma_t *dma)
 		fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
 	}
 
-	regs.ARM_r9 = dma->count;
-	regs.ARM_r10 = (unsigned long)dma->addr;
+	regs.ARM_r9 = fdma->dma.count;
+	regs.ARM_r10 = (unsigned long)fdma->dma.addr;
 	regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE;
 
 	if (claim_fiq(&fh)) {
@@ -266,16 +290,17 @@ static void floppy_enable_dma(dmach_t channel, dma_t *dma)
 
 	set_fiq_handler(fiqhandler_start, fiqhandler_length);
 	set_fiq_regs(&regs);
-	enable_fiq(dma->dma_irq);
+	enable_fiq(fdma->fiq);
 }
 
-static void floppy_disable_dma(dmach_t channel, dma_t *dma)
+static void floppy_disable_dma(unsigned int chan, dma_t *dma)
 {
-	disable_fiq(dma->dma_irq);
+	struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
+	disable_fiq(fdma->fiq);
 	release_fiq(&fh);
 }
 
-static int floppy_get_residue(dmach_t channel, dma_t *dma)
+static int floppy_get_residue(unsigned int chan, dma_t *dma)
 {
 	struct pt_regs regs;
 	get_fiq_regs(&regs);
@@ -292,7 +317,7 @@ static struct dma_ops floppy_dma_ops = {
 /*
  * This is virtual DMA - we don't need anything here.
  */
-static void sound_enable_disable_dma(dmach_t channel, dma_t *dma)
+static void sound_enable_disable_dma(unsigned int chan, dma_t *dma)
 {
 }
 
@@ -302,8 +327,24 @@ static struct dma_ops sound_dma_ops = {
 	.disable = sound_enable_disable_dma,
 };
 
-void __init arch_dma_init(dma_t *dma)
+static struct iomd_dma iomd_dma[6];
+
+static struct floppy_dma floppy_dma = {
+	.dma = {
+		.d_ops = &floppy_dma_ops,
+	},
+	.fiq = FIQ_FLOPPYDATA,
+};
+
+static dma_t sound_dma = {
+	.d_ops = &sound_dma_ops,
+};
+
+static int __init rpc_dma_init(void)
 {
+	unsigned int i;
+	int ret;
+
 	iomd_writeb(0, IOMD_IO0CR);
 	iomd_writeb(0, IOMD_IO1CR);
 	iomd_writeb(0, IOMD_IO2CR);
@@ -311,31 +352,39 @@
 
 	iomd_writeb(0xa0, IOMD_DMATCR);
 
-	dma[DMA_0].dma_base = IOMD_IO0CURA;
-	dma[DMA_0].dma_irq = IRQ_DMA0;
-	dma[DMA_0].d_ops = &iomd_dma_ops;
-	dma[DMA_1].dma_base = IOMD_IO1CURA;
-	dma[DMA_1].dma_irq = IRQ_DMA1;
-	dma[DMA_1].d_ops = &iomd_dma_ops;
-	dma[DMA_2].dma_base = IOMD_IO2CURA;
-	dma[DMA_2].dma_irq = IRQ_DMA2;
-	dma[DMA_2].d_ops = &iomd_dma_ops;
-	dma[DMA_3].dma_base = IOMD_IO3CURA;
-	dma[DMA_3].dma_irq = IRQ_DMA3;
-	dma[DMA_3].d_ops = &iomd_dma_ops;
-	dma[DMA_S0].dma_base = IOMD_SD0CURA;
-	dma[DMA_S0].dma_irq = IRQ_DMAS0;
-	dma[DMA_S0].d_ops = &iomd_dma_ops;
-	dma[DMA_S1].dma_base = IOMD_SD1CURA;
-	dma[DMA_S1].dma_irq = IRQ_DMAS1;
-	dma[DMA_S1].d_ops = &iomd_dma_ops;
-	dma[DMA_VIRTUAL_FLOPPY].dma_irq = FIQ_FLOPPYDATA;
-	dma[DMA_VIRTUAL_FLOPPY].d_ops = &floppy_dma_ops;
-	dma[DMA_VIRTUAL_SOUND].d_ops = &sound_dma_ops;
-
 	/*
 	 * Setup DMA channels 2,3 to be for podules
 	 * and channels 0,1 for internal devices
 	 */
 	iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);
+
+	iomd_dma[DMA_0].base = IOMD_IO0CURA;
+	iomd_dma[DMA_0].irq = IRQ_DMA0;
+	iomd_dma[DMA_1].base = IOMD_IO1CURA;
+	iomd_dma[DMA_1].irq = IRQ_DMA1;
+	iomd_dma[DMA_2].base = IOMD_IO2CURA;
+	iomd_dma[DMA_2].irq = IRQ_DMA2;
+	iomd_dma[DMA_3].base = IOMD_IO3CURA;
+	iomd_dma[DMA_3].irq = IRQ_DMA3;
+	iomd_dma[DMA_S0].base = IOMD_SD0CURA;
+	iomd_dma[DMA_S0].irq = IRQ_DMAS0;
+	iomd_dma[DMA_S1].base = IOMD_SD1CURA;
+	iomd_dma[DMA_S1].irq = IRQ_DMAS1;
+
+	for (i = DMA_0; i <= DMA_S1; i++) {
+		iomd_dma[i].dma.d_ops = &iomd_dma_ops;
+
+		ret = isa_dma_add(i, &iomd_dma[i].dma);
+		if (ret)
+			printk("IOMDDMA%u: unable to register: %d\n", i, ret);
+	}
+
+	ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma);
+	if (ret)
+		printk("IOMDFLOPPY: unable to register: %d\n", ret);
+	ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma);
+	if (ret)
+		printk("IOMDSOUND: unable to register: %d\n", ret);
+	return 0;
 }
+core_initcall(rpc_dma_init);
diff --git a/arch/arm/mach-rpc/include/mach/isa-dma.h b/arch/arm/mach-rpc/include/mach/isa-dma.h
index bad720548587..67bfc6719c34 100644
--- a/arch/arm/mach-rpc/include/mach/isa-dma.h
+++ b/arch/arm/mach-rpc/include/mach/isa-dma.h
@@ -23,5 +23,7 @@
 
 #define DMA_FLOPPY DMA_VIRTUAL_FLOPPY
 
+#define IOMD_DMA_BOUNDARY (PAGE_SIZE - 1)
+
 #endif /* _ASM_ARCH_DMA_H */
 
diff --git a/arch/arm/mach-shark/dma.c b/arch/arm/mach-shark/dma.c
index 6774b8d5d13d..10b5b8b3272a 100644
--- a/arch/arm/mach-shark/dma.c
+++ b/arch/arm/mach-shark/dma.c
@@ -13,9 +13,11 @@
 #include <asm/dma.h>
 #include <asm/mach/dma.h>
 
-void __init arch_dma_init(dma_t *dma)
+static int __init shark_dma_init(void)
 {
 #ifdef CONFIG_ISA_DMA
-	isa_init_dma(dma);
+	isa_init_dma();
 #endif
+	return 0;
 }
+core_initcall(shark_dma_init);
diff --git a/arch/arm/plat-mxc/dma-mx1-mx2.c b/arch/arm/plat-mxc/dma-mx1-mx2.c
index 2905ec758758..a9bab15f0fd0 100644
--- a/arch/arm/plat-mxc/dma-mx1-mx2.c
+++ b/arch/arm/plat-mxc/dma-mx1-mx2.c
@@ -113,7 +113,7 @@ struct imx_dma_channel {
 	void (*err_handler) (int, void *, int errcode);
 	void (*prog_handler) (int, void *, struct scatterlist *);
 	void *data;
-	unsigned int dma_mode;
+	unsigned int dma_mode;
 	struct scatterlist *sg;
 	unsigned int resbytes;
 	int dma_num;
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index cf9e9848f8b5..d7bc925c524d 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -45,8 +45,6 @@ static const struct portinfo pata_icside_portinfo_v6_2 = {
 	.stepping = 6,
 };
 
-#define PATA_ICSIDE_MAX_SG 128
-
 struct pata_icside_state {
 	void __iomem *irq_port;
 	void __iomem *ioc_base;
@@ -57,7 +55,6 @@ struct pata_icside_state {
 		u8 disabled;
 		unsigned int speed[ATA_MAX_DEVICES];
 	} port[2];
-	struct scatterlist sg[PATA_ICSIDE_MAX_SG];
 };
 
 struct pata_icside_info {
@@ -222,9 +219,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct pata_icside_state *state = ap->host->private_data;
-	struct scatterlist *sg, *rsg = state->sg;
 	unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
-	unsigned int si;
 
 	/*
 	 * We are simplex; BUG if we try to fiddle with DMA
@@ -233,20 +228,12 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
 	BUG_ON(dma_channel_active(state->dma));
 
 	/*
-	 * Copy ATAs scattered sg list into a contiguous array of sg
-	 */
-	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-		memcpy(rsg, sg, sizeof(*sg));
-		rsg++;
-	}
-
-	/*
 	 * Route the DMA signals to the correct interface
 	 */
 	writeb(state->port[ap->port_no].port_sel, state->ioc_base);
 
 	set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
-	set_dma_sg(state->dma, state->sg, rsg - state->sg);
+	set_dma_sg(state->dma, qc->sg, qc->n_elem);
 	set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);
 
 	/* issue r/w command */
@@ -306,8 +293,8 @@ static int icside_dma_init(struct pata_icside_info *info)
 
 static struct scsi_host_template pata_icside_sht = {
 	ATA_BASE_SHT(DRV_NAME),
-	.sg_tablesize = PATA_ICSIDE_MAX_SG,
-	.dma_boundary = ~0, /* no dma boundaries */
+	.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+	.dma_boundary = IOMD_DMA_BOUNDARY,
 };
 
 static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 68a64123af8f..8ee01b907332 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -390,7 +390,8 @@ static struct scsi_host_template cumanascsi2_template = {
 	.eh_abort_handler = fas216_eh_abort,
 	.can_queue = 1,
 	.this_id = 7,
-	.sg_tablesize = SG_ALL,
+	.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+	.dma_boundary = IOMD_DMA_BOUNDARY,
 	.cmd_per_lun = 1,
 	.use_clustering = DISABLE_CLUSTERING,
 	.proc_name = "cumanascsi2",
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index bb2477b3fb0b..d8435132f461 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -508,7 +508,8 @@ static struct scsi_host_template eesox_template = {
 	.eh_abort_handler = fas216_eh_abort,
 	.can_queue = 1,
 	.this_id = 7,
-	.sg_tablesize = SG_ALL,
+	.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+	.dma_boundary = IOMD_DMA_BOUNDARY,
 	.cmd_per_lun = 1,
 	.use_clustering = DISABLE_CLUSTERING,
 	.proc_name = "eesox",
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index d9a546d1917c..e2297b4c1b9e 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -302,7 +302,8 @@ static struct scsi_host_template powertecscsi_template = {
 
 	.can_queue = 8,
 	.this_id = 7,
-	.sg_tablesize = SG_ALL,
+	.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+	.dma_boundary = IOMD_DMA_BOUNDARY,
 	.cmd_per_lun = 2,
 	.use_clustering = ENABLE_CLUSTERING,
 	.proc_name = "powertec",