author    | Jordan Crouse <jordan.crouse@amd.com>          | 2005-12-14 20:17:46 -0500
committer | Bartlomiej Zolnierkiewicz <bzolnier@gmail.com> | 2005-12-14 20:17:46 -0500
commit    | 8f29e650bffc1e22ed6b2d0b321bc77627f3bb7a (patch)
tree      | d6b785c09c57a8f742e9a81162ee0d176677dc21 /drivers/ide/mips/au1xxx-ide.c
parent    | 65e5f2e3b457b6b20a5c4481312189d141a33d24 (diff)
[PATCH] ide: AU1200 IDE update
Changes here include removing all of the CONFIG_PM code while it is being repeatedly
smacked with a lead pipe, moving the BURSTMODE parameter to a #define (it should
be defined almost always anyway), fixing the rqsize handling, removing the local
ide_ioreg_t definition, and general cleanups.
Signed-off-by: Jordan Crouse <jordan.crouse@amd.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Diffstat (limited to 'drivers/ide/mips/au1xxx-ide.c')
-rw-r--r-- | drivers/ide/mips/au1xxx-ide.c | 1498
1 file changed, 532 insertions(+), 966 deletions(-)
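One of the changes called out in the commit message: the DbDMA burst-mode setting is no longer a Kconfig option (CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON) but a driver-local #define. A minimal sketch of the new arrangement, using only identifiers that appear in the patch below and trimmed to the relevant lines (not a complete function):

```c
/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE 1

/* inside auide_ddma_init(): pick the DbDMA device flags once */
#ifdef IDE_AU1XXX_BURSTMODE
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

/* the same flags are then applied to both transfer directions */
auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
		     DEV_FLAGS_OUT | flags);
```

Compared with the old code, which OR'd DEV_FLAGS_BURSTABLE into the device flags inline under the Kconfig symbol, this keeps the burst decision in one place for both the MDMA and PIO+DDMA builds.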
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 2b6327c576b9..32431dcf5d8e 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -31,865 +31,638 @@
31 | */ | 31 | */ |
32 | #undef REALLY_SLOW_IO /* most systems can safely undef this */ | 32 | #undef REALLY_SLOW_IO /* most systems can safely undef this */ |
33 | 33 | ||
34 | #include <linux/config.h> /* for CONFIG_BLK_DEV_IDEPCI */ | ||
35 | #include <linux/types.h> | 34 | #include <linux/types.h> |
36 | #include <linux/module.h> | 35 | #include <linux/module.h> |
37 | #include <linux/kernel.h> | 36 | #include <linux/kernel.h> |
38 | #include <linux/delay.h> | 37 | #include <linux/delay.h> |
39 | #include <linux/timer.h> | 38 | #include <linux/platform_device.h> |
40 | #include <linux/mm.h> | 39 | |
41 | #include <linux/ioport.h> | ||
42 | #include <linux/hdreg.h> | ||
43 | #include <linux/init.h> | 40 | #include <linux/init.h> |
44 | #include <linux/ide.h> | 41 | #include <linux/ide.h> |
45 | #include <linux/sysdev.h> | 42 | #include <linux/sysdev.h> |
46 | 43 | ||
47 | #include <linux/dma-mapping.h> | 44 | #include <linux/dma-mapping.h> |
48 | 45 | ||
46 | #include "ide-timing.h" | ||
47 | |||
49 | #include <asm/io.h> | 48 | #include <asm/io.h> |
50 | #include <asm/mach-au1x00/au1xxx.h> | 49 | #include <asm/mach-au1x00/au1xxx.h> |
51 | #include <asm/mach-au1x00/au1xxx_dbdma.h> | 50 | #include <asm/mach-au1x00/au1xxx_dbdma.h> |
52 | 51 | ||
53 | #if CONFIG_PM | ||
54 | #include <asm/mach-au1x00/au1xxx_pm.h> | ||
55 | #endif | ||
56 | |||
57 | #include <asm/mach-au1x00/au1xxx_ide.h> | 52 | #include <asm/mach-au1x00/au1xxx_ide.h> |
58 | 53 | ||
59 | #define DRV_NAME "au1200-ide" | 54 | #define DRV_NAME "au1200-ide" |
60 | #define DRV_VERSION "1.0" | 55 | #define DRV_VERSION "1.0" |
61 | #define DRV_AUTHOR "AMD PCS / Pete Popov <ppopov@embeddedalley.com>" | 56 | #define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>" |
62 | #define DRV_DESC "Au1200 IDE" | ||
63 | |||
64 | static _auide_hwif auide_hwif; | ||
65 | static spinlock_t ide_tune_drive_spin_lock = SPIN_LOCK_UNLOCKED; | ||
66 | static spinlock_t ide_tune_chipset_spin_lock = SPIN_LOCK_UNLOCKED; | ||
67 | static int dbdma_init_done = 0; | ||
68 | |||
69 | /* | ||
70 | * local I/O functions | ||
71 | */ | ||
72 | u8 auide_inb(unsigned long port) | ||
73 | { | ||
74 | return (au_readb(port)); | ||
75 | } | ||
76 | 57 | ||
77 | u16 auide_inw(unsigned long port) | 58 | /* enable the burstmode in the dbdma */ |
78 | { | 59 | #define IDE_AU1XXX_BURSTMODE 1 |
79 | return (au_readw(port)); | ||
80 | } | ||
81 | 60 | ||
82 | u32 auide_inl(unsigned long port) | 61 | static _auide_hwif auide_hwif; |
83 | { | 62 | static int dbdma_init_done; |
84 | return (au_readl(port)); | ||
85 | } | ||
86 | 63 | ||
87 | void auide_insw(unsigned long port, void *addr, u32 count) | ||
88 | { | ||
89 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) | 64 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) |
90 | 65 | ||
91 | _auide_hwif *ahwif = &auide_hwif; | 66 | void auide_insw(unsigned long port, void *addr, u32 count) |
92 | chan_tab_t *ctp; | ||
93 | au1x_ddma_desc_t *dp; | ||
94 | |||
95 | if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1, | ||
96 | DDMA_FLAGS_NOIE)) { | ||
97 | printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); | ||
98 | return; | ||
99 | } | ||
100 | ctp = *((chan_tab_t **)ahwif->rx_chan); | ||
101 | dp = ctp->cur_ptr; | ||
102 | while (dp->dscr_cmd0 & DSCR_CMD0_V) | ||
103 | ; | ||
104 | ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp); | ||
105 | #else | ||
106 | while (count--) | ||
107 | { | ||
108 | *(u16 *)addr = au_readw(port); | ||
109 | addr +=2 ; | ||
110 | } | ||
111 | #endif | ||
112 | } | ||
113 | |||
114 | void auide_insl(unsigned long port, void *addr, u32 count) | ||
115 | { | ||
116 | while (count--) | ||
117 | { | ||
118 | *(u32 *)addr = au_readl(port); | ||
119 | /* NOTE: For IDE interfaces over PCMCIA, | ||
120 | * 32-bit access does not work | ||
121 | */ | ||
122 | addr += 4; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | void auide_outb(u8 addr, unsigned long port) | ||
127 | { | 67 | { |
128 | return (au_writeb(addr, port)); | 68 | _auide_hwif *ahwif = &auide_hwif; |
129 | } | 69 | chan_tab_t *ctp; |
70 | au1x_ddma_desc_t *dp; | ||
130 | 71 | ||
131 | void auide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port) | 72 | if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1, |
132 | { | 73 | DDMA_FLAGS_NOIE)) { |
133 | return (au_writeb(addr, port)); | 74 | printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); |
75 | return; | ||
76 | } | ||
77 | ctp = *((chan_tab_t **)ahwif->rx_chan); | ||
78 | dp = ctp->cur_ptr; | ||
79 | while (dp->dscr_cmd0 & DSCR_CMD0_V) | ||
80 | ; | ||
81 | ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp); | ||
134 | } | 82 | } |
135 | 83 | ||
136 | void auide_outw(u16 addr, unsigned long port) | 84 | void auide_outsw(unsigned long port, void *addr, u32 count) |
137 | { | 85 | { |
138 | return (au_writew(addr, port)); | 86 | _auide_hwif *ahwif = &auide_hwif; |
139 | } | 87 | chan_tab_t *ctp; |
88 | au1x_ddma_desc_t *dp; | ||
140 | 89 | ||
141 | void auide_outl(u32 addr, unsigned long port) | 90 | if(!put_source_flags(ahwif->tx_chan, (void*)addr, |
142 | { | 91 | count << 1, DDMA_FLAGS_NOIE)) { |
143 | return (au_writel(addr, port)); | 92 | printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); |
93 | return; | ||
94 | } | ||
95 | ctp = *((chan_tab_t **)ahwif->tx_chan); | ||
96 | dp = ctp->cur_ptr; | ||
97 | while (dp->dscr_cmd0 & DSCR_CMD0_V) | ||
98 | ; | ||
99 | ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp); | ||
144 | } | 100 | } |
145 | 101 | ||
146 | void auide_outsw(unsigned long port, void *addr, u32 count) | ||
147 | { | ||
148 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) | ||
149 | _auide_hwif *ahwif = &auide_hwif; | ||
150 | chan_tab_t *ctp; | ||
151 | au1x_ddma_desc_t *dp; | ||
152 | |||
153 | if(!put_source_flags(ahwif->tx_chan, (void*)addr, | ||
154 | count << 1, DDMA_FLAGS_NOIE)) { | ||
155 | printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); | ||
156 | return; | ||
157 | } | ||
158 | ctp = *((chan_tab_t **)ahwif->tx_chan); | ||
159 | dp = ctp->cur_ptr; | ||
160 | while (dp->dscr_cmd0 & DSCR_CMD0_V) | ||
161 | ; | ||
162 | ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp); | ||
163 | #else | ||
164 | while (count--) | ||
165 | { | ||
166 | au_writew(*(u16 *)addr, port); | ||
167 | addr += 2; | ||
168 | } | ||
169 | #endif | 102 | #endif |
170 | } | ||
171 | |||
172 | void auide_outsl(unsigned long port, void *addr, u32 count) | ||
173 | { | ||
174 | while (count--) | ||
175 | { | ||
176 | au_writel(*(u32 *)addr, port); | ||
177 | /* NOTE: For IDE interfaces over PCMCIA, | ||
178 | * 32-bit access does not work | ||
179 | */ | ||
180 | addr += 4; | ||
181 | } | ||
182 | } | ||
183 | 103 | ||
184 | static void auide_tune_drive(ide_drive_t *drive, byte pio) | 104 | static void auide_tune_drive(ide_drive_t *drive, byte pio) |
185 | { | 105 | { |
186 | int mem_sttime; | 106 | int mem_sttime; |
187 | int mem_stcfg; | 107 | int mem_stcfg; |
188 | unsigned long flags; | 108 | u8 speed; |
189 | u8 speed; | 109 | |
190 | 110 | /* get the best pio mode for the drive */ | |
191 | /* get the best pio mode for the drive */ | 111 | pio = ide_get_best_pio_mode(drive, pio, 4, NULL); |
192 | pio = ide_get_best_pio_mode(drive, pio, 4, NULL); | 112 | |
193 | 113 | printk(KERN_INFO "%s: setting Au1XXX IDE to PIO mode%d\n", | |
194 | printk("%s: setting Au1XXX IDE to PIO mode%d\n", | 114 | drive->name, pio); |
195 | drive->name, pio); | 115 | |
196 | 116 | mem_sttime = 0; | |
197 | spin_lock_irqsave(&ide_tune_drive_spin_lock, flags); | 117 | mem_stcfg = au_readl(MEM_STCFG2); |
198 | 118 | ||
199 | mem_sttime = 0; | 119 | /* set pio mode! */ |
200 | mem_stcfg = au_readl(MEM_STCFG2); | 120 | switch(pio) { |
201 | 121 | case 0: | |
202 | /* set pio mode! */ | 122 | mem_sttime = SBC_IDE_TIMING(PIO0); |
203 | switch(pio) { | 123 | |
204 | case 0: | 124 | /* set configuration for RCS2# */ |
205 | /* set timing parameters for RCS2# */ | 125 | mem_stcfg |= TS_MASK; |
206 | mem_sttime = SBC_IDE_PIO0_TWCS | 126 | mem_stcfg &= ~TCSOE_MASK; |
207 | | SBC_IDE_PIO0_TCSH | 127 | mem_stcfg &= ~TOECS_MASK; |
208 | | SBC_IDE_PIO0_TCSOFF | 128 | mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS; |
209 | | SBC_IDE_PIO0_TWP | 129 | break; |
210 | | SBC_IDE_PIO0_TCSW | 130 | |
211 | | SBC_IDE_PIO0_TPM | 131 | case 1: |
212 | | SBC_IDE_PIO0_TA; | 132 | mem_sttime = SBC_IDE_TIMING(PIO1); |
213 | /* set configuration for RCS2# */ | 133 | |
214 | mem_stcfg |= TS_MASK; | 134 | /* set configuration for RCS2# */ |
215 | mem_stcfg &= ~TCSOE_MASK; | 135 | mem_stcfg |= TS_MASK; |
216 | mem_stcfg &= ~TOECS_MASK; | 136 | mem_stcfg &= ~TCSOE_MASK; |
217 | mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS; | 137 | mem_stcfg &= ~TOECS_MASK; |
218 | 138 | mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS; | |
219 | au_writel(mem_sttime,MEM_STTIME2); | 139 | break; |
220 | au_writel(mem_stcfg,MEM_STCFG2); | 140 | |
221 | break; | 141 | case 2: |
222 | 142 | mem_sttime = SBC_IDE_TIMING(PIO2); | |
223 | case 1: | 143 | |
224 | /* set timing parameters for RCS2# */ | 144 | /* set configuration for RCS2# */ |
225 | mem_sttime = SBC_IDE_PIO1_TWCS | 145 | mem_stcfg &= ~TS_MASK; |
226 | | SBC_IDE_PIO1_TCSH | 146 | mem_stcfg &= ~TCSOE_MASK; |
227 | | SBC_IDE_PIO1_TCSOFF | 147 | mem_stcfg &= ~TOECS_MASK; |
228 | | SBC_IDE_PIO1_TWP | 148 | mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS; |
229 | | SBC_IDE_PIO1_TCSW | 149 | break; |
230 | | SBC_IDE_PIO1_TPM | 150 | |
231 | | SBC_IDE_PIO1_TA; | 151 | case 3: |
232 | /* set configuration for RCS2# */ | 152 | mem_sttime = SBC_IDE_TIMING(PIO3); |
233 | mem_stcfg |= TS_MASK; | 153 | |
234 | mem_stcfg &= ~TCSOE_MASK; | 154 | /* set configuration for RCS2# */ |
235 | mem_stcfg &= ~TOECS_MASK; | 155 | mem_stcfg &= ~TS_MASK; |
236 | mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS; | 156 | mem_stcfg &= ~TCSOE_MASK; |
237 | break; | 157 | mem_stcfg &= ~TOECS_MASK; |
238 | 158 | mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS; | |
239 | case 2: | 159 | |
240 | /* set timing parameters for RCS2# */ | 160 | break; |
241 | mem_sttime = SBC_IDE_PIO2_TWCS | 161 | |
242 | | SBC_IDE_PIO2_TCSH | 162 | case 4: |
243 | | SBC_IDE_PIO2_TCSOFF | 163 | mem_sttime = SBC_IDE_TIMING(PIO4); |
244 | | SBC_IDE_PIO2_TWP | 164 | |
245 | | SBC_IDE_PIO2_TCSW | 165 | /* set configuration for RCS2# */ |
246 | | SBC_IDE_PIO2_TPM | 166 | mem_stcfg &= ~TS_MASK; |
247 | | SBC_IDE_PIO2_TA; | 167 | mem_stcfg &= ~TCSOE_MASK; |
248 | /* set configuration for RCS2# */ | 168 | mem_stcfg &= ~TOECS_MASK; |
249 | mem_stcfg &= ~TS_MASK; | 169 | mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS; |
250 | mem_stcfg &= ~TCSOE_MASK; | 170 | break; |
251 | mem_stcfg &= ~TOECS_MASK; | 171 | } |
252 | mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS; | 172 | |
253 | break; | 173 | au_writel(mem_sttime,MEM_STTIME2); |
254 | 174 | au_writel(mem_stcfg,MEM_STCFG2); | |
255 | case 3: | 175 | |
256 | /* set timing parameters for RCS2# */ | 176 | speed = pio + XFER_PIO_0; |
257 | mem_sttime = SBC_IDE_PIO3_TWCS | 177 | ide_config_drive_speed(drive, speed); |
258 | | SBC_IDE_PIO3_TCSH | ||
259 | | SBC_IDE_PIO3_TCSOFF | ||
260 | | SBC_IDE_PIO3_TWP | ||
261 | | SBC_IDE_PIO3_TCSW | ||
262 | | SBC_IDE_PIO3_TPM | ||
263 | | SBC_IDE_PIO3_TA; | ||
264 | /* set configuration for RCS2# */ | ||
265 | mem_stcfg |= TS_MASK; | ||
266 | mem_stcfg &= ~TS_MASK; | ||
267 | mem_stcfg &= ~TCSOE_MASK; | ||
268 | mem_stcfg &= ~TOECS_MASK; | ||
269 | mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS; | ||
270 | |||
271 | break; | ||
272 | |||
273 | case 4: | ||
274 | /* set timing parameters for RCS2# */ | ||
275 | mem_sttime = SBC_IDE_PIO4_TWCS | ||
276 | | SBC_IDE_PIO4_TCSH | ||
277 | | SBC_IDE_PIO4_TCSOFF | ||
278 | | SBC_IDE_PIO4_TWP | ||
279 | | SBC_IDE_PIO4_TCSW | ||
280 | | SBC_IDE_PIO4_TPM | ||
281 | | SBC_IDE_PIO4_TA; | ||
282 | /* set configuration for RCS2# */ | ||
283 | mem_stcfg &= ~TS_MASK; | ||
284 | mem_stcfg &= ~TCSOE_MASK; | ||
285 | mem_stcfg &= ~TOECS_MASK; | ||
286 | mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS; | ||
287 | break; | ||
288 | } | ||
289 | |||
290 | au_writel(mem_sttime,MEM_STTIME2); | ||
291 | au_writel(mem_stcfg,MEM_STCFG2); | ||
292 | |||
293 | spin_unlock_irqrestore(&ide_tune_drive_spin_lock, flags); | ||
294 | |||
295 | speed = pio + XFER_PIO_0; | ||
296 | ide_config_drive_speed(drive, speed); | ||
297 | } | 178 | } |
298 | 179 | ||
299 | static int auide_tune_chipset (ide_drive_t *drive, u8 speed) | 180 | static int auide_tune_chipset (ide_drive_t *drive, u8 speed) |
300 | { | 181 | { |
301 | u8 mode = 0; | 182 | int mem_sttime; |
302 | int mem_sttime; | 183 | int mem_stcfg; |
303 | int mem_stcfg; | 184 | unsigned long mode; |
304 | unsigned long flags; | 185 | |
305 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 186 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
306 | struct hd_driveid *id = drive->id; | 187 | if (ide_use_dma(drive)) |
307 | 188 | mode = ide_dma_speed(drive, 0); | |
308 | /* | ||
309 | * Now see what the current drive is capable of, | ||
310 | * selecting UDMA only if the mate said it was ok. | ||
311 | */ | ||
312 | if (id && (id->capability & 1) && drive->autodma && | ||
313 | !__ide_dma_bad_drive(drive)) { | ||
314 | if (!mode && (id->field_valid & 2) && (id->dma_mword & 7)) { | ||
315 | if (id->dma_mword & 4) | ||
316 | mode = XFER_MW_DMA_2; | ||
317 | else if (id->dma_mword & 2) | ||
318 | mode = XFER_MW_DMA_1; | ||
319 | else if (id->dma_mword & 1) | ||
320 | mode = XFER_MW_DMA_0; | ||
321 | } | ||
322 | } | ||
323 | #endif | 189 | #endif |
324 | 190 | ||
325 | spin_lock_irqsave(&ide_tune_chipset_spin_lock, flags); | 191 | mem_sttime = 0; |
192 | mem_stcfg = au_readl(MEM_STCFG2); | ||
326 | 193 | ||
327 | mem_sttime = 0; | 194 | if (speed >= XFER_PIO_0 && speed <= XFER_PIO_4) { |
328 | mem_stcfg = au_readl(MEM_STCFG2); | 195 | auide_tune_drive(drive, speed - XFER_PIO_0); |
329 | 196 | return 0; | |
330 | switch(speed) { | 197 | } |
331 | case XFER_PIO_4: | 198 | |
332 | case XFER_PIO_3: | 199 | switch(speed) { |
333 | case XFER_PIO_2: | ||
334 | case XFER_PIO_1: | ||
335 | case XFER_PIO_0: | ||
336 | auide_tune_drive(drive, (speed - XFER_PIO_0)); | ||
337 | break; | ||
338 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 200 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
339 | case XFER_MW_DMA_2: | 201 | case XFER_MW_DMA_2: |
340 | /* set timing parameters for RCS2# */ | 202 | mem_sttime = SBC_IDE_TIMING(MDMA2); |
341 | mem_sttime = SBC_IDE_MDMA2_TWCS | 203 | |
342 | | SBC_IDE_MDMA2_TCSH | 204 | /* set configuration for RCS2# */ |
343 | | SBC_IDE_MDMA2_TCSOFF | 205 | mem_stcfg &= ~TS_MASK; |
344 | | SBC_IDE_MDMA2_TWP | 206 | mem_stcfg &= ~TCSOE_MASK; |
345 | | SBC_IDE_MDMA2_TCSW | 207 | mem_stcfg &= ~TOECS_MASK; |
346 | | SBC_IDE_MDMA2_TPM | 208 | mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS; |
347 | | SBC_IDE_MDMA2_TA; | 209 | |
348 | /* set configuration for RCS2# */ | 210 | mode = XFER_MW_DMA_2; |
349 | mem_stcfg &= ~TS_MASK; | 211 | break; |
350 | mem_stcfg &= ~TCSOE_MASK; | 212 | case XFER_MW_DMA_1: |
351 | mem_stcfg &= ~TOECS_MASK; | 213 | mem_sttime = SBC_IDE_TIMING(MDMA1); |
352 | mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS; | 214 | |
353 | 215 | /* set configuration for RCS2# */ | |
354 | mode = XFER_MW_DMA_2; | 216 | mem_stcfg &= ~TS_MASK; |
355 | break; | 217 | mem_stcfg &= ~TCSOE_MASK; |
356 | case XFER_MW_DMA_1: | 218 | mem_stcfg &= ~TOECS_MASK; |
357 | /* set timing parameters for RCS2# */ | 219 | mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS; |
358 | mem_sttime = SBC_IDE_MDMA1_TWCS | 220 | |
359 | | SBC_IDE_MDMA1_TCSH | 221 | mode = XFER_MW_DMA_1; |
360 | | SBC_IDE_MDMA1_TCSOFF | 222 | break; |
361 | | SBC_IDE_MDMA1_TWP | 223 | case XFER_MW_DMA_0: |
362 | | SBC_IDE_MDMA1_TCSW | 224 | mem_sttime = SBC_IDE_TIMING(MDMA0); |
363 | | SBC_IDE_MDMA1_TPM | 225 | |
364 | | SBC_IDE_MDMA1_TA; | 226 | /* set configuration for RCS2# */ |
365 | /* set configuration for RCS2# */ | 227 | mem_stcfg |= TS_MASK; |
366 | mem_stcfg &= ~TS_MASK; | 228 | mem_stcfg &= ~TCSOE_MASK; |
367 | mem_stcfg &= ~TCSOE_MASK; | 229 | mem_stcfg &= ~TOECS_MASK; |
368 | mem_stcfg &= ~TOECS_MASK; | 230 | mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS; |
369 | mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS; | 231 | |
370 | 232 | mode = XFER_MW_DMA_0; | |
371 | mode = XFER_MW_DMA_1; | 233 | break; |
372 | break; | ||
373 | case XFER_MW_DMA_0: | ||
374 | /* set timing parameters for RCS2# */ | ||
375 | mem_sttime = SBC_IDE_MDMA0_TWCS | ||
376 | | SBC_IDE_MDMA0_TCSH | ||
377 | | SBC_IDE_MDMA0_TCSOFF | ||
378 | | SBC_IDE_MDMA0_TWP | ||
379 | | SBC_IDE_MDMA0_TCSW | ||
380 | | SBC_IDE_MDMA0_TPM | ||
381 | | SBC_IDE_MDMA0_TA; | ||
382 | /* set configuration for RCS2# */ | ||
383 | mem_stcfg |= TS_MASK; | ||
384 | mem_stcfg &= ~TCSOE_MASK; | ||
385 | mem_stcfg &= ~TOECS_MASK; | ||
386 | mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS; | ||
387 | |||
388 | mode = XFER_MW_DMA_0; | ||
389 | break; | ||
390 | #endif | 234 | #endif |
391 | default: | 235 | default: |
392 | return 1; | 236 | return 1; |
393 | } | 237 | } |
394 | 238 | ||
395 | /* | 239 | if (ide_config_drive_speed(drive, mode)) |
396 | * Tell the drive to switch to the new mode; abort on failure. | 240 | return 1; |
397 | */ | ||
398 | if (!mode || ide_config_drive_speed(drive, mode)) | ||
399 | { | ||
400 | return 1; /* failure */ | ||
401 | } | ||
402 | |||
403 | |||
404 | au_writel(mem_sttime,MEM_STTIME2); | ||
405 | au_writel(mem_stcfg,MEM_STCFG2); | ||
406 | 241 | ||
407 | spin_unlock_irqrestore(&ide_tune_chipset_spin_lock, flags); | 242 | au_writel(mem_sttime,MEM_STTIME2); |
243 | au_writel(mem_stcfg,MEM_STCFG2); | ||
408 | 244 | ||
409 | return 0; | 245 | return 0; |
410 | } | 246 | } |
411 | 247 | ||
412 | /* | 248 | /* |
413 | * Multi-Word DMA + DbDMA functions | 249 | * Multi-Word DMA + DbDMA functions |
414 | */ | 250 | */ |
415 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | ||
416 | 251 | ||
417 | static int in_drive_list(struct hd_driveid *id, | 252 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
418 | const struct drive_list_entry *drive_table) | ||
419 | { | ||
420 | for ( ; drive_table->id_model ; drive_table++){ | ||
421 | if ((!strcmp(drive_table->id_model, id->model)) && | ||
422 | ((strstr(drive_table->id_firmware, id->fw_rev)) || | ||
423 | (!strcmp(drive_table->id_firmware, "ALL"))) | ||
424 | ) | ||
425 | return 1; | ||
426 | } | ||
427 | return 0; | ||
428 | } | ||
429 | 253 | ||
430 | static int auide_build_sglist(ide_drive_t *drive, struct request *rq) | 254 | static int auide_build_sglist(ide_drive_t *drive, struct request *rq) |
431 | { | 255 | { |
432 | ide_hwif_t *hwif = drive->hwif; | 256 | ide_hwif_t *hwif = drive->hwif; |
433 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; | 257 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; |
434 | struct scatterlist *sg = hwif->sg_table; | 258 | struct scatterlist *sg = hwif->sg_table; |
435 | 259 | ||
436 | ide_map_sg(drive, rq); | 260 | ide_map_sg(drive, rq); |
437 | 261 | ||
438 | if (rq_data_dir(rq) == READ) | 262 | if (rq_data_dir(rq) == READ) |
439 | hwif->sg_dma_direction = DMA_FROM_DEVICE; | 263 | hwif->sg_dma_direction = DMA_FROM_DEVICE; |
440 | else | 264 | else |
441 | hwif->sg_dma_direction = DMA_TO_DEVICE; | 265 | hwif->sg_dma_direction = DMA_TO_DEVICE; |
442 | 266 | ||
443 | return dma_map_sg(ahwif->dev, sg, hwif->sg_nents, | 267 | return dma_map_sg(ahwif->dev, sg, hwif->sg_nents, |
444 | hwif->sg_dma_direction); | 268 | hwif->sg_dma_direction); |
445 | } | 269 | } |
446 | 270 | ||
447 | static int auide_build_dmatable(ide_drive_t *drive) | 271 | static int auide_build_dmatable(ide_drive_t *drive) |
448 | { | 272 | { |
449 | int i, iswrite, count = 0; | 273 | int i, iswrite, count = 0; |
450 | ide_hwif_t *hwif = HWIF(drive); | 274 | ide_hwif_t *hwif = HWIF(drive); |
451 | 275 | ||
452 | struct request *rq = HWGROUP(drive)->rq; | 276 | struct request *rq = HWGROUP(drive)->rq; |
453 | 277 | ||
454 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; | 278 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; |
455 | struct scatterlist *sg; | 279 | struct scatterlist *sg; |
456 | 280 | ||
457 | iswrite = (rq_data_dir(rq) == WRITE); | 281 | iswrite = (rq_data_dir(rq) == WRITE); |
458 | /* Save for interrupt context */ | 282 | /* Save for interrupt context */ |
459 | ahwif->drive = drive; | 283 | ahwif->drive = drive; |
460 | 284 | ||
461 | /* Build sglist */ | 285 | /* Build sglist */ |
462 | hwif->sg_nents = i = auide_build_sglist(drive, rq); | 286 | hwif->sg_nents = i = auide_build_sglist(drive, rq); |
463 | 287 | ||
464 | if (!i) | 288 | if (!i) |
465 | return 0; | 289 | return 0; |
466 | 290 | ||
467 | /* fill the descriptors */ | 291 | /* fill the descriptors */ |
468 | sg = hwif->sg_table; | 292 | sg = hwif->sg_table; |
469 | while (i && sg_dma_len(sg)) { | 293 | while (i && sg_dma_len(sg)) { |
470 | u32 cur_addr; | 294 | u32 cur_addr; |
471 | u32 cur_len; | 295 | u32 cur_len; |
472 | 296 | ||
473 | cur_addr = sg_dma_address(sg); | 297 | cur_addr = sg_dma_address(sg); |
474 | cur_len = sg_dma_len(sg); | 298 | cur_len = sg_dma_len(sg); |
475 | 299 | ||
476 | while (cur_len) { | 300 | while (cur_len) { |
477 | u32 flags = DDMA_FLAGS_NOIE; | 301 | u32 flags = DDMA_FLAGS_NOIE; |
478 | unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00; | 302 | unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00; |
479 | 303 | ||
480 | if (++count >= PRD_ENTRIES) { | 304 | if (++count >= PRD_ENTRIES) { |
481 | printk(KERN_WARNING "%s: DMA table too small\n", | 305 | printk(KERN_WARNING "%s: DMA table too small\n", |
482 | drive->name); | 306 | drive->name); |
483 | goto use_pio_instead; | 307 | goto use_pio_instead; |
484 | } | 308 | } |
485 | 309 | ||
486 | /* Lets enable intr for the last descriptor only */ | 310 | /* Lets enable intr for the last descriptor only */ |
487 | if (1==i) | 311 | if (1==i) |
488 | flags = DDMA_FLAGS_IE; | 312 | flags = DDMA_FLAGS_IE; |
489 | else | 313 | else |
490 | flags = DDMA_FLAGS_NOIE; | 314 | flags = DDMA_FLAGS_NOIE; |
491 | 315 | ||
492 | if (iswrite) { | 316 | if (iswrite) { |
493 | if(!put_source_flags(ahwif->tx_chan, | 317 | if(!put_source_flags(ahwif->tx_chan, |
494 | (void*)(page_address(sg->page) | 318 | (void*)(page_address(sg->page) |
495 | + sg->offset), | 319 | + sg->offset), |
496 | tc, flags)) { | 320 | tc, flags)) { |
497 | printk(KERN_ERR "%s failed %d\n", | 321 | printk(KERN_ERR "%s failed %d\n", |
498 | __FUNCTION__, __LINE__); | 322 | __FUNCTION__, __LINE__); |
499 | } | 323 | } |
500 | } else | 324 | } else |
501 | { | 325 | { |
502 | if(!put_dest_flags(ahwif->rx_chan, | 326 | if(!put_dest_flags(ahwif->rx_chan, |
503 | (void*)(page_address(sg->page) | 327 | (void*)(page_address(sg->page) |
504 | + sg->offset), | 328 | + sg->offset), |
505 | tc, flags)) { | 329 | tc, flags)) { |
506 | printk(KERN_ERR "%s failed %d\n", | 330 | printk(KERN_ERR "%s failed %d\n", |
507 | __FUNCTION__, __LINE__); | 331 | __FUNCTION__, __LINE__); |
508 | } | 332 | } |
509 | } | 333 | } |
510 | 334 | ||
511 | cur_addr += tc; | 335 | cur_addr += tc; |
512 | cur_len -= tc; | 336 | cur_len -= tc; |
513 | } | 337 | } |
514 | sg++; | 338 | sg++; |
515 | i--; | 339 | i--; |
516 | } | 340 | } |
517 | 341 | ||
518 | if (count) | 342 | if (count) |
519 | return 1; | 343 | return 1; |
520 | 344 | ||
521 | use_pio_instead: | 345 | use_pio_instead: |
522 | dma_unmap_sg(ahwif->dev, | 346 | dma_unmap_sg(ahwif->dev, |
523 | hwif->sg_table, | 347 | hwif->sg_table, |
524 | hwif->sg_nents, | 348 | hwif->sg_nents, |
525 | hwif->sg_dma_direction); | 349 | hwif->sg_dma_direction); |
526 | 350 | ||
527 | return 0; /* revert to PIO for this request */ | 351 | return 0; /* revert to PIO for this request */ |
528 | } | 352 | } |
529 | 353 | ||
530 | static int auide_dma_end(ide_drive_t *drive) | 354 | static int auide_dma_end(ide_drive_t *drive) |
531 | { | 355 | { |
532 | ide_hwif_t *hwif = HWIF(drive); | 356 | ide_hwif_t *hwif = HWIF(drive); |
533 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; | 357 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; |
534 | 358 | ||
535 | if (hwif->sg_nents) { | 359 | if (hwif->sg_nents) { |
536 | dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents, | 360 | dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents, |
537 | hwif->sg_dma_direction); | 361 | hwif->sg_dma_direction); |
538 | hwif->sg_nents = 0; | 362 | hwif->sg_nents = 0; |
539 | } | 363 | } |
540 | 364 | ||
541 | return 0; | 365 | return 0; |
542 | } | 366 | } |
543 | 367 | ||
544 | static void auide_dma_start(ide_drive_t *drive ) | 368 | static void auide_dma_start(ide_drive_t *drive ) |
545 | { | 369 | { |
546 | // printk("%s\n", __FUNCTION__); | ||
547 | } | 370 | } |
548 | 371 | ||
549 | ide_startstop_t auide_dma_intr(ide_drive_t *drive) | ||
550 | { | ||
551 | //printk("%s\n", __FUNCTION__); | ||
552 | |||
553 | u8 stat = 0, dma_stat = 0; | ||
554 | |||
555 | dma_stat = HWIF(drive)->ide_dma_end(drive); | ||
556 | stat = HWIF(drive)->INB(IDE_STATUS_REG); /* get drive status */ | ||
557 | if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { | ||
558 | if (!dma_stat) { | ||
559 | struct request *rq = HWGROUP(drive)->rq; | ||
560 | |||
561 | ide_end_request(drive, 1, rq->nr_sectors); | ||
562 | return ide_stopped; | ||
563 | } | ||
564 | printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n", | ||
565 | drive->name, dma_stat); | ||
566 | } | ||
567 | return ide_error(drive, "dma_intr", stat); | ||
568 | } | ||
569 | 372 | ||
570 | static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command) | 373 | static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command) |
571 | { | 374 | { |
572 | //printk("%s\n", __FUNCTION__); | 375 | /* issue cmd to drive */ |
573 | 376 | ide_execute_command(drive, command, &ide_dma_intr, | |
574 | /* issue cmd to drive */ | 377 | (2*WAIT_CMD), NULL); |
575 | ide_execute_command(drive, command, &auide_dma_intr, | ||
576 | (2*WAIT_CMD), NULL); | ||
577 | } | 378 | } |
578 | 379 | ||
579 | static int auide_dma_setup(ide_drive_t *drive) | 380 | static int auide_dma_setup(ide_drive_t *drive) |
580 | { | 381 | { |
581 | // printk("%s\n", __FUNCTION__); | 382 | struct request *rq = HWGROUP(drive)->rq; |
582 | |||
583 | if (drive->media != ide_disk) | ||
584 | return 1; | ||
585 | |||
586 | if (!auide_build_dmatable(drive)) | ||
587 | /* try PIO instead of DMA */ | ||
588 | return 1; | ||
589 | 383 | ||
590 | drive->waiting_for_dma = 1; | 384 | if (!auide_build_dmatable(drive)) { |
385 | ide_map_sg(drive, rq); | ||
386 | return 1; | ||
387 | } | ||
591 | 388 | ||
592 | return 0; | 389 | drive->waiting_for_dma = 1; |
390 | return 0; | ||
593 | } | 391 | } |
594 | 392 | ||
595 | static int auide_dma_check(ide_drive_t *drive) | 393 | static int auide_dma_check(ide_drive_t *drive) |
596 | { | 394 | { |
597 | // printk("%s\n", __FUNCTION__); | 395 | u8 speed; |
598 | 396 | ||
599 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 397 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
600 | if( !dbdma_init_done ){ | 398 | |
601 | auide_hwif.white_list = in_drive_list(drive->id, | 399 | if( dbdma_init_done == 0 ){ |
602 | dma_white_list); | 400 | auide_hwif.white_list = ide_in_drive_list(drive->id, |
603 | auide_hwif.black_list = in_drive_list(drive->id, | 401 | dma_white_list); |
604 | dma_black_list); | 402 | auide_hwif.black_list = ide_in_drive_list(drive->id, |
605 | auide_hwif.drive = drive; | 403 | dma_black_list); |
606 | auide_ddma_init(&auide_hwif); | 404 | auide_hwif.drive = drive; |
607 | dbdma_init_done = 1; | 405 | auide_ddma_init(&auide_hwif); |
608 | } | 406 | dbdma_init_done = 1; |
407 | } | ||
609 | #endif | 408 | #endif |
610 | 409 | ||
611 | /* Is the drive in our DMA black list? */ | 410 | /* Is the drive in our DMA black list? */ |
612 | if ( auide_hwif.black_list ) { | 411 | |
613 | drive->using_dma = 0; | 412 | if ( auide_hwif.black_list ) { |
614 | printk("%s found in dma_blacklist[]! Disabling DMA.\n", | 413 | drive->using_dma = 0; |
615 | drive->id->model); | 414 | |
616 | } | 415 | /* Borrowed the warning message from ide-dma.c */ |
617 | else | ||
618 | drive->using_dma = 1; | ||
619 | 416 | ||
620 | return HWIF(drive)->ide_dma_host_on(drive); | 417 | printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n", |
418 | drive->name, drive->id->model); | ||
419 | } | ||
420 | else | ||
421 | drive->using_dma = 1; | ||
422 | |||
423 | speed = ide_find_best_mode(drive, XFER_PIO | XFER_MWDMA); | ||
424 | |||
425 | if (drive->autodma && (speed & XFER_MODE) != XFER_PIO) | ||
426 | return HWIF(drive)->ide_dma_on(drive); | ||
427 | |||
428 | return HWIF(drive)->ide_dma_off_quietly(drive); | ||
621 | } | 429 | } |
622 | 430 | ||
623 | static int auide_dma_test_irq(ide_drive_t *drive) | 431 | static int auide_dma_test_irq(ide_drive_t *drive) |
624 | { | 432 | { |
625 | // printk("%s\n", __FUNCTION__); | 433 | if (drive->waiting_for_dma == 0) |
626 | 434 | printk(KERN_WARNING "%s: ide_dma_test_irq \ | |
627 | if (!drive->waiting_for_dma) | ||
628 | printk(KERN_WARNING "%s: ide_dma_test_irq \ | ||
629 | called while not waiting\n", drive->name); | 435 | called while not waiting\n", drive->name); |
630 | 436 | ||
631 | /* If dbdma didn't execute the STOP command yet, the | 437 | /* If dbdma didn't execute the STOP command yet, the |
632 | * active bit is still set | 438 | * active bit is still set |
633 | */ | 439 | */ |
634 | drive->waiting_for_dma++; | 440 | drive->waiting_for_dma++; |
635 | if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) { | 441 | if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) { |
636 | printk(KERN_WARNING "%s: timeout waiting for ddma to \ | 442 | printk(KERN_WARNING "%s: timeout waiting for ddma to \ |
637 | complete\n", drive->name); | 443 | complete\n", drive->name); |
638 | return 1; | 444 | return 1; |
639 | } | 445 | } |
640 | udelay(10); | 446 | udelay(10); |
641 | return 0; | 447 | return 0; |
642 | } | 448 | } |
643 | 449 | ||
644 | static int auide_dma_host_on(ide_drive_t *drive) | 450 | static int auide_dma_host_on(ide_drive_t *drive) |
645 | { | 451 | { |
646 | // printk("%s\n", __FUNCTION__); | 452 | return 0; |
647 | return 0; | ||
648 | } | 453 | } |
649 | 454 | ||
650 | static int auide_dma_on(ide_drive_t *drive) | 455 | static int auide_dma_on(ide_drive_t *drive) |
651 | { | 456 | { |
652 | // printk("%s\n", __FUNCTION__); | 457 | drive->using_dma = 1; |
653 | drive->using_dma = 1; | 458 | return auide_dma_host_on(drive); |
654 | return auide_dma_host_on(drive); | ||
655 | } | 459 | } |
656 | 460 | ||
657 | 461 | ||
658 | static int auide_dma_host_off(ide_drive_t *drive) | 462 | static int auide_dma_host_off(ide_drive_t *drive) |
659 | { | 463 | { |
660 | // printk("%s\n", __FUNCTION__); | 464 | return 0; |
661 | return 0; | ||
662 | } | 465 | } |
663 | 466 | ||
664 | static int auide_dma_off_quietly(ide_drive_t *drive) | 467 | static int auide_dma_off_quietly(ide_drive_t *drive) |
665 | { | 468 | { |
666 | // printk("%s\n", __FUNCTION__); | 469 | drive->using_dma = 0; |
667 | drive->using_dma = 0; | 470 | return auide_dma_host_off(drive); |
668 | return auide_dma_host_off(drive); | ||
669 | } | 471 | } |
670 | 472 | ||
671 | static int auide_dma_lostirq(ide_drive_t *drive) | 473 | static int auide_dma_lostirq(ide_drive_t *drive) |
672 | { | 474 | { |
673 | // printk("%s\n", __FUNCTION__); | 475 | printk(KERN_ERR "%s: IRQ lost\n", drive->name); |
674 | 476 | return 0; | |
675 | printk(KERN_ERR "%s: IRQ lost\n", drive->name); | ||
676 | return 0; | ||
677 | } | 477 | } |
678 | 478 | ||
679 | static void auide_ddma_tx_callback(int irq, void *param, struct pt_regs *regs) | 479 | static void auide_ddma_tx_callback(int irq, void *param, struct pt_regs *regs) |
680 | { | 480 | { |
681 | // printk("%s\n", __FUNCTION__); | 481 | _auide_hwif *ahwif = (_auide_hwif*)param; |
682 | 482 | ahwif->drive->waiting_for_dma = 0; | |
683 | _auide_hwif *ahwif = (_auide_hwif*)param; | ||
684 | ahwif->drive->waiting_for_dma = 0; | ||
685 | return; | ||
686 | } | 483 | } |
687 | 484 | ||
688 | static void auide_ddma_rx_callback(int irq, void *param, struct pt_regs *regs) | 485 | static void auide_ddma_rx_callback(int irq, void *param, struct pt_regs *regs) |
689 | { | 486 | { |
690 | // printk("%s\n", __FUNCTION__); | 487 | _auide_hwif *ahwif = (_auide_hwif*)param; |
488 | ahwif->drive->waiting_for_dma = 0; | ||
489 | } | ||
490 | |||
491 | #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ | ||
691 | 492 | ||
692 | _auide_hwif *ahwif = (_auide_hwif*)param; | 493 | static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags) |
693 | ahwif->drive->waiting_for_dma = 0; | 494 | { |
694 | return; | 495 | dev->dev_id = dev_id; |
496 | dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
497 | dev->dev_intlevel = 0; | ||
498 | dev->dev_intpolarity = 0; | ||
499 | dev->dev_tsize = tsize; | ||
500 | dev->dev_devwidth = devwidth; | ||
501 | dev->dev_flags = flags; | ||
695 | } | 502 | } |
503 | |||
504 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | ||
696 | 505 | ||
697 | static int auide_dma_timeout(ide_drive_t *drive) | 506 | static int auide_dma_timeout(ide_drive_t *drive) |
698 | { | 507 | { |
699 | // printk("%s\n", __FUNCTION__); | 508 | // printk("%s\n", __FUNCTION__); |
700 | 509 | ||
701 | printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); | 510 | printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); |
702 | 511 | ||
703 | if (HWIF(drive)->ide_dma_test_irq(drive)) | 512 | if (HWIF(drive)->ide_dma_test_irq(drive)) |
704 | return 0; | 513 | return 0; |
705 | 514 | ||
706 | return HWIF(drive)->ide_dma_end(drive); | 515 | return HWIF(drive)->ide_dma_end(drive); |
707 | } | 516 | } |
708 | #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ | 517 | |
709 | 518 | ||
519 | static int auide_ddma_init(_auide_hwif *auide) { | ||
520 | |||
521 | dbdev_tab_t source_dev_tab, target_dev_tab; | ||
522 | u32 dev_id, tsize, devwidth, flags; | ||
523 | ide_hwif_t *hwif = auide->hwif; | ||
710 | 524 | ||
711 | static int auide_ddma_init( _auide_hwif *auide ) | 525 | dev_id = AU1XXX_ATA_DDMA_REQ; |
712 | { | ||
713 | // printk("%s\n", __FUNCTION__); | ||
714 | 526 | ||
715 | dbdev_tab_t source_dev_tab; | 527 | if (auide->white_list || auide->black_list) { |
716 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | 528 | tsize = 8; |
717 | dbdev_tab_t target_dev_tab; | 529 | devwidth = 32; |
718 | ide_hwif_t *hwif = auide->hwif; | 530 | } |
719 | char warning_output [2][80]; | 531 | else { |
720 | int i; | 532 | tsize = 1; |
721 | #endif | 533 | devwidth = 16; |
534 | |||
535 | printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model); | ||
536 | printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'"); | ||
537 | } | ||
722 | 538 | ||
723 | /* Add our custom device to DDMA device table */ | 539 | #ifdef IDE_AU1XXX_BURSTMODE |
724 | /* Create our new device entries in the table */ | 540 | flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE; |
725 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | ||
726 | source_dev_tab.dev_id = AU1XXX_ATA_DDMA_REQ; | ||
727 | |||
728 | if( auide->white_list || auide->black_list ){ | ||
729 | source_dev_tab.dev_tsize = 8; | ||
730 | source_dev_tab.dev_devwidth = 32; | ||
731 | source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
732 | source_dev_tab.dev_intlevel = 0; | ||
733 | source_dev_tab.dev_intpolarity = 0; | ||
734 | |||
735 | /* init device table for target - static bus controller - */ | ||
736 | target_dev_tab.dev_id = DSCR_CMD0_ALWAYS; | ||
737 | target_dev_tab.dev_tsize = 8; | ||
738 | target_dev_tab.dev_devwidth = 32; | ||
739 | target_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
740 | target_dev_tab.dev_intlevel = 0; | ||
741 | target_dev_tab.dev_intpolarity = 0; | ||
742 | target_dev_tab.dev_flags = DEV_FLAGS_ANYUSE; | ||
743 | } | ||
744 | else{ | ||
745 | source_dev_tab.dev_tsize = 1; | ||
746 | source_dev_tab.dev_devwidth = 16; | ||
747 | source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
748 | source_dev_tab.dev_intlevel = 0; | ||
749 | source_dev_tab.dev_intpolarity = 0; | ||
750 | |||
751 | /* init device table for target - static bus controller - */ | ||
752 | target_dev_tab.dev_id = DSCR_CMD0_ALWAYS; | ||
753 | target_dev_tab.dev_tsize = 1; | ||
754 | target_dev_tab.dev_devwidth = 16; | ||
755 | target_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
756 | target_dev_tab.dev_intlevel = 0; | ||
757 | target_dev_tab.dev_intpolarity = 0; | ||
758 | target_dev_tab.dev_flags = DEV_FLAGS_ANYUSE; | ||
759 | |||
760 | sprintf(&warning_output[0][0], | ||
761 | "%s is not on ide driver white list.", | ||
762 | auide_hwif.drive->id->model); | ||
763 | for ( i=strlen(&warning_output[0][0]) ; i<76; i++ ){ | ||
764 | sprintf(&warning_output[0][i]," "); | ||
765 | } | ||
766 | |||
767 | sprintf(&warning_output[1][0], | ||
768 | "To add %s please read 'Documentation/mips/AU1xxx_IDE.README'.", | ||
769 | auide_hwif.drive->id->model); | ||
770 | for ( i=strlen(&warning_output[1][0]) ; i<76; i++ ){ | ||
771 | sprintf(&warning_output[1][i]," "); | ||
772 | } | ||
773 | |||
774 | printk("\n****************************************"); | ||
775 | printk("****************************************\n"); | ||
776 | printk("* %s *\n",&warning_output[0][0]); | ||
777 | printk("* Switch to safe MWDMA Mode! "); | ||
778 | printk(" *\n"); | ||
779 | printk("* %s *\n",&warning_output[1][0]); | ||
780 | printk("****************************************"); | ||
781 | printk("****************************************\n\n"); | ||
782 | } | ||
783 | #else | 541 | #else |
784 | source_dev_tab.dev_id = DSCR_CMD0_ALWAYS; | 542 | flags = DEV_FLAGS_SYNC; |
785 | source_dev_tab.dev_tsize = 8; | ||
786 | source_dev_tab.dev_devwidth = 32; | ||
787 | source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
788 | source_dev_tab.dev_intlevel = 0; | ||
789 | source_dev_tab.dev_intpolarity = 0; | ||
790 | #endif | 543 | #endif |
791 | 544 | ||
792 | #if CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON | 545 | /* setup dev_tab for tx channel */ |
793 | /* set flags for tx channel */ | 546 | auide_init_dbdma_dev( &source_dev_tab, |
794 | source_dev_tab.dev_flags = DEV_FLAGS_OUT | 547 | dev_id, |
795 | | DEV_FLAGS_SYNC | 548 | tsize, devwidth, DEV_FLAGS_OUT | flags); |
796 | | DEV_FLAGS_BURSTABLE; | 549 | auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); |
797 | auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); | 550 | |
798 | /* set flags for rx channel */ | 551 | auide_init_dbdma_dev( &source_dev_tab, |
799 | source_dev_tab.dev_flags = DEV_FLAGS_IN | 552 | dev_id, |
800 | | DEV_FLAGS_SYNC | 553 | tsize, devwidth, DEV_FLAGS_IN | flags); |
801 | | DEV_FLAGS_BURSTABLE; | 554 | auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); |
802 | auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); | 555 | |
556 | /* We also need to add a target device for the DMA */ | ||
557 | auide_init_dbdma_dev( &target_dev_tab, | ||
558 | (u32)DSCR_CMD0_ALWAYS, | ||
559 | tsize, devwidth, DEV_FLAGS_ANYUSE); | ||
560 | auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab); | ||
561 | |||
562 | /* Get a channel for TX */ | ||
563 | auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id, | ||
564 | auide->tx_dev_id, | ||
565 | auide_ddma_tx_callback, | ||
566 | (void*)auide); | ||
567 | |||
568 | /* Get a channel for RX */ | ||
569 | auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id, | ||
570 | auide->target_dev_id, | ||
571 | auide_ddma_rx_callback, | ||
572 | (void*)auide); | ||
573 | |||
574 | auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan, | ||
575 | NUM_DESCRIPTORS); | ||
576 | auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan, | ||
577 | NUM_DESCRIPTORS); | ||
578 | |||
579 | hwif->dmatable_cpu = dma_alloc_coherent(auide->dev, | ||
580 | PRD_ENTRIES * PRD_BYTES, /* 1 Page */ | ||
581 | &hwif->dmatable_dma, GFP_KERNEL); | ||
582 | |||
583 | au1xxx_dbdma_start( auide->tx_chan ); | ||
584 | au1xxx_dbdma_start( auide->rx_chan ); | ||
585 | |||
586 | return 0; | ||
587 | } | ||
803 | #else | 588 | #else |
804 | /* set flags for tx channel */ | 589 | |
805 | source_dev_tab.dev_flags = DEV_FLAGS_OUT | DEV_FLAGS_SYNC; | 590 | static int auide_ddma_init( _auide_hwif *auide ) |
806 | auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); | 591 | { |
807 | /* set flags for rx channel */ | 592 | dbdev_tab_t source_dev_tab; |
808 | source_dev_tab.dev_flags = DEV_FLAGS_IN | DEV_FLAGS_SYNC; | 593 | int flags; |
809 | auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); | ||
810 | #endif | ||
811 | 594 | ||
812 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | 595 | #ifdef IDE_AU1XXX_BURSTMODE |
813 | 596 | flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE; | |
814 | auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab); | 597 | #else |
815 | 598 | flags = DEV_FLAGS_SYNC; | |
816 | /* Get a channel for TX */ | ||
817 | auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id, | ||
818 | auide->tx_dev_id, | ||
819 | auide_ddma_tx_callback, | ||
820 | (void*)auide); | ||
821 | /* Get a channel for RX */ | ||
822 | auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id, | ||
823 | auide->target_dev_id, | ||
824 | auide_ddma_rx_callback, | ||
825 | (void*)auide); | ||
826 | #else /* CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA */ | ||
827 | /* | ||
828 | * Note: if call back is not enabled, update ctp->cur_ptr manually | ||
829 | */ | ||
830 | auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS, | ||
831 | auide->tx_dev_id, | ||
832 | NULL, | ||
833 | (void*)auide); | ||
834 | auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id, | ||
835 | DSCR_CMD0_ALWAYS, | ||
836 | NULL, | ||
837 | (void*)auide); | ||
838 | #endif | 599 | #endif |
839 | auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan, | ||
840 | NUM_DESCRIPTORS); | ||
841 | auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan, | ||
842 | NUM_DESCRIPTORS); | ||
843 | 600 | ||
844 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | 601 | /* setup dev_tab for tx channel */ |
845 | hwif->dmatable_cpu = dma_alloc_coherent(auide->dev, | 602 | auide_init_dbdma_dev( &source_dev_tab, |
846 | PRD_ENTRIES * PRD_BYTES, /* 1 Page */ | 603 | (u32)DSCR_CMD0_ALWAYS, |
847 | &hwif->dmatable_dma, GFP_KERNEL); | 604 | 8, 32, DEV_FLAGS_OUT | flags); |
848 | 605 | auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); | |
849 | auide->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES, | 606 | |
850 | GFP_KERNEL|GFP_DMA); | 607 | auide_init_dbdma_dev( &source_dev_tab, |
851 | if (auide->sg_table == NULL) { | 608 | (u32)DSCR_CMD0_ALWAYS, |
852 | return -ENOMEM; | 609 | 8, 32, DEV_FLAGS_IN | flags); |
853 | } | 610 | auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); |
854 | #endif | 611 | |
855 | au1xxx_dbdma_start( auide->tx_chan ); | 612 | /* Get a channel for TX */ |
856 | au1xxx_dbdma_start( auide->rx_chan ); | 613 | auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS, |
857 | return 0; | 614 | auide->tx_dev_id, |
615 | NULL, | ||
616 | (void*)auide); | ||
617 | |||
618 | /* Get a channel for RX */ | ||
619 | auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id, | ||
620 | DSCR_CMD0_ALWAYS, | ||
621 | NULL, | ||
622 | (void*)auide); | ||
623 | |||
624 | auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan, | ||
625 | NUM_DESCRIPTORS); | ||
626 | auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan, | ||
627 | NUM_DESCRIPTORS); | ||
628 | |||
629 | au1xxx_dbdma_start( auide->tx_chan ); | ||
630 | au1xxx_dbdma_start( auide->rx_chan ); | ||
631 | |||
632 | return 0; | ||
858 | } | 633 | } |
634 | #endif | ||
859 | 635 | ||
860 | static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) | 636 | static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) |
861 | { | 637 | { |
862 | int i; | 638 | int i; |
863 | #define ide_ioreg_t unsigned long | 639 | unsigned long *ata_regs = hw->io_ports; |
864 | ide_ioreg_t *ata_regs = hw->io_ports; | 640 | |
865 | 641 | /* FIXME? */ | |
866 | /* fixme */ | 642 | for (i = 0; i < IDE_CONTROL_OFFSET; i++) { |
867 | for (i = 0; i < IDE_CONTROL_OFFSET; i++) { | 643 | *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET); |
868 | *ata_regs++ = (ide_ioreg_t) ahwif->regbase | 644 | } |
869 | + (ide_ioreg_t)(i << AU1XXX_ATA_REG_OFFSET); | 645 | |
870 | } | 646 | /* set the Alternative Status register */ |
871 | 647 | *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET); | |
872 | /* set the Alternative Status register */ | ||
873 | *ata_regs = (ide_ioreg_t) ahwif->regbase | ||
874 | + (ide_ioreg_t)(14 << AU1XXX_ATA_REG_OFFSET); | ||
875 | } | 648 | } |
876 | 649 | ||
877 | static int au_ide_probe(struct device *dev) | 650 | static int au_ide_probe(struct device *dev) |
878 | { | 651 | { |
879 | struct platform_device *pdev = to_platform_device(dev); | 652 | struct platform_device *pdev = to_platform_device(dev); |
880 | _auide_hwif *ahwif = &auide_hwif; | 653 | _auide_hwif *ahwif = &auide_hwif; |
881 | ide_hwif_t *hwif; | 654 | ide_hwif_t *hwif; |
882 | struct resource *res; | 655 | struct resource *res; |
883 | int ret = 0; | 656 | int ret = 0; |
884 | 657 | ||
885 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | 658 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) |
886 | char *mode = "MWDMA2"; | 659 | char *mode = "MWDMA2"; |
887 | #elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) | 660 | #elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) |
888 | char *mode = "PIO+DDMA(offload)"; | 661 | char *mode = "PIO+DDMA(offload)"; |
889 | #endif | 662 | #endif |
890 | 663 | ||
891 | memset(&auide_hwif, 0, sizeof(_auide_hwif)); | 664 | memset(&auide_hwif, 0, sizeof(_auide_hwif)); |
892 | auide_hwif.dev = 0; | 665 | auide_hwif.dev = 0; |
893 | 666 | ||
894 | ahwif->dev = dev; | 667 | ahwif->dev = dev; |
895 | ahwif->irq = platform_get_irq(pdev, 0); | 668 | ahwif->irq = platform_get_irq(pdev, 0); |
@@ -902,11 +675,11 @@ static int au_ide_probe(struct device *dev)
902 | goto out; | 675 | goto out; |
903 | } | 676 | } |
904 | 677 | ||
905 | if (!request_mem_region (res->start, res->end-res->start, pdev->name)) { | 678 | if (!request_mem_region (res->start, res->end-res->start, pdev->name)) { |
906 | pr_debug("%s: request_mem_region failed\n", DRV_NAME); | 679 | pr_debug("%s: request_mem_region failed\n", DRV_NAME); |
907 | ret = -EBUSY; | 680 | ret = -EBUSY; |
908 | goto out; | 681 | goto out; |
909 | } | 682 | } |
910 | 683 | ||
911 | ahwif->regbase = (u32)ioremap(res->start, res->end-res->start); | 684 | ahwif->regbase = (u32)ioremap(res->start, res->end-res->start); |
912 | if (ahwif->regbase == 0) { | 685 | if (ahwif->regbase == 0) { |
@@ -914,130 +687,92 @@ static int au_ide_probe(struct device *dev)
914 | goto out; | 687 | goto out; |
915 | } | 688 | } |
916 | 689 | ||
917 | hwif = &ide_hwifs[pdev->id]; | 690 | /* FIXME: This might possibly break PCMCIA IDE devices */ |
691 | |||
692 | hwif = &ide_hwifs[pdev->id]; | ||
918 | hw_regs_t *hw = &hwif->hw; | 693 | hw_regs_t *hw = &hwif->hw; |
919 | hwif->irq = hw->irq = ahwif->irq; | 694 | hwif->irq = hw->irq = ahwif->irq; |
920 | hwif->chipset = ide_au1xxx; | 695 | hwif->chipset = ide_au1xxx; |
921 | 696 | ||
922 | auide_setup_ports(hw, ahwif); | 697 | auide_setup_ports(hw, ahwif); |
923 | memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports)); | 698 | memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports)); |
924 | 699 | ||
925 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ | 700 | hwif->ultra_mask = 0x0; /* Disable Ultra DMA */ |
926 | hwif->rqsize = CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ; | ||
927 | hwif->rqsize = ((hwif->rqsize > AU1XXX_ATA_RQSIZE) | ||
928 | || (hwif->rqsize < 32)) ? AU1XXX_ATA_RQSIZE : hwif->rqsize; | ||
929 | #else /* if kernel config is not set */ | ||
930 | hwif->rqsize = AU1XXX_ATA_RQSIZE; | ||
931 | #endif | ||
932 | |||
933 | hwif->ultra_mask = 0x0; /* Disable Ultra DMA */ | ||
934 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 701 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
935 | hwif->mwdma_mask = 0x07; /* Multimode-2 DMA */ | 702 | hwif->mwdma_mask = 0x07; /* Multimode-2 DMA */ |
936 | hwif->swdma_mask = 0x07; | 703 | hwif->swdma_mask = 0x00; |
937 | #else | 704 | #else |
938 | hwif->mwdma_mask = 0x0; | 705 | hwif->mwdma_mask = 0x0; |
939 | hwif->swdma_mask = 0x0; | 706 | hwif->swdma_mask = 0x0; |
707 | #endif | ||
708 | |||
709 | hwif->noprobe = 0; | ||
710 | hwif->drives[0].unmask = 1; | ||
711 | hwif->drives[1].unmask = 1; | ||
712 | |||
713 | /* hold should be on in all cases */ | ||
714 | hwif->hold = 1; | ||
715 | hwif->mmio = 2; | ||
716 | |||
717 | /* If the user has selected DDMA assisted copies, | ||
718 | then set up a few local I/O function entry points | ||
719 | */ | ||
720 | |||
721 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA | ||
722 | hwif->INSW = auide_insw; | ||
723 | hwif->OUTSW = auide_outsw; | ||
940 | #endif | 724 | #endif |
941 | //hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET]; | 725 | |
942 | hwif->noprobe = 0; | 726 | hwif->tuneproc = &auide_tune_drive; |
943 | hwif->drives[0].unmask = 1; | 727 | hwif->speedproc = &auide_tune_chipset; |
944 | hwif->drives[1].unmask = 1; | ||
945 | |||
946 | /* hold should be on in all cases */ | ||
947 | hwif->hold = 1; | ||
948 | hwif->mmio = 2; | ||
949 | |||
950 | /* set up local I/O function entry points */ | ||
951 | hwif->INB = auide_inb; | ||
952 | hwif->INW = auide_inw; | ||
953 | hwif->INL = auide_inl; | ||
954 | hwif->INSW = auide_insw; | ||
955 | hwif->INSL = auide_insl; | ||
956 | hwif->OUTB = auide_outb; | ||
957 | hwif->OUTBSYNC = auide_outbsync; | ||
958 | hwif->OUTW = auide_outw; | ||
959 | hwif->OUTL = auide_outl; | ||
960 | hwif->OUTSW = auide_outsw; | ||
961 | hwif->OUTSL = auide_outsl; | ||
962 | |||
963 | hwif->tuneproc = &auide_tune_drive; | ||
964 | hwif->speedproc = &auide_tune_chipset; | ||
965 | 728 | ||
966 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 729 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
967 | hwif->ide_dma_off_quietly = &auide_dma_off_quietly; | 730 | hwif->ide_dma_off_quietly = &auide_dma_off_quietly; |
968 | hwif->ide_dma_timeout = &auide_dma_timeout; | 731 | hwif->ide_dma_timeout = &auide_dma_timeout; |
969 | 732 | ||
970 | hwif->ide_dma_check = &auide_dma_check; | 733 | hwif->ide_dma_check = &auide_dma_check; |
971 | hwif->dma_exec_cmd = &auide_dma_exec_cmd; | 734 | hwif->dma_exec_cmd = &auide_dma_exec_cmd; |
972 | hwif->dma_start = &auide_dma_start; | 735 | hwif->dma_start = &auide_dma_start; |
973 | hwif->ide_dma_end = &auide_dma_end; | 736 | hwif->ide_dma_end = &auide_dma_end; |
974 | hwif->dma_setup = &auide_dma_setup; | 737 | hwif->dma_setup = &auide_dma_setup; |
975 | hwif->ide_dma_test_irq = &auide_dma_test_irq; | 738 | hwif->ide_dma_test_irq = &auide_dma_test_irq; |
976 | hwif->ide_dma_host_off = &auide_dma_host_off; | 739 | hwif->ide_dma_host_off = &auide_dma_host_off; |
977 | hwif->ide_dma_host_on = &auide_dma_host_on; | 740 | hwif->ide_dma_host_on = &auide_dma_host_on; |
978 | hwif->ide_dma_lostirq = &auide_dma_lostirq; | 741 | hwif->ide_dma_lostirq = &auide_dma_lostirq; |
979 | hwif->ide_dma_on = &auide_dma_on; | 742 | hwif->ide_dma_on = &auide_dma_on; |
980 | 743 | ||
981 | hwif->autodma = 1; | 744 | hwif->autodma = 1; |
982 | hwif->drives[0].autodma = hwif->autodma; | 745 | hwif->drives[0].autodma = hwif->autodma; |
983 | hwif->drives[1].autodma = hwif->autodma; | 746 | hwif->drives[1].autodma = hwif->autodma; |
984 | hwif->atapi_dma = 1; | 747 | hwif->atapi_dma = 1; |
985 | hwif->drives[0].using_dma = 1; | 748 | |
986 | hwif->drives[1].using_dma = 1; | ||
987 | #else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ | 749 | #else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ |
988 | hwif->autodma = 0; | 750 | hwif->autodma = 0; |
989 | hwif->channel = 0; | 751 | hwif->channel = 0; |
990 | hwif->hold = 1; | 752 | hwif->hold = 1; |
991 | hwif->select_data = 0; /* no chipset-specific code */ | 753 | hwif->select_data = 0; /* no chipset-specific code */ |
992 | hwif->config_data = 0; /* no chipset-specific code */ | 754 | hwif->config_data = 0; /* no chipset-specific code */ |
993 | 755 | ||
994 | hwif->drives[0].autodma = 0; | 756 | hwif->drives[0].autodma = 0; |
995 | hwif->drives[0].drive_data = 0; /* no drive data */ | 757 | hwif->drives[0].autotune = 1; /* 1=autotune, 2=noautotune, 0=default */ |
996 | hwif->drives[0].using_dma = 0; | ||
997 | hwif->drives[0].waiting_for_dma = 0; | ||
998 | hwif->drives[0].autotune = 1; /* 1=autotune, 2=noautotune, 0=default */ | ||
999 | /* secondary hdd not supported */ | ||
1000 | hwif->drives[1].autodma = 0; | ||
1001 | |||
1002 | hwif->drives[1].drive_data = 0; | ||
1003 | hwif->drives[1].using_dma = 0; | ||
1004 | hwif->drives[1].waiting_for_dma = 0; | ||
1005 | hwif->drives[1].autotune = 2; /* 1=autotune, 2=noautotune, 0=default */ | ||
1006 | #endif | ||
1007 | hwif->drives[0].io_32bit = 0; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */ | ||
1008 | hwif->drives[1].io_32bit = 0; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */ | ||
1009 | |||
1010 | /*Register Driver with PM Framework*/ | ||
1011 | #ifdef CONFIG_PM | ||
1012 | auide_hwif.pm.lock = SPIN_LOCK_UNLOCKED; | ||
1013 | auide_hwif.pm.stopped = 0; | ||
1014 | |||
1015 | auide_hwif.pm.dev = new_au1xxx_power_device( "ide", | ||
1016 | &au1200ide_pm_callback, | ||
1017 | NULL); | ||
1018 | if ( auide_hwif.pm.dev == NULL ) | ||
1019 | printk(KERN_INFO "Unable to create a power management \ | ||
1020 | device entry for the au1200-IDE.\n"); | ||
1021 | else | ||
1022 | printk(KERN_INFO "Power management device entry for the \ | ||
1023 | au1200-IDE loaded.\n"); | ||
1024 | #endif | 758 | #endif |
759 | hwif->drives[0].no_io_32bit = 1; | ||
1025 | 760 | ||
1026 | auide_hwif.hwif = hwif; | 761 | auide_hwif.hwif = hwif; |
1027 | hwif->hwif_data = &auide_hwif; | 762 | hwif->hwif_data = &auide_hwif; |
1028 | 763 | ||
1029 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA | 764 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA |
1030 | auide_ddma_init(&auide_hwif); | 765 | auide_ddma_init(&auide_hwif); |
1031 | dbdma_init_done = 1; | 766 | dbdma_init_done = 1; |
1032 | #endif | 767 | #endif |
1033 | 768 | ||
1034 | probe_hwif_init(hwif); | 769 | probe_hwif_init(hwif); |
1035 | dev_set_drvdata(dev, hwif); | 770 | dev_set_drvdata(dev, hwif); |
1036 | 771 | ||
1037 | printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); | 772 | printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); |
1038 | 773 | ||
1039 | out: | 774 | out: |
1040 | return ret; | 775 | return ret; |
1041 | } | 776 | } |
1042 | 777 | ||
1043 | static int au_ide_remove(struct device *dev) | 778 | static int au_ide_remove(struct device *dev) |
@@ -1045,7 +780,7 @@ static int au_ide_remove(struct device *dev)
1045 | struct platform_device *pdev = to_platform_device(dev); | 780 | struct platform_device *pdev = to_platform_device(dev); |
1046 | struct resource *res; | 781 | struct resource *res; |
1047 | ide_hwif_t *hwif = dev_get_drvdata(dev); | 782 | ide_hwif_t *hwif = dev_get_drvdata(dev); |
1048 | _auide_hwif *ahwif = &auide_hwif; | 783 | _auide_hwif *ahwif = &auide_hwif; |
1049 | 784 | ||
1050 | ide_unregister(hwif - ide_hwifs); | 785 | ide_unregister(hwif - ide_hwifs); |
1051 | 786 | ||
@@ -1069,180 +804,11 @@ static int __init au_ide_init(void)
1069 | return driver_register(&au1200_ide_driver); | 804 | return driver_register(&au1200_ide_driver); |
1070 | } | 805 | } |
1071 | 806 | ||
1072 | static void __init au_ide_exit(void) | 807 | static void __exit au_ide_exit(void) |
1073 | { | 808 | { |
1074 | driver_unregister(&au1200_ide_driver); | 809 | driver_unregister(&au1200_ide_driver); |
1075 | } | 810 | } |
1076 | 811 | ||
1077 | #ifdef CONFIG_PM | ||
1078 | int au1200ide_pm_callback( au1xxx_power_dev_t *dev,\ | ||
1079 | au1xxx_request_t request, void *data) { | ||
1080 | |||
1081 | unsigned int d, err = 0; | ||
1082 | unsigned long flags; | ||
1083 | |||
1084 | spin_lock_irqsave(auide_hwif.pm.lock, flags); | ||
1085 | |||
1086 | switch (request){ | ||
1087 | case AU1XXX_PM_SLEEP: | ||
1088 | err = au1xxxide_pm_sleep(dev); | ||
1089 | break; | ||
1090 | case AU1XXX_PM_WAKEUP: | ||
1091 | d = *((unsigned int*)data); | ||
1092 | if ( d > 0 && d <= 99) { | ||
1093 | err = au1xxxide_pm_standby(dev); | ||
1094 | } | ||
1095 | else { | ||
1096 | err = au1xxxide_pm_resume(dev); | ||
1097 | } | ||
1098 | break; | ||
1099 | case AU1XXX_PM_GETSTATUS: | ||
1100 | err = au1xxxide_pm_getstatus(dev); | ||
1101 | break; | ||
1102 | case AU1XXX_PM_ACCESS: | ||
1103 | err = au1xxxide_pm_access(dev); | ||
1104 | break; | ||
1105 | case AU1XXX_PM_IDLE: | ||
1106 | err = au1xxxide_pm_idle(dev); | ||
1107 | break; | ||
1108 | case AU1XXX_PM_CLEANUP: | ||
1109 | err = au1xxxide_pm_cleanup(dev); | ||
1110 | break; | ||
1111 | default: | ||
1112 | err = -1; | ||
1113 | break; | ||
1114 | } | ||
1115 | |||
1116 | spin_unlock_irqrestore(auide_hwif.pm.lock, flags); | ||
1117 | |||
1118 | return err; | ||
1119 | } | ||
1120 | |||
1121 | static int au1xxxide_pm_standby( au1xxx_power_dev_t *dev ) { | ||
1122 | return 0; | ||
1123 | } | ||
1124 | |||
1125 | static int au1xxxide_pm_sleep( au1xxx_power_dev_t *dev ) { | ||
1126 | |||
1127 | int retval; | ||
1128 | ide_hwif_t *hwif = auide_hwif.hwif; | ||
1129 | struct request rq; | ||
1130 | struct request_pm_state rqpm; | ||
1131 | ide_task_t args; | ||
1132 | |||
1133 | if(auide_hwif.pm.stopped) | ||
1134 | return -1; | ||
1135 | |||
1136 | /* | ||
1137 | * wait until hard disc is ready | ||
1138 | */ | ||
1139 | if ( wait_for_ready(&hwif->drives[0], 35000) ) { | ||
1140 | printk("Wait for drive sleep timeout!\n"); | ||
1141 | retval = -1; | ||
1142 | } | ||
1143 | |||
1144 | /* | ||
1145 | * sequenz to tell the high level ide driver that pm is resuming | ||
1146 | */ | ||
1147 | memset(&rq, 0, sizeof(rq)); | ||
1148 | memset(&rqpm, 0, sizeof(rqpm)); | ||
1149 | memset(&args, 0, sizeof(args)); | ||
1150 | rq.flags = REQ_PM_SUSPEND; | ||
1151 | rq.special = &args; | ||
1152 | rq.pm = &rqpm; | ||
1153 | rqpm.pm_step = ide_pm_state_start_suspend; | ||
1154 | rqpm.pm_state = PMSG_SUSPEND; | ||
1155 | |||
1156 | retval = ide_do_drive_cmd(&hwif->drives[0], &rq, ide_wait); | ||
1157 | |||
1158 | if (wait_for_ready (&hwif->drives[0], 35000)) { | ||
1159 | printk("Wait for drive sleep timeout!\n"); | ||
1160 | retval = -1; | ||
1161 | } | ||
1162 | |||
1163 | /* | ||
1164 | * stop dbdma channels | ||
1165 | */ | ||
1166 | au1xxx_dbdma_reset(auide_hwif.tx_chan); | ||
1167 | au1xxx_dbdma_reset(auide_hwif.rx_chan); | ||
1168 | |||
1169 | auide_hwif.pm.stopped = 1; | ||
1170 | |||
1171 | return retval; | ||
1172 | } | ||
1173 | |||
1174 | static int au1xxxide_pm_resume( au1xxx_power_dev_t *dev ) { | ||
1175 | |||
1176 | int retval; | ||
1177 | ide_hwif_t *hwif = auide_hwif.hwif; | ||
1178 | struct request rq; | ||
1179 | struct request_pm_state rqpm; | ||
1180 | ide_task_t args; | ||
1181 | |||
1182 | if(!auide_hwif.pm.stopped) | ||
1183 | return -1; | ||
1184 | |||
1185 | /* | ||
1186 | * start dbdma channels | ||
1187 | */ | ||
1188 | au1xxx_dbdma_start(auide_hwif.tx_chan); | ||
1189 | au1xxx_dbdma_start(auide_hwif.rx_chan); | ||
1190 | |||
1191 | /* | ||
1192 | * wait until hard disc is ready | ||
1193 | */ | ||
1194 | if (wait_for_ready ( &hwif->drives[0], 35000)) { | ||
1195 | printk("Wait for drive wake up timeout!\n"); | ||
1196 | retval = -1; | ||
1197 | } | ||
1198 | |||
1199 | /* | ||
1200 | * sequenz to tell the high level ide driver that pm is resuming | ||
1201 | */ | ||
1202 | memset(&rq, 0, sizeof(rq)); | ||
1203 | memset(&rqpm, 0, sizeof(rqpm)); | ||
1204 | memset(&args, 0, sizeof(args)); | ||
1205 | rq.flags = REQ_PM_RESUME; | ||
1206 | rq.special = &args; | ||
1207 | rq.pm = &rqpm; | ||
1208 | rqpm.pm_step = ide_pm_state_start_resume; | ||
1209 | rqpm.pm_state = PMSG_ON; | ||
1210 | |||
1211 | retval = ide_do_drive_cmd(&hwif->drives[0], &rq, ide_head_wait); | ||
1212 | |||
1213 | /* | ||
1214 | * wait for hard disc | ||
1215 | */ | ||
1216 | if ( wait_for_ready(&hwif->drives[0], 35000) ) { | ||
1217 | printk("Wait for drive wake up timeout!\n"); | ||
1218 | retval = -1; | ||
1219 | } | ||
1220 | |||
1221 | auide_hwif.pm.stopped = 0; | ||
1222 | |||
1223 | return retval; | ||
1224 | } | ||
1225 | |||
1226 | static int au1xxxide_pm_getstatus( au1xxx_power_dev_t *dev ) { | ||
1227 | return dev->cur_state; | ||
1228 | } | ||
1229 | |||
1230 | static int au1xxxide_pm_access( au1xxx_power_dev_t *dev ) { | ||
1231 | if (dev->cur_state != AWAKE_STATE) | ||
1232 | return 0; | ||
1233 | else | ||
1234 | return -1; | ||
1235 | } | ||
1236 | |||
1237 | static int au1xxxide_pm_idle( au1xxx_power_dev_t *dev ) { | ||
1238 | return 0; | ||
1239 | } | ||
1240 | |||
1241 | static int au1xxxide_pm_cleanup( au1xxx_power_dev_t *dev ) { | ||
1242 | return 0; | ||
1243 | } | ||
1244 | #endif /* CONFIG_PM */ | ||
1245 | |||
1246 | MODULE_LICENSE("GPL"); | 812 | MODULE_LICENSE("GPL"); |
1247 | MODULE_DESCRIPTION("AU1200 IDE driver"); | 813 | MODULE_DESCRIPTION("AU1200 IDE driver"); |
1248 | 814 | ||