Diffstat (limited to 'drivers/ide/ide-iops.c')
-rw-r--r-- | drivers/ide/ide-iops.c | 742
1 file changed, 29 insertions(+), 713 deletions(-)
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index b1892bd95c6f..317c5dadd7c0 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -27,35 +27,7 @@
27 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
28 | #include <asm/io.h> | 28 | #include <asm/io.h> |
29 | 29 | ||
30 | /* | 30 | void SELECT_DRIVE(ide_drive_t *drive) |
31 | * Conventional PIO operations for ATA devices | ||
32 | */ | ||
33 | |||
34 | static u8 ide_inb (unsigned long port) | ||
35 | { | ||
36 | return (u8) inb(port); | ||
37 | } | ||
38 | |||
39 | static void ide_outb (u8 val, unsigned long port) | ||
40 | { | ||
41 | outb(val, port); | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * MMIO operations, typically used for SATA controllers | ||
46 | */ | ||
47 | |||
48 | static u8 ide_mm_inb (unsigned long port) | ||
49 | { | ||
50 | return (u8) readb((void __iomem *) port); | ||
51 | } | ||
52 | |||
53 | static void ide_mm_outb (u8 value, unsigned long port) | ||
54 | { | ||
55 | writeb(value, (void __iomem *) port); | ||
56 | } | ||
57 | |||
58 | void SELECT_DRIVE (ide_drive_t *drive) | ||
59 | { | 31 | { |
60 | ide_hwif_t *hwif = drive->hwif; | 32 | ide_hwif_t *hwif = drive->hwif; |
61 | const struct ide_port_ops *port_ops = hwif->port_ops; | 33 | const struct ide_port_ops *port_ops = hwif->port_ops; |
@@ -78,277 +50,6 @@ void SELECT_MASK(ide_drive_t *drive, int mask)
78 | port_ops->maskproc(drive, mask); | 50 | port_ops->maskproc(drive, mask); |
79 | } | 51 | } |
80 | 52 | ||
81 | void ide_exec_command(ide_hwif_t *hwif, u8 cmd) | ||
82 | { | ||
83 | if (hwif->host_flags & IDE_HFLAG_MMIO) | ||
84 | writeb(cmd, (void __iomem *)hwif->io_ports.command_addr); | ||
85 | else | ||
86 | outb(cmd, hwif->io_ports.command_addr); | ||
87 | } | ||
88 | EXPORT_SYMBOL_GPL(ide_exec_command); | ||
89 | |||
90 | u8 ide_read_status(ide_hwif_t *hwif) | ||
91 | { | ||
92 | if (hwif->host_flags & IDE_HFLAG_MMIO) | ||
93 | return readb((void __iomem *)hwif->io_ports.status_addr); | ||
94 | else | ||
95 | return inb(hwif->io_ports.status_addr); | ||
96 | } | ||
97 | EXPORT_SYMBOL_GPL(ide_read_status); | ||
98 | |||
99 | u8 ide_read_altstatus(ide_hwif_t *hwif) | ||
100 | { | ||
101 | if (hwif->host_flags & IDE_HFLAG_MMIO) | ||
102 | return readb((void __iomem *)hwif->io_ports.ctl_addr); | ||
103 | else | ||
104 | return inb(hwif->io_ports.ctl_addr); | ||
105 | } | ||
106 | EXPORT_SYMBOL_GPL(ide_read_altstatus); | ||
107 | |||
108 | void ide_set_irq(ide_hwif_t *hwif, int on) | ||
109 | { | ||
110 | u8 ctl = ATA_DEVCTL_OBS; | ||
111 | |||
112 | if (on == 4) { /* hack for SRST */ | ||
113 | ctl |= 4; | ||
114 | on &= ~4; | ||
115 | } | ||
116 | |||
117 | ctl |= on ? 0 : 2; | ||
118 | |||
119 | if (hwif->host_flags & IDE_HFLAG_MMIO) | ||
120 | writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr); | ||
121 | else | ||
122 | outb(ctl, hwif->io_ports.ctl_addr); | ||
123 | } | ||
124 | EXPORT_SYMBOL_GPL(ide_set_irq); | ||
125 | |||
126 | void ide_tf_load(ide_drive_t *drive, ide_task_t *task) | ||
127 | { | ||
128 | ide_hwif_t *hwif = drive->hwif; | ||
129 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
130 | struct ide_taskfile *tf = &task->tf; | ||
131 | void (*tf_outb)(u8 addr, unsigned long port); | ||
132 | u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; | ||
133 | u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; | ||
134 | |||
135 | if (mmio) | ||
136 | tf_outb = ide_mm_outb; | ||
137 | else | ||
138 | tf_outb = ide_outb; | ||
139 | |||
140 | if (task->tf_flags & IDE_TFLAG_FLAGGED) | ||
141 | HIHI = 0xFF; | ||
142 | |||
143 | if (task->tf_flags & IDE_TFLAG_OUT_DATA) { | ||
144 | u16 data = (tf->hob_data << 8) | tf->data; | ||
145 | |||
146 | if (mmio) | ||
147 | writew(data, (void __iomem *)io_ports->data_addr); | ||
148 | else | ||
149 | outw(data, io_ports->data_addr); | ||
150 | } | ||
151 | |||
152 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) | ||
153 | tf_outb(tf->hob_feature, io_ports->feature_addr); | ||
154 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) | ||
155 | tf_outb(tf->hob_nsect, io_ports->nsect_addr); | ||
156 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) | ||
157 | tf_outb(tf->hob_lbal, io_ports->lbal_addr); | ||
158 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) | ||
159 | tf_outb(tf->hob_lbam, io_ports->lbam_addr); | ||
160 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) | ||
161 | tf_outb(tf->hob_lbah, io_ports->lbah_addr); | ||
162 | |||
163 | if (task->tf_flags & IDE_TFLAG_OUT_FEATURE) | ||
164 | tf_outb(tf->feature, io_ports->feature_addr); | ||
165 | if (task->tf_flags & IDE_TFLAG_OUT_NSECT) | ||
166 | tf_outb(tf->nsect, io_ports->nsect_addr); | ||
167 | if (task->tf_flags & IDE_TFLAG_OUT_LBAL) | ||
168 | tf_outb(tf->lbal, io_ports->lbal_addr); | ||
169 | if (task->tf_flags & IDE_TFLAG_OUT_LBAM) | ||
170 | tf_outb(tf->lbam, io_ports->lbam_addr); | ||
171 | if (task->tf_flags & IDE_TFLAG_OUT_LBAH) | ||
172 | tf_outb(tf->lbah, io_ports->lbah_addr); | ||
173 | |||
174 | if (task->tf_flags & IDE_TFLAG_OUT_DEVICE) | ||
175 | tf_outb((tf->device & HIHI) | drive->select, | ||
176 | io_ports->device_addr); | ||
177 | } | ||
178 | EXPORT_SYMBOL_GPL(ide_tf_load); | ||
179 | |||
180 | void ide_tf_read(ide_drive_t *drive, ide_task_t *task) | ||
181 | { | ||
182 | ide_hwif_t *hwif = drive->hwif; | ||
183 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
184 | struct ide_taskfile *tf = &task->tf; | ||
185 | void (*tf_outb)(u8 addr, unsigned long port); | ||
186 | u8 (*tf_inb)(unsigned long port); | ||
187 | u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; | ||
188 | |||
189 | if (mmio) { | ||
190 | tf_outb = ide_mm_outb; | ||
191 | tf_inb = ide_mm_inb; | ||
192 | } else { | ||
193 | tf_outb = ide_outb; | ||
194 | tf_inb = ide_inb; | ||
195 | } | ||
196 | |||
197 | if (task->tf_flags & IDE_TFLAG_IN_DATA) { | ||
198 | u16 data; | ||
199 | |||
200 | if (mmio) | ||
201 | data = readw((void __iomem *)io_ports->data_addr); | ||
202 | else | ||
203 | data = inw(io_ports->data_addr); | ||
204 | |||
205 | tf->data = data & 0xff; | ||
206 | tf->hob_data = (data >> 8) & 0xff; | ||
207 | } | ||
208 | |||
209 | /* be sure we're looking at the low order bits */ | ||
210 | tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); | ||
211 | |||
212 | if (task->tf_flags & IDE_TFLAG_IN_FEATURE) | ||
213 | tf->feature = tf_inb(io_ports->feature_addr); | ||
214 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) | ||
215 | tf->nsect = tf_inb(io_ports->nsect_addr); | ||
216 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) | ||
217 | tf->lbal = tf_inb(io_ports->lbal_addr); | ||
218 | if (task->tf_flags & IDE_TFLAG_IN_LBAM) | ||
219 | tf->lbam = tf_inb(io_ports->lbam_addr); | ||
220 | if (task->tf_flags & IDE_TFLAG_IN_LBAH) | ||
221 | tf->lbah = tf_inb(io_ports->lbah_addr); | ||
222 | if (task->tf_flags & IDE_TFLAG_IN_DEVICE) | ||
223 | tf->device = tf_inb(io_ports->device_addr); | ||
224 | |||
225 | if (task->tf_flags & IDE_TFLAG_LBA48) { | ||
226 | tf_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr); | ||
227 | |||
228 | if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE) | ||
229 | tf->hob_feature = tf_inb(io_ports->feature_addr); | ||
230 | if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT) | ||
231 | tf->hob_nsect = tf_inb(io_ports->nsect_addr); | ||
232 | if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL) | ||
233 | tf->hob_lbal = tf_inb(io_ports->lbal_addr); | ||
234 | if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM) | ||
235 | tf->hob_lbam = tf_inb(io_ports->lbam_addr); | ||
236 | if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH) | ||
237 | tf->hob_lbah = tf_inb(io_ports->lbah_addr); | ||
238 | } | ||
239 | } | ||
240 | EXPORT_SYMBOL_GPL(ide_tf_read); | ||
241 | |||
242 | /* | ||
243 | * Some localbus EIDE interfaces require a special access sequence | ||
244 | * when using 32-bit I/O instructions to transfer data. We call this | ||
245 | * the "vlb_sync" sequence, which consists of three successive reads | ||
246 | * of the sector count register location, with interrupts disabled | ||
247 | * to ensure that the reads all happen together. | ||
248 | */ | ||
249 | static void ata_vlb_sync(unsigned long port) | ||
250 | { | ||
251 | (void)inb(port); | ||
252 | (void)inb(port); | ||
253 | (void)inb(port); | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * This is used for most PIO data transfers *from* the IDE interface | ||
258 | * | ||
259 | * These routines will round up any request for an odd number of bytes, | ||
260 | * so if an odd len is specified, be sure that there's at least one | ||
261 | * extra byte allocated for the buffer. | ||
262 | */ | ||
263 | void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf, | ||
264 | unsigned int len) | ||
265 | { | ||
266 | ide_hwif_t *hwif = drive->hwif; | ||
267 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
268 | unsigned long data_addr = io_ports->data_addr; | ||
269 | u8 io_32bit = drive->io_32bit; | ||
270 | u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; | ||
271 | |||
272 | len++; | ||
273 | |||
274 | if (io_32bit) { | ||
275 | unsigned long uninitialized_var(flags); | ||
276 | |||
277 | if ((io_32bit & 2) && !mmio) { | ||
278 | local_irq_save(flags); | ||
279 | ata_vlb_sync(io_ports->nsect_addr); | ||
280 | } | ||
281 | |||
282 | if (mmio) | ||
283 | __ide_mm_insl((void __iomem *)data_addr, buf, len / 4); | ||
284 | else | ||
285 | insl(data_addr, buf, len / 4); | ||
286 | |||
287 | if ((io_32bit & 2) && !mmio) | ||
288 | local_irq_restore(flags); | ||
289 | |||
290 | if ((len & 3) >= 2) { | ||
291 | if (mmio) | ||
292 | __ide_mm_insw((void __iomem *)data_addr, | ||
293 | (u8 *)buf + (len & ~3), 1); | ||
294 | else | ||
295 | insw(data_addr, (u8 *)buf + (len & ~3), 1); | ||
296 | } | ||
297 | } else { | ||
298 | if (mmio) | ||
299 | __ide_mm_insw((void __iomem *)data_addr, buf, len / 2); | ||
300 | else | ||
301 | insw(data_addr, buf, len / 2); | ||
302 | } | ||
303 | } | ||
304 | EXPORT_SYMBOL_GPL(ide_input_data); | ||
305 | |||
306 | /* | ||
307 | * This is used for most PIO data transfers *to* the IDE interface | ||
308 | */ | ||
309 | void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf, | ||
310 | unsigned int len) | ||
311 | { | ||
312 | ide_hwif_t *hwif = drive->hwif; | ||
313 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
314 | unsigned long data_addr = io_ports->data_addr; | ||
315 | u8 io_32bit = drive->io_32bit; | ||
316 | u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; | ||
317 | |||
318 | len++; | ||
319 | |||
320 | if (io_32bit) { | ||
321 | unsigned long uninitialized_var(flags); | ||
322 | |||
323 | if ((io_32bit & 2) && !mmio) { | ||
324 | local_irq_save(flags); | ||
325 | ata_vlb_sync(io_ports->nsect_addr); | ||
326 | } | ||
327 | |||
328 | if (mmio) | ||
329 | __ide_mm_outsl((void __iomem *)data_addr, buf, len / 4); | ||
330 | else | ||
331 | outsl(data_addr, buf, len / 4); | ||
332 | |||
333 | if ((io_32bit & 2) && !mmio) | ||
334 | local_irq_restore(flags); | ||
335 | |||
336 | if ((len & 3) >= 2) { | ||
337 | if (mmio) | ||
338 | __ide_mm_outsw((void __iomem *)data_addr, | ||
339 | (u8 *)buf + (len & ~3), 1); | ||
340 | else | ||
341 | outsw(data_addr, (u8 *)buf + (len & ~3), 1); | ||
342 | } | ||
343 | } else { | ||
344 | if (mmio) | ||
345 | __ide_mm_outsw((void __iomem *)data_addr, buf, len / 2); | ||
346 | else | ||
347 | outsw(data_addr, buf, len / 2); | ||
348 | } | ||
349 | } | ||
350 | EXPORT_SYMBOL_GPL(ide_output_data); | ||
351 | |||
352 | u8 ide_read_error(ide_drive_t *drive) | 53 | u8 ide_read_error(ide_drive_t *drive) |
353 | { | 54 | { |
354 | ide_task_t task; | 55 | ide_task_t task; |
@@ -362,35 +63,6 @@ u8 ide_read_error(ide_drive_t *drive)
362 | } | 63 | } |
363 | EXPORT_SYMBOL_GPL(ide_read_error); | 64 | EXPORT_SYMBOL_GPL(ide_read_error); |
364 | 65 | ||
365 | void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason) | ||
366 | { | ||
367 | ide_task_t task; | ||
368 | |||
369 | memset(&task, 0, sizeof(task)); | ||
370 | task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM | | ||
371 | IDE_TFLAG_IN_NSECT; | ||
372 | |||
373 | drive->hwif->tp_ops->tf_read(drive, &task); | ||
374 | |||
375 | *bcount = (task.tf.lbah << 8) | task.tf.lbam; | ||
376 | *ireason = task.tf.nsect & 3; | ||
377 | } | ||
378 | EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason); | ||
379 | |||
380 | const struct ide_tp_ops default_tp_ops = { | ||
381 | .exec_command = ide_exec_command, | ||
382 | .read_status = ide_read_status, | ||
383 | .read_altstatus = ide_read_altstatus, | ||
384 | |||
385 | .set_irq = ide_set_irq, | ||
386 | |||
387 | .tf_load = ide_tf_load, | ||
388 | .tf_read = ide_tf_read, | ||
389 | |||
390 | .input_data = ide_input_data, | ||
391 | .output_data = ide_output_data, | ||
392 | }; | ||
393 | |||
394 | void ide_fix_driveid(u16 *id) | 66 | void ide_fix_driveid(u16 *id) |
395 | { | 67 | { |
396 | #ifndef __LITTLE_ENDIAN | 68 | #ifndef __LITTLE_ENDIAN |
@@ -412,7 +84,7 @@ void ide_fix_driveid(u16 *id)
412 | * returned by the ATA_CMD_ID_ATA[PI] commands. | 84 | * returned by the ATA_CMD_ID_ATA[PI] commands. |
413 | */ | 85 | */ |
414 | 86 | ||
415 | void ide_fixstring (u8 *s, const int bytecount, const int byteswap) | 87 | void ide_fixstring(u8 *s, const int bytecount, const int byteswap) |
416 | { | 88 | { |
417 | u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */ | 89 | u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */ |
418 | 90 | ||
@@ -435,44 +107,9 @@ void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
435 | while (p != end) | 107 | while (p != end) |
436 | *p++ = '\0'; | 108 | *p++ = '\0'; |
437 | } | 109 | } |
438 | |||
439 | EXPORT_SYMBOL(ide_fixstring); | 110 | EXPORT_SYMBOL(ide_fixstring); |
440 | 111 | ||
441 | /* | 112 | /* |
442 | * Needed for PCI irq sharing | ||
443 | */ | ||
444 | int drive_is_ready (ide_drive_t *drive) | ||
445 | { | ||
446 | ide_hwif_t *hwif = drive->hwif; | ||
447 | u8 stat = 0; | ||
448 | |||
449 | if (drive->waiting_for_dma) | ||
450 | return hwif->dma_ops->dma_test_irq(drive); | ||
451 | |||
452 | /* | ||
453 | * We do a passive status test under shared PCI interrupts on | ||
454 | * cards that truly share the ATA side interrupt, but may also share | ||
455 | * an interrupt with another pci card/device. We make no assumptions | ||
456 | * about possible isa-pnp and pci-pnp issues yet. | ||
457 | */ | ||
458 | if (hwif->io_ports.ctl_addr && | ||
459 | (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) | ||
460 | stat = hwif->tp_ops->read_altstatus(hwif); | ||
461 | else | ||
462 | /* Note: this may clear a pending IRQ!! */ | ||
463 | stat = hwif->tp_ops->read_status(hwif); | ||
464 | |||
465 | if (stat & ATA_BUSY) | ||
466 | /* drive busy: definitely not interrupting */ | ||
467 | return 0; | ||
468 | |||
469 | /* drive ready: *might* be interrupting */ | ||
470 | return 1; | ||
471 | } | ||
472 | |||
473 | EXPORT_SYMBOL(drive_is_ready); | ||
474 | |||
475 | /* | ||
476 | * This routine busy-waits for the drive status to be not "busy". | 113 | * This routine busy-waits for the drive status to be not "busy". |
477 | * It then checks the status for all of the "good" bits and none | 114 | * It then checks the status for all of the "good" bits and none |
478 | * of the "bad" bits, and if all is okay it returns 0. All other | 115 | * of the "bad" bits, and if all is okay it returns 0. All other |
@@ -483,7 +120,8 @@ EXPORT_SYMBOL(drive_is_ready);
483 | * setting a timer to wake up at half second intervals thereafter, | 120 | * setting a timer to wake up at half second intervals thereafter, |
484 | * until timeout is achieved, before timing out. | 121 | * until timeout is achieved, before timing out. |
485 | */ | 122 | */ |
486 | static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat) | 123 | static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, |
124 | unsigned long timeout, u8 *rstat) | ||
487 | { | 125 | { |
488 | ide_hwif_t *hwif = drive->hwif; | 126 | ide_hwif_t *hwif = drive->hwif; |
489 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | 127 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; |
@@ -541,7 +179,8 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long ti
541 | * The caller should return the updated value of "startstop" in this case, | 179 | * The caller should return the updated value of "startstop" in this case, |
542 | * "startstop" is unchanged when the function returns 0. | 180 | * "startstop" is unchanged when the function returns 0. |
543 | */ | 181 | */ |
544 | int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout) | 182 | int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, |
183 | u8 bad, unsigned long timeout) | ||
545 | { | 184 | { |
546 | int err; | 185 | int err; |
547 | u8 stat; | 186 | u8 stat; |
@@ -561,7 +200,6 @@ int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 ba
561 | 200 | ||
562 | return err; | 201 | return err; |
563 | } | 202 | } |
564 | |||
565 | EXPORT_SYMBOL(ide_wait_stat); | 203 | EXPORT_SYMBOL(ide_wait_stat); |
566 | 204 | ||
567 | /** | 205 | /** |
@@ -582,7 +220,6 @@ int ide_in_drive_list(u16 *id, const struct drive_list_entry *table)
582 | return 1; | 220 | return 1; |
583 | return 0; | 221 | return 0; |
584 | } | 222 | } |
585 | |||
586 | EXPORT_SYMBOL_GPL(ide_in_drive_list); | 223 | EXPORT_SYMBOL_GPL(ide_in_drive_list); |
587 | 224 | ||
588 | /* | 225 | /* |
@@ -607,7 +244,7 @@ static const struct drive_list_entry ivb_list[] = {
607 | * All hosts that use the 80c ribbon must use! | 244 | * All hosts that use the 80c ribbon must use! |
608 | * The name is derived from upper byte of word 93 and the 80c ribbon. | 245 | * The name is derived from upper byte of word 93 and the 80c ribbon. |
609 | */ | 246 | */ |
610 | u8 eighty_ninty_three (ide_drive_t *drive) | 247 | u8 eighty_ninty_three(ide_drive_t *drive) |
611 | { | 248 | { |
612 | ide_hwif_t *hwif = drive->hwif; | 249 | ide_hwif_t *hwif = drive->hwif; |
613 | u16 *id = drive->id; | 250 | u16 *id = drive->id; |
@@ -652,47 +289,19 @@ no_80w:
652 | 289 | ||
653 | int ide_driveid_update(ide_drive_t *drive) | 290 | int ide_driveid_update(ide_drive_t *drive) |
654 | { | 291 | { |
655 | ide_hwif_t *hwif = drive->hwif; | ||
656 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | ||
657 | u16 *id; | 292 | u16 *id; |
658 | unsigned long flags; | 293 | int rc; |
659 | u8 stat; | ||
660 | |||
661 | /* | ||
662 | * Re-read drive->id for possible DMA mode | ||
663 | * change (copied from ide-probe.c) | ||
664 | */ | ||
665 | |||
666 | SELECT_MASK(drive, 1); | ||
667 | tp_ops->set_irq(hwif, 0); | ||
668 | msleep(50); | ||
669 | tp_ops->exec_command(hwif, ATA_CMD_ID_ATA); | ||
670 | 294 | ||
671 | if (ide_busy_sleep(hwif, WAIT_WORSTCASE, 1)) { | 295 | id = kmalloc(SECTOR_SIZE, GFP_ATOMIC); |
672 | SELECT_MASK(drive, 0); | 296 | if (id == NULL) |
673 | return 0; | 297 | return 0; |
674 | } | ||
675 | |||
676 | msleep(50); /* wait for IRQ and ATA_DRQ */ | ||
677 | stat = tp_ops->read_status(hwif); | ||
678 | 298 | ||
679 | if (!OK_STAT(stat, ATA_DRQ, BAD_R_STAT)) { | 299 | SELECT_MASK(drive, 1); |
680 | SELECT_MASK(drive, 0); | 300 | rc = ide_dev_read_id(drive, ATA_CMD_ID_ATA, id); |
681 | printk("%s: CHECK for good STATUS\n", drive->name); | ||
682 | return 0; | ||
683 | } | ||
684 | local_irq_save(flags); | ||
685 | SELECT_MASK(drive, 0); | 301 | SELECT_MASK(drive, 0); |
686 | id = kmalloc(SECTOR_SIZE, GFP_ATOMIC); | 302 | |
687 | if (!id) { | 303 | if (rc) |
688 | local_irq_restore(flags); | 304 | goto out_err; |
689 | return 0; | ||
690 | } | ||
691 | tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); | ||
692 | (void)tp_ops->read_status(hwif); /* clear drive IRQ */ | ||
693 | local_irq_enable(); | ||
694 | local_irq_restore(flags); | ||
695 | ide_fix_driveid(id); | ||
696 | 305 | ||
697 | drive->id[ATA_ID_UDMA_MODES] = id[ATA_ID_UDMA_MODES]; | 306 | drive->id[ATA_ID_UDMA_MODES] = id[ATA_ID_UDMA_MODES]; |
698 | drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES]; | 307 | drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES]; |
@@ -705,6 +314,12 @@ int ide_driveid_update(ide_drive_t *drive)
705 | ide_dma_off(drive); | 314 | ide_dma_off(drive); |
706 | 315 | ||
707 | return 1; | 316 | return 1; |
317 | out_err: | ||
318 | SELECT_MASK(drive, 0); | ||
319 | if (rc == 2) | ||
320 | printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__); | ||
321 | kfree(id); | ||
322 | return 0; | ||
708 | } | 323 | } |
709 | 324 | ||
710 | int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | 325 | int ide_config_drive_speed(ide_drive_t *drive, u8 speed) |
@@ -731,18 +346,15 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
731 | * but for some reason these don't work at | 346 | * but for some reason these don't work at |
732 | * this point (lost interrupt). | 347 | * this point (lost interrupt). |
733 | */ | 348 | */ |
734 | /* | 349 | |
735 | * Select the drive, and issue the SETFEATURES command | ||
736 | */ | ||
737 | disable_irq_nosync(hwif->irq); | ||
738 | |||
739 | /* | 350 | /* |
740 | * FIXME: we race against the running IRQ here if | 351 | * FIXME: we race against the running IRQ here if |
741 | * this is called from non IRQ context. If we use | 352 | * this is called from non IRQ context. If we use |
742 | * disable_irq() we hang on the error path. Work | 353 | * disable_irq() we hang on the error path. Work |
743 | * is needed. | 354 | * is needed. |
744 | */ | 355 | */ |
745 | 356 | disable_irq_nosync(hwif->irq); | |
357 | |||
746 | udelay(1); | 358 | udelay(1); |
747 | SELECT_DRIVE(drive); | 359 | SELECT_DRIVE(drive); |
748 | SELECT_MASK(drive, 1); | 360 | SELECT_MASK(drive, 1); |
@@ -812,8 +424,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
812 | * | 424 | * |
813 | * See also ide_execute_command | 425 | * See also ide_execute_command |
814 | */ | 426 | */ |
815 | static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, | 427 | void __ide_set_handler(ide_drive_t *drive, ide_handler_t *handler, |
816 | unsigned int timeout, ide_expiry_t *expiry) | 428 | unsigned int timeout, ide_expiry_t *expiry) |
817 | { | 429 | { |
818 | ide_hwif_t *hwif = drive->hwif; | 430 | ide_hwif_t *hwif = drive->hwif; |
819 | 431 | ||
@@ -835,9 +447,8 @@ void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
835 | __ide_set_handler(drive, handler, timeout, expiry); | 447 | __ide_set_handler(drive, handler, timeout, expiry); |
836 | spin_unlock_irqrestore(&hwif->lock, flags); | 448 | spin_unlock_irqrestore(&hwif->lock, flags); |
837 | } | 449 | } |
838 | |||
839 | EXPORT_SYMBOL(ide_set_handler); | 450 | EXPORT_SYMBOL(ide_set_handler); |
840 | 451 | ||
841 | /** | 452 | /** |
842 | * ide_execute_command - execute an IDE command | 453 | * ide_execute_command - execute an IDE command |
843 | * @drive: IDE drive to issue the command against | 454 | * @drive: IDE drive to issue the command against |
@@ -847,7 +458,7 @@ EXPORT_SYMBOL(ide_set_handler);
847 | * @expiry: handler to run on timeout | 458 | * @expiry: handler to run on timeout |
848 | * | 459 | * |
849 | * Helper function to issue an IDE command. This handles the | 460 | * Helper function to issue an IDE command. This handles the |
850 | * atomicity requirements, command timing and ensures that the | 461 | * atomicity requirements, command timing and ensures that the |
851 | * handler and IRQ setup do not race. All IDE command kick off | 462 | * handler and IRQ setup do not race. All IDE command kick off |
852 | * should go via this function or do equivalent locking. | 463 | * should go via this function or do equivalent locking. |
853 | */ | 464 | */ |
@@ -884,301 +495,6 @@ void ide_execute_pkt_cmd(ide_drive_t *drive)
884 | } | 495 | } |
885 | EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd); | 496 | EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd); |
886 | 497 | ||
887 | static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) | ||
888 | { | ||
889 | struct request *rq = drive->hwif->rq; | ||
890 | |||
891 | if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) | ||
892 | ide_end_request(drive, err ? err : 1, 0); | ||
893 | } | ||
894 | |||
895 | /* needed below */ | ||
896 | static ide_startstop_t do_reset1 (ide_drive_t *, int); | ||
897 | |||
898 | /* | ||
899 | * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms | ||
900 | * during an atapi drive reset operation. If the drive has not yet responded, | ||
901 | * and we have not yet hit our maximum waiting time, then the timer is restarted | ||
902 | * for another 50ms. | ||
903 | */ | ||
904 | static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive) | ||
905 | { | ||
906 | ide_hwif_t *hwif = drive->hwif; | ||
907 | u8 stat; | ||
908 | |||
909 | SELECT_DRIVE(drive); | ||
910 | udelay (10); | ||
911 | stat = hwif->tp_ops->read_status(hwif); | ||
912 | |||
913 | if (OK_STAT(stat, 0, ATA_BUSY)) | ||
914 | printk("%s: ATAPI reset complete\n", drive->name); | ||
915 | else { | ||
916 | if (time_before(jiffies, hwif->poll_timeout)) { | ||
917 | ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL); | ||
918 | /* continue polling */ | ||
919 | return ide_started; | ||
920 | } | ||
921 | /* end of polling */ | ||
922 | hwif->polling = 0; | ||
923 | printk("%s: ATAPI reset timed-out, status=0x%02x\n", | ||
924 | drive->name, stat); | ||
925 | /* do it the old fashioned way */ | ||
926 | return do_reset1(drive, 1); | ||
927 | } | ||
928 | /* done polling */ | ||
929 | hwif->polling = 0; | ||
930 | ide_complete_drive_reset(drive, 0); | ||
931 | return ide_stopped; | ||
932 | } | ||
933 | |||
934 | static void ide_reset_report_error(ide_hwif_t *hwif, u8 err) | ||
935 | { | ||
936 | static const char *err_master_vals[] = | ||
937 | { NULL, "passed", "formatter device error", | ||
938 | "sector buffer error", "ECC circuitry error", | ||
939 | "controlling MPU error" }; | ||
940 | |||
941 | u8 err_master = err & 0x7f; | ||
942 | |||
943 | printk(KERN_ERR "%s: reset: master: ", hwif->name); | ||
944 | if (err_master && err_master < 6) | ||
945 | printk(KERN_CONT "%s", err_master_vals[err_master]); | ||
946 | else | ||
947 | printk(KERN_CONT "error (0x%02x?)", err); | ||
948 | if (err & 0x80) | ||
949 | printk(KERN_CONT "; slave: failed"); | ||
950 | printk(KERN_CONT "\n"); | ||
951 | } | ||
952 | |||
953 | /* | ||
954 | * reset_pollfunc() gets invoked to poll the interface for completion every 50ms | ||
955 | * during an ide reset operation. If the drives have not yet responded, | ||
956 | * and we have not yet hit our maximum waiting time, then the timer is restarted | ||
957 | * for another 50ms. | ||
958 | */ | ||
959 | static ide_startstop_t reset_pollfunc (ide_drive_t *drive) | ||
960 | { | ||
961 | ide_hwif_t *hwif = drive->hwif; | ||
962 | const struct ide_port_ops *port_ops = hwif->port_ops; | ||
963 | u8 tmp; | ||
964 | int err = 0; | ||
965 | |||
966 | if (port_ops && port_ops->reset_poll) { | ||
967 | err = port_ops->reset_poll(drive); | ||
968 | if (err) { | ||
969 | printk(KERN_ERR "%s: host reset_poll failure for %s.\n", | ||
970 | hwif->name, drive->name); | ||
971 | goto out; | ||
972 | } | ||
973 | } | ||
974 | |||
975 | tmp = hwif->tp_ops->read_status(hwif); | ||
976 | |||
977 | if (!OK_STAT(tmp, 0, ATA_BUSY)) { | ||
978 | if (time_before(jiffies, hwif->poll_timeout)) { | ||
979 | ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL); | ||
980 | /* continue polling */ | ||
981 | return ide_started; | ||
982 | } | ||
983 | printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp); | ||
984 | drive->failures++; | ||
985 | err = -EIO; | ||
986 | } else { | ||
987 | tmp = ide_read_error(drive); | ||
988 | |||
989 | if (tmp == 1) { | ||
990 | printk(KERN_INFO "%s: reset: success\n", hwif->name); | ||
991 | drive->failures = 0; | ||
992 | } else { | ||
993 | ide_reset_report_error(hwif, tmp); | ||
994 | drive->failures++; | ||
995 | err = -EIO; | ||
996 | } | ||
997 | } | ||
998 | out: | ||
999 | hwif->polling = 0; /* done polling */ | ||
1000 | ide_complete_drive_reset(drive, err); | ||
1001 | return ide_stopped; | ||
1002 | } | ||
1003 | |||
1004 | static void ide_disk_pre_reset(ide_drive_t *drive) | ||
1005 | { | ||
1006 | int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1; | ||
1007 | |||
1008 | drive->special.all = 0; | ||
1009 | drive->special.b.set_geometry = legacy; | ||
1010 | drive->special.b.recalibrate = legacy; | ||
1011 | |||
1012 | drive->mult_count = 0; | ||
1013 | drive->dev_flags &= ~IDE_DFLAG_PARKED; | ||
1014 | |||
1015 | if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 && | ||
1016 | (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) | ||
1017 | drive->mult_req = 0; | ||
1018 | |||
1019 | if (drive->mult_req != drive->mult_count) | ||
1020 | drive->special.b.set_multmode = 1; | ||
1021 | } | ||
1022 | |||
1023 | static void pre_reset(ide_drive_t *drive) | ||
1024 | { | ||
1025 | const struct ide_port_ops *port_ops = drive->hwif->port_ops; | ||
1026 | |||
1027 | if (drive->media == ide_disk) | ||
1028 | ide_disk_pre_reset(drive); | ||
1029 | else | ||
1030 | drive->dev_flags |= IDE_DFLAG_POST_RESET; | ||
1031 | |||
1032 | if (drive->dev_flags & IDE_DFLAG_USING_DMA) { | ||
1033 | if (drive->crc_count) | ||
1034 | ide_check_dma_crc(drive); | ||
1035 | else | ||
1036 | ide_dma_off(drive); | ||
1037 | } | ||
1038 | |||
1039 | if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) { | ||
1040 | if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) { | ||
1041 | drive->dev_flags &= ~IDE_DFLAG_UNMASK; | ||
1042 | drive->io_32bit = 0; | ||
1043 | } | ||
1044 | return; | ||
1045 | } | ||
1046 | |||
1047 | if (port_ops && port_ops->pre_reset) | ||
1048 | port_ops->pre_reset(drive); | ||
1049 | |||
1050 | if (drive->current_speed != 0xff) | ||
1051 | drive->desired_speed = drive->current_speed; | ||
1052 | drive->current_speed = 0xff; | ||
1053 | } | ||
1054 | |||
1055 | /* | ||
1056 | * do_reset1() attempts to recover a confused drive by resetting it. | ||
1057 | * Unfortunately, resetting a disk drive actually resets all devices on | ||
1058 | * the same interface, so it can really be thought of as resetting the | ||
1059 | * interface rather than resetting the drive. | ||
1060 | * | ||
1061 | * ATAPI devices have their own reset mechanism which allows them to be | ||
1062 | * individually reset without clobbering other devices on the same interface. | ||
1063 | * | ||
1064 | * Unfortunately, the IDE interface does not generate an interrupt to let | ||
1065 | * us know when the reset operation has finished, so we must poll for this. | ||
1066 | * Equally poor, though, is the fact that this may a very long time to complete, | ||
1067 | * (up to 30 seconds worstcase). So, instead of busy-waiting here for it, | ||
1068 | * we set a timer to poll at 50ms intervals. | ||
1069 | */ | ||
1070 | static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) | ||
1071 | { | ||
1072 | ide_hwif_t *hwif = drive->hwif; | ||
1073 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
1074 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | ||
1075 | const struct ide_port_ops *port_ops; | ||
1076 | ide_drive_t *tdrive; | ||
1077 | unsigned long flags, timeout; | ||
1078 | int i; | ||
1079 | DEFINE_WAIT(wait); | ||
1080 | |||
1081 | spin_lock_irqsave(&hwif->lock, flags); | ||
1082 | |||
1083 | /* We must not reset with running handlers */ | ||
1084 | BUG_ON(hwif->handler != NULL); | ||
1085 | |||
1086 | /* For an ATAPI device, first try an ATAPI SRST. */ | ||
1087 | if (drive->media != ide_disk && !do_not_try_atapi) { | ||
1088 | pre_reset(drive); | ||
1089 | SELECT_DRIVE(drive); | ||
1090 | udelay (20); | ||
1091 | tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET); | ||
1092 | ndelay(400); | ||
1093 | hwif->poll_timeout = jiffies + WAIT_WORSTCASE; | ||
1094 | hwif->polling = 1; | ||
1095 | __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL); | ||
1096 | spin_unlock_irqrestore(&hwif->lock, flags); | ||
1097 | return ide_started; | ||
1098 | } | ||
1099 | |||
1100 | /* We must not disturb devices in the IDE_DFLAG_PARKED state. */ | ||
1101 | do { | ||
1102 | unsigned long now; | ||
1103 | |||
1104 | prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE); | ||
1105 | timeout = jiffies; | ||
1106 | ide_port_for_each_dev(i, tdrive, hwif) { | ||
1107 | if (tdrive->dev_flags & IDE_DFLAG_PRESENT && | ||
1108 | tdrive->dev_flags & IDE_DFLAG_PARKED && | ||
1109 | time_after(tdrive->sleep, timeout)) | ||
1110 | timeout = tdrive->sleep; | ||
1111 | } | ||
1112 | |||
1113 | now = jiffies; | ||
1114 | if (time_before_eq(timeout, now)) | ||
1115 | break; | ||
1116 | |||
1117 | spin_unlock_irqrestore(&hwif->lock, flags); | ||
1118 | timeout = schedule_timeout_uninterruptible(timeout - now); | ||
1119 | spin_lock_irqsave(&hwif->lock, flags); | ||
1120 | } while (timeout); | ||
1121 | finish_wait(&ide_park_wq, &wait); | ||
1122 | |||
1123 | /* | ||
1124 | * First, reset any device state data we were maintaining | ||
1125 | * for any of the drives on this interface. | ||
1126 | */ | ||
1127 | ide_port_for_each_dev(i, tdrive, hwif) | ||
1128 | pre_reset(tdrive); | ||
1129 | |||
1130 | if (io_ports->ctl_addr == 0) { | ||
1131 | spin_unlock_irqrestore(&hwif->lock, flags); | ||
1132 | ide_complete_drive_reset(drive, -ENXIO); | ||
1133 | return ide_stopped; | ||
1134 | } | ||
1135 | |||
1136 | /* | ||
1137 | * Note that we also set nIEN while resetting the device, | ||
1138 | * to mask unwanted interrupts from the interface during the reset. | ||
1139 | * However, due to the design of PC hardware, this will cause an | ||
1140 | * immediate interrupt due to the edge transition it produces. | ||
1141 | * This single interrupt gives us a "fast poll" for drives that | ||
1142 | * recover from reset very quickly, saving us the first 50ms wait time. | ||
1143 | * | ||
1144 | * TODO: add ->softreset method and stop abusing ->set_irq | ||
1145 | */ | ||
1146 | /* set SRST and nIEN */ | ||
1147 | tp_ops->set_irq(hwif, 4); | ||
1148 | /* more than enough time */ | ||
1149 | udelay(10); | ||
1150 | /* clear SRST, leave nIEN (unless device is on the quirk list) */ | ||
1151 | tp_ops->set_irq(hwif, drive->quirk_list == 2); | ||
1152 | /* more than enough time */ | ||
1153 | udelay(10); | ||
1154 | hwif->poll_timeout = jiffies + WAIT_WORSTCASE; | ||
1155 | hwif->polling = 1; | ||
1156 | __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL); | ||
1157 | |||
1158 | /* | ||
1159 | * Some weird controller like resetting themselves to a strange | ||
1160 | * state when the disks are reset this way. At least, the Winbond | ||
1161 | * 553 documentation says that | ||
1162 | */ | ||
1163 | port_ops = hwif->port_ops; | ||
1164 | if (port_ops && port_ops->resetproc) | ||
1165 | port_ops->resetproc(drive); | ||
1166 | |||
1167 | spin_unlock_irqrestore(&hwif->lock, flags); | ||
1168 | return ide_started; | ||
1169 | } | ||
1170 | |||
1171 | /* | ||
1172 | * ide_do_reset() is the entry point to the drive/interface reset code. | ||
1173 | */ | ||
1174 | |||
1175 | ide_startstop_t ide_do_reset (ide_drive_t *drive) | ||
1176 | { | ||
1177 | return do_reset1(drive, 0); | ||
1178 | } | ||
1179 | |||
1180 | EXPORT_SYMBOL(ide_do_reset); | ||
1181 | |||
1182 | /* | 498 | /* |
1183 | * ide_wait_not_busy() waits for the currently selected device on the hwif | 499 | * ide_wait_not_busy() waits for the currently selected device on the hwif |
1184 | * to report a non-busy status, see comments in ide_probe_port(). | 500 | * to report a non-busy status, see comments in ide_probe_port(). |
@@ -1187,7 +503,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1187 | { | 503 | { |
1188 | u8 stat = 0; | 504 | u8 stat = 0; |
1189 | 505 | ||
1190 | while(timeout--) { | 506 | while (timeout--) { |
1191 | /* | 507 | /* |
1192 | * Turn this into a schedule() sleep once I'm sure | 508 | * Turn this into a schedule() sleep once I'm sure |
1193 | * about locking issues (2.5 work ?). | 509 | * about locking issues (2.5 work ?). |