Diffstat (limited to 'drivers/ide/ide-io.c')
-rw-r--r--	drivers/ide/ide-io.c	72
1 file changed, 64 insertions, 8 deletions
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 4f2f138de2c..fb6795236e7 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -24,7 +24,6 @@
  */
 
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -223,6 +222,63 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
 }
 
 /**
+ * ide_end_dequeued_request - complete an IDE I/O
+ * @drive: IDE device for the I/O
+ * @uptodate: non-zero on success, zero or a negative error code on failure
+ * @nr_sectors: number of sectors completed
+ *
+ * Complete an I/O that is no longer on the request queue. This
+ * typically occurs when we pull the request and issue a REQUEST_SENSE.
+ * We must still finish the old request but we must not tamper with the
+ * queue in the meantime.
+ *
+ * NOTE: This path does not handle barriers, but barriers are not
+ * supported on ide-cd anyway.
+ */
+
+int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
+			     int uptodate, int nr_sectors)
+{
+	unsigned long flags;
+	int ret = 1;
+
+	spin_lock_irqsave(&ide_lock, flags);
+
+	BUG_ON(!(rq->flags & REQ_STARTED));
+
+	/*
+	 * if failfast is set on a request, override number of sectors and
+	 * complete the whole request right now
+	 */
+	if (blk_noretry_request(rq) && end_io_error(uptodate))
+		nr_sectors = rq->hard_nr_sectors;
+
+	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
+		rq->errors = -EIO;
+
+	/*
+	 * decide whether to reenable DMA -- 3 is a random magic for now,
+	 * if we DMA timeout more than 3 times, just stay in PIO
+	 */
+	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
+		drive->state = 0;
+		HWGROUP(drive)->hwif->ide_dma_on(drive);
+	}
+
+	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+		add_disk_randomness(rq->rq_disk);
+		if (blk_rq_tagged(rq))
+			blk_queue_end_tag(drive->queue, rq);
+		end_that_request_last(rq, uptodate);
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&ide_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
+
+
+/**
  * ide_complete_pm_request - end the current Power Management request
  * @drive: target drive
  * @rq: request
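
The new export is meant for callers that have already pulled a request off the queue, e.g. an ATAPI error path that issued a REQUEST_SENSE for it. A minimal usage sketch (the caller and its error handling are hypothetical, not part of this patch):

	/* Hypothetical ide-cd style error path: rq was dequeued earlier,
	 * a REQUEST_SENSE has run, now finish the original request. */
	static void example_finish_dequeued(ide_drive_t *drive, struct request *rq)
	{
		/* uptodate == 0 marks the I/O as failed; the helper returns
		 * non-zero if the request was not fully completed. */
		if (ide_end_dequeued_request(drive, rq, 0, rq->hard_nr_sectors))
			printk(KERN_ERR "%s: dequeued request not fully ended\n",
			       drive->name);
	}
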
@@ -448,7 +504,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
 		}
 	}
 
-	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ)
+	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && hwif->err_stops_fifo == 0)
 		try_to_flush_leftover_data(drive);
 
 	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
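
try_to_flush_leftover_data() is now skipped when the interface reports err_stops_fifo, i.e. on controllers whose FIFO stops on an error so there is no leftover data to drain. A sketch of how a host driver would opt in (the init function name is illustrative; the flag itself is what the hunk tests):

	/* Hypothetical host-driver init: this controller stops its FIFO
	 * on error, so the generic error path must not try to drain it. */
	static void init_hwif_example(ide_hwif_t *hwif)
	{
		hwif->err_stops_fifo = 1;
	}
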
@@ -637,7 +693,7 @@ static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
 	u8 stat = hwif->INB(IDE_STATUS_REG);
 	int retries = 10;
 
-	local_irq_enable();
+	local_irq_enable_in_hardirq();
 	if ((stat & DRQ_STAT) && args && args[3]) {
 		u8 io_32bit = drive->io_32bit;
 		drive->io_32bit = 0;
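
The local_irq_enable_in_hardirq() variant (here and in two later hunks, in ide_do_request() and ide_intr()) exists for lockdep: with the lock validator enabled it compiles to a no-op so interrupts stay disabled inside hard-IRQ context, and without lockdep it is plain local_irq_enable(). Approximately, from <linux/interrupt.h> of that era (quoted from memory, so treat as a sketch):

	#ifdef CONFIG_LOCKDEP
	# define local_irq_enable_in_hardirq()	do { } while (0)
	#else
	# define local_irq_enable_in_hardirq()	local_irq_enable()
	#endif
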
@@ -902,7 +958,7 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
 		printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
 		SELECT_DRIVE(drive);
 		HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
-		rc = ide_wait_not_busy(HWIF(drive), 10000);
+		rc = ide_wait_not_busy(HWIF(drive), 100000);
 		if (rc)
 			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
 	}
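
ide_wait_not_busy() polls the status register once per millisecond, so its timeout argument is in milliseconds and the wakeup wait grows from 10 s to 100 s here. Its core loop is essentially the following (simplified sketch, not the verbatim source):

	/* Poll BSY once per millisecond until it clears or time runs out. */
	while (timeout--) {
		mdelay(1);
		stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
		if ((stat & BUSY_STAT) == 0)
			return 0;
	}
	return -EBUSY;
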
@@ -1230,7 +1286,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
 			disable_irq_nosync(hwif->irq);
 		spin_unlock(&ide_lock);
-		local_irq_enable();
+		local_irq_enable_in_hardirq();
 		/* allow other IRQs while we start this request */
 		startstop = start_request(drive, rq);
 		spin_lock_irq(&ide_lock);
@@ -1575,7 +1631,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
 	spin_unlock(&ide_lock);
 
 	if (drive->unmask)
-		local_irq_enable();
+		local_irq_enable_in_hardirq();
 	/* service this interrupt, may set handler for next interrupt */
 	startstop = handler(drive);
 	spin_lock_irq(&ide_lock);
@@ -1608,7 +1664,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
  * Initialize a request before we fill it in and send it down to
  * ide_do_drive_cmd. Commands must be set up by this function. Right
  * now it doesn't do a lot, but if that changes abusers will have a
- * nasty suprise.
+ * nasty surprise.
  */
 
 void ide_init_drive_cmd (struct request *rq)
@@ -1649,7 +1705,7 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
 {
 	unsigned long flags;
 	ide_hwgroup_t *hwgroup = HWGROUP(drive);
-	DECLARE_COMPLETION(wait);
+	DECLARE_COMPLETION_ONSTACK(wait);
 	int where = ELEVATOR_INSERT_BACK, err;
 	int must_wait = (action == ide_wait || action == ide_head_wait);
 
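
DECLARE_COMPLETION_ONSTACK() is the lockdep-aware way to put a completion on the stack: it runs init_completion() at runtime so each on-stack instance gets its own lock class instead of sharing the static initializer's. Approximately, from <linux/completion.h> of that era (quoted from memory, so treat as a sketch):

	#ifdef CONFIG_LOCKDEP
	# define DECLARE_COMPLETION_ONSTACK(work) \
		struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
	#else
	# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
	#endif
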