 drivers/ide/ide-io.c | 44 ++++++++++----------------------------------
 1 file changed, 10 insertions(+), 34 deletions(-)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index e2eb408a8a18..c89f0d3058e9 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -55,7 +55,7 @@
 #include <asm/io.h>
 
 static int __ide_end_request(ide_drive_t *drive, struct request *rq,
-			     int uptodate, unsigned int nr_bytes)
+			     int uptodate, unsigned int nr_bytes, int dequeue)
 {
 	int ret = 1;
 
@@ -80,9 +80,11 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 
 	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
 		add_disk_randomness(rq->rq_disk);
-		if (!list_empty(&rq->queuelist))
-			blkdev_dequeue_request(rq);
-		HWGROUP(drive)->rq = NULL;
+		if (dequeue) {
+			if (!list_empty(&rq->queuelist))
+				blkdev_dequeue_request(rq);
+			HWGROUP(drive)->rq = NULL;
+		}
 		end_that_request_last(rq, uptodate);
 		ret = 0;
 	}
@@ -122,7 +124,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 		nr_bytes = rq->hard_cur_sectors << 9;
 	}
 
-	ret = __ide_end_request(drive, rq, uptodate, nr_bytes);
+	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
 
 	spin_unlock_irqrestore(&ide_lock, flags);
 	return ret;
@@ -255,39 +257,13 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
 			     int uptodate, int nr_sectors)
 {
 	unsigned long flags;
-	int ret = 1;
+	int ret;
 
 	spin_lock_irqsave(&ide_lock, flags);
-
 	BUG_ON(!blk_rq_started(rq));
-
-	/*
-	 * if failfast is set on a request, override number of sectors and
-	 * complete the whole request right now
-	 */
-	if (blk_noretry_request(rq) && end_io_error(uptodate))
-		nr_sectors = rq->hard_nr_sectors;
-
-	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
-		rq->errors = -EIO;
-
-	/*
-	 * decide whether to reenable DMA -- 3 is a random magic for now,
-	 * if we DMA timeout more than 3 times, just stay in PIO
-	 */
-	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
-		drive->state = 0;
-		HWGROUP(drive)->hwif->ide_dma_on(drive);
-	}
-
-	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
-		add_disk_randomness(rq->rq_disk);
-		if (blk_rq_tagged(rq))
-			blk_queue_end_tag(drive->queue, rq);
-		end_that_request_last(rq, uptodate);
-		ret = 0;
-	}
+	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
 	spin_unlock_irqrestore(&ide_lock, flags);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
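---

Note: viewed as plain C, this patch folds the completion logic that
ide_end_dequeued_request() used to duplicate into __ide_end_request(),
behind a new dequeue parameter: ide_end_request() passes 1 and keeps
dequeuing the request and clearing HWGROUP(drive)->rq, while
ide_end_dequeued_request() passes 0 since, as its name suggests, its
request is already off the queue. The standalone sketch below models
only that flag-parameter pattern; the types and names (fake_request,
fake_hwgroup, fake_end_request) are invented for illustration and are
not kernel APIs.

#include <stdio.h>
#include <stdbool.h>

/* Invented stand-ins for the ide structures; not kernel definitions. */
struct fake_request {
	const char *name;
	bool on_queue;			/* still linked on the dispatch queue? */
};

struct fake_hwgroup {
	struct fake_request *rq;	/* request the IRQ handler currently owns */
};

/*
 * Shared completion core, mirroring the shape of __ide_end_request()
 * after this patch: the dequeue flag decides whether queue/hwgroup
 * state is touched here at all.
 */
static int fake_end_request(struct fake_hwgroup *hwgroup,
			    struct fake_request *rq, int dequeue)
{
	if (dequeue) {
		if (rq->on_queue)
			rq->on_queue = false;	/* cf. blkdev_dequeue_request() */
		hwgroup->rq = NULL;		/* cf. HWGROUP(drive)->rq = NULL */
	}
	printf("completed %s (dequeue=%d)\n", rq->name, dequeue);
	return 0;
}

int main(void)
{
	struct fake_request a = { "normal rq", true };
	struct fake_request b = { "tagged rq", false };	/* already dequeued */
	struct fake_hwgroup hwg = { &a };

	fake_end_request(&hwg, &a, 1);	/* the ide_end_request() path */
	fake_end_request(&hwg, &b, 0);	/* the ide_end_dequeued_request() path */
	return 0;
}

The payoff is visible in the diffstat: ide_end_dequeued_request()
shrinks to a BUG_ON() plus a single call, and presumably now picks up
__ide_end_request()'s failfast and DMA-retry handling from the shared
path instead of carrying its own copy of that logic.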