aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/ide
diff options
context:
space:
mode:
authorKiyoshi Ueda <k-ueda@ct.jp.nec.com>2008-01-28 04:34:20 -0500
committerJens Axboe <jens.axboe@oracle.com>2008-01-28 04:37:02 -0500
commit5e36bb6ee8d5ff6c6114b60d2aaa1c70d4275f4e (patch)
tree19d7cca04195c9b9952262cae852edf662702e4c /drivers/ide
parentea6f06f416347448645e60294d92c0c19aba8589 (diff)
blk_end_request: changing ide normal caller (take 4)
This patch converts "normal" parts of ide to use blk_end_request interfaces. Related 'uptodate' arguments are converted to 'error'. The conversion of 'uptodate' to 'error' is done only for the internal function, __ide_end_request(). ide_end_request() was not changed since it's exported and used by many ide drivers. With this patch, blkdev_dequeue_request() in __ide_end_request() is moved to blk_end_request, since blk_end_request takes care of dequeueing the request like below: if (!list_empty(&rq->queuelist)) blkdev_dequeue_request(rq); In the case of ide, o 'dequeue' variable of __ide_end_request() is 1 only when the request is still linked to the queue (i.e. rq->queuelist is not empty) o 'dequeue' variable of __ide_end_request() is 0 only when the request has already been removed from the queue (i.e. rq->queuelist is empty) So blk_end_request can handle it correctly although ide always runs through the code above. Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com> Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com> Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/ide')
-rw-r--r--drivers/ide/ide-cd.c6
-rw-r--r--drivers/ide/ide-io.c25
2 files changed, 15 insertions, 16 deletions
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 44b033ec0ab0..282f1580fca9 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
655 BUG(); 655 BUG();
656 } else { 656 } else {
657 spin_lock_irqsave(&ide_lock, flags); 657 spin_lock_irqsave(&ide_lock, flags);
658 end_that_request_chunk(failed, 0, 658 if (__blk_end_request(failed, -EIO,
659 failed->data_len); 659 failed->data_len))
660 end_that_request_last(failed, 0); 660 BUG();
661 spin_unlock_irqrestore(&ide_lock, flags); 661 spin_unlock_irqrestore(&ide_lock, flags);
662 } 662 }
663 } else 663 } else
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 6f8f544392a8..e6bb9cf24e3d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -58,15 +58,19 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
58 int uptodate, unsigned int nr_bytes, int dequeue) 58 int uptodate, unsigned int nr_bytes, int dequeue)
59{ 59{
60 int ret = 1; 60 int ret = 1;
61 int error = 0;
62
63 if (uptodate <= 0)
64 error = uptodate ? uptodate : -EIO;
61 65
62 /* 66 /*
63 * if failfast is set on a request, override number of sectors and 67 * if failfast is set on a request, override number of sectors and
64 * complete the whole request right now 68 * complete the whole request right now
65 */ 69 */
66 if (blk_noretry_request(rq) && end_io_error(uptodate)) 70 if (blk_noretry_request(rq) && error)
67 nr_bytes = rq->hard_nr_sectors << 9; 71 nr_bytes = rq->hard_nr_sectors << 9;
68 72
69 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors) 73 if (!blk_fs_request(rq) && error && !rq->errors)
70 rq->errors = -EIO; 74 rq->errors = -EIO;
71 75
72 /* 76 /*
@@ -78,14 +82,9 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
78 ide_dma_on(drive); 82 ide_dma_on(drive);
79 } 83 }
80 84
81 if (!end_that_request_chunk(rq, uptodate, nr_bytes)) { 85 if (!__blk_end_request(rq, error, nr_bytes)) {
82 add_disk_randomness(rq->rq_disk); 86 if (dequeue)
83 if (dequeue) {
84 if (!list_empty(&rq->queuelist))
85 blkdev_dequeue_request(rq);
86 HWGROUP(drive)->rq = NULL; 87 HWGROUP(drive)->rq = NULL;
87 }
88 end_that_request_last(rq, uptodate);
89 ret = 0; 88 ret = 0;
90 } 89 }
91 90
@@ -290,9 +289,9 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
290 drive->blocked = 0; 289 drive->blocked = 0;
291 blk_start_queue(drive->queue); 290 blk_start_queue(drive->queue);
292 } 291 }
293 blkdev_dequeue_request(rq);
294 HWGROUP(drive)->rq = NULL; 292 HWGROUP(drive)->rq = NULL;
295 end_that_request_last(rq, 1); 293 if (__blk_end_request(rq, 0, 0))
294 BUG();
296 spin_unlock_irqrestore(&ide_lock, flags); 295 spin_unlock_irqrestore(&ide_lock, flags);
297} 296}
298 297
@@ -387,10 +386,10 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
387 } 386 }
388 387
389 spin_lock_irqsave(&ide_lock, flags); 388 spin_lock_irqsave(&ide_lock, flags);
390 blkdev_dequeue_request(rq);
391 HWGROUP(drive)->rq = NULL; 389 HWGROUP(drive)->rq = NULL;
392 rq->errors = err; 390 rq->errors = err;
393 end_that_request_last(rq, !rq->errors); 391 if (__blk_end_request(rq, (rq->errors ? -EIO : 0), 0))
392 BUG();
394 spin_unlock_irqrestore(&ide_lock, flags); 393 spin_unlock_irqrestore(&ide_lock, flags);
395} 394}
396 395