-rw-r--r--  arch/arm/plat-omap/mailbox.c     |  16
-rw-r--r--  arch/um/drivers/ubd_kern.c       |  16
-rw-r--r--  block/ll_rw_blk.c                | 298
-rw-r--r--  drivers/block/DAC960.c           |  11
-rw-r--r--  drivers/block/cciss.c            |  25
-rw-r--r--  drivers/block/cpqarray.c         |  36
-rw-r--r--  drivers/block/floppy.c           |  16
-rw-r--r--  drivers/block/nbd.c              |   8
-rw-r--r--  drivers/block/ps3disk.c          |  12
-rw-r--r--  drivers/block/sunvdc.c           |  11
-rw-r--r--  drivers/block/sx8.c              |  58
-rw-r--r--  drivers/block/ub.c               |  10
-rw-r--r--  drivers/block/viodasd.c          |  15
-rw-r--r--  drivers/block/xen-blkfront.c     |  10
-rw-r--r--  drivers/block/xsysace.c          |   5
-rw-r--r--  drivers/cdrom/viocd.c            |  15
-rw-r--r--  drivers/ide/ide-cd.c             |  55
-rw-r--r--  drivers/ide/ide-io.c             |  25
-rw-r--r--  drivers/message/i2o/i2o_block.c  |  20
-rw-r--r--  drivers/mmc/card/block.c         |  24
-rw-r--r--  drivers/mmc/card/queue.c         |   4
-rw-r--r--  drivers/s390/block/dasd.c        |  17
-rw-r--r--  drivers/s390/char/tape_block.c   |  13
-rw-r--r--  drivers/scsi/ide-scsi.c          |   8
-rw-r--r--  drivers/scsi/scsi_lib.c          |  31
-rw-r--r--  include/linux/blkdev.h           |  31
26 files changed, 416 insertions(+), 374 deletions(-)
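
In rough terms, the series replaces the old two-step completion pattern — end_that_request_first() plus end_that_request_last(), driven by an "uptodate" flag and a manual blkdev_dequeue_request() — with the byte-based blk_end_request() family, which takes 0 or a negative errno. A minimal sketch of the driver-side conversion, with a hypothetical complete_io() helper standing in for any of the drivers below:

	/* Before: sector-based, uptodate flag; the caller dequeues and finishes. */
	static void complete_io(struct request *req, int uptodate, int nr_sectors)
	{
		if (end_that_request_first(req, uptodate, nr_sectors))
			return;				/* buffers still pending */
		add_disk_randomness(req->rq_disk);
		blkdev_dequeue_request(req);		/* queue lock must be held */
		end_that_request_last(req, uptodate);
	}

	/* After: byte-based, 0/-errno; __blk_end_request() dequeues and
	 * finishes the request itself once no buffers remain. */
	static void complete_io(struct request *req, int error, int nr_sectors)
	{
		__blk_end_request(req, error, nr_sectors << 9);
	}
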
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 45a77df668f1..1945ddfec18d 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -116,8 +116,8 @@ static void mbox_tx_work(struct work_struct *work)
 		}
 
 		spin_lock(q->queue_lock);
-		blkdev_dequeue_request(rq);
-		end_that_request_last(rq, 0);
+		if (__blk_end_request(rq, 0, 0))
+			BUG();
 		spin_unlock(q->queue_lock);
 	}
 }
@@ -149,10 +149,8 @@ static void mbox_rx_work(struct work_struct *work)
 
 		msg = (mbox_msg_t) rq->data;
 
-		spin_lock_irqsave(q->queue_lock, flags);
-		blkdev_dequeue_request(rq);
-		end_that_request_last(rq, 0);
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		if (blk_end_request(rq, 0, 0))
+			BUG();
 
 		mbox->rxq->callback((void *)msg);
 	}
@@ -263,10 +261,8 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
 
 		*p = (mbox_msg_t) rq->data;
 
-		spin_lock_irqsave(q->queue_lock, flags);
-		blkdev_dequeue_request(rq);
-		end_that_request_last(rq, 0);
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		if (blk_end_request(rq, 0, 0))
+			BUG();
 
 		if (unlikely(mbox_seq_test(mbox, *p))) {
 			pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index b1a77b11f089..99f9f9605e9c 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -475,17 +475,9 @@ static void do_ubd_request(struct request_queue * q);
 /* Only changed by ubd_init, which is an initcall. */
 int thread_fd = -1;
 
-static void ubd_end_request(struct request *req, int bytes, int uptodate)
+static void ubd_end_request(struct request *req, int bytes, int error)
 {
-	if (!end_that_request_first(req, uptodate, bytes >> 9)) {
-		struct ubd *dev = req->rq_disk->private_data;
-		unsigned long flags;
-
-		add_disk_randomness(req->rq_disk);
-		spin_lock_irqsave(&dev->lock, flags);
-		end_that_request_last(req, uptodate);
-		spin_unlock_irqrestore(&dev->lock, flags);
-	}
+	blk_end_request(req, error, bytes);
 }
 
 /* Callable only from interrupt context - otherwise you need to do
@@ -493,10 +485,10 @@ static void ubd_end_request(struct request *req, int bytes, int uptodate)
 static inline void ubd_finish(struct request *req, int bytes)
 {
 	if(bytes < 0){
-		ubd_end_request(req, 0, 0);
+		ubd_end_request(req, 0, -EIO);
 		return;
 	}
-	ubd_end_request(req, bytes, 1);
+	ubd_end_request(req, bytes, 0);
 }
 
 static LIST_HEAD(restart);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b901db63f6ae..c16fdfed8c62 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -347,7 +347,6 @@ unsigned blk_ordered_req_seq(struct request *rq)
 void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
 	struct request *rq;
-	int uptodate;
 
 	if (error && !q->orderr)
 		q->orderr = error;
@@ -361,15 +360,11 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 	/*
 	 * Okay, sequence complete.
 	 */
-	uptodate = 1;
-	if (q->orderr)
-		uptodate = q->orderr;
-
 	q->ordseq = 0;
 	rq = q->orig_bar_rq;
 
-	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-	end_that_request_last(rq, uptodate);
+	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
+		BUG();
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -486,9 +481,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 			 * ORDERED_NONE while this request is on it.
 			 */
 			blkdev_dequeue_request(rq);
-			end_that_request_first(rq, -EOPNOTSUPP,
-					       rq->hard_nr_sectors);
-			end_that_request_last(rq, -EOPNOTSUPP);
+			if (__blk_end_request(rq, -EOPNOTSUPP,
+					      blk_rq_bytes(rq)))
+				BUG();
 			*rqp = NULL;
 			return 0;
 		}
@@ -3437,29 +3432,36 @@ static void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	}
 }
 
-static int __end_that_request_first(struct request *req, int uptodate,
+/**
+ * __end_that_request_first - end I/O on a request
+ * @req:      the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @req, and sets it up
+ *     for the next range of segments (if any) in the cluster.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+static int __end_that_request_first(struct request *req, int error,
 				    int nr_bytes)
 {
-	int total_bytes, bio_nbytes, error, next_idx = 0;
+	int total_bytes, bio_nbytes, next_idx = 0;
 	struct bio *bio;
 
 	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
 
 	/*
-	 * extend uptodate bool to allow < 0 value to be direct io error
-	 */
-	error = 0;
-	if (end_io_error(uptodate))
-		error = !uptodate ? -EIO : uptodate;
-
-	/*
 	 * for a REQ_BLOCK_PC request, we want to carry any eventual
 	 * sense key with us all the way through
 	 */
 	if (!blk_pc_request(req))
 		req->errors = 0;
 
-	if (!uptodate) {
+	if (error) {
 		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
 			printk("end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
@@ -3553,49 +3555,6 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	return 1;
 }
 
-/**
- * end_that_request_first - end I/O on a request
- * @req:      the request being processed
- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
- * @nr_sectors: number of sectors to end I/O on
- *
- * Description:
- *     Ends I/O on a number of sectors attached to @req, and sets it up
- *     for the next range of segments (if any) in the cluster.
- *
- * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
- **/
-int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
-{
-	return __end_that_request_first(req, uptodate, nr_sectors << 9);
-}
-
-EXPORT_SYMBOL(end_that_request_first);
-
-/**
- * end_that_request_chunk - end I/O on a request
- * @req:      the request being processed
- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @req, and sets it up
- *     for the next range of segments (if any). Like end_that_request_first(),
- *     but deals with bytes instead of sectors.
- *
- * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
- **/
-int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
-{
-	return __end_that_request_first(req, uptodate, nr_bytes);
-}
-
-EXPORT_SYMBOL(end_that_request_chunk);
-
 /*
  * splice the completion data to a local structure and hand off to
  * process_completion_queue() to complete the requests
@@ -3675,17 +3634,15 @@ EXPORT_SYMBOL(blk_complete_request);
 /*
  * queue lock must be held
  */
-void end_that_request_last(struct request *req, int uptodate)
+static void end_that_request_last(struct request *req, int error)
 {
 	struct gendisk *disk = req->rq_disk;
-	int error;
 
-	/*
-	 * extend uptodate bool to allow < 0 value to be direct io error
-	 */
-	error = 0;
-	if (end_io_error(uptodate))
-		error = !uptodate ? -EIO : uptodate;
+	if (blk_rq_tagged(req))
+		blk_queue_end_tag(req->q, req);
+
+	if (blk_queued_rq(req))
+		blkdev_dequeue_request(req);
 
 	if (unlikely(laptop_mode) && blk_fs_request(req))
 		laptop_io_completion();
@@ -3704,32 +3661,54 @@ void end_that_request_last(struct request *req, int uptodate)
 		disk_round_stats(disk);
 		disk->in_flight--;
 	}
+
 	if (req->end_io)
 		req->end_io(req, error);
-	else
+	else {
+		if (blk_bidi_rq(req))
+			__blk_put_request(req->next_rq->q, req->next_rq);
+
 		__blk_put_request(req->q, req);
+	}
 }
 
-EXPORT_SYMBOL(end_that_request_last);
-
 static inline void __end_request(struct request *rq, int uptodate,
-				 unsigned int nr_bytes, int dequeue)
+				 unsigned int nr_bytes)
 {
-	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-		if (dequeue)
-			blkdev_dequeue_request(rq);
-		add_disk_randomness(rq->rq_disk);
-		end_that_request_last(rq, uptodate);
-	}
+	int error = 0;
+
+	if (uptodate <= 0)
+		error = uptodate ? uptodate : -EIO;
+
+	__blk_end_request(rq, error, nr_bytes);
 }
 
-static unsigned int rq_byte_size(struct request *rq)
+/**
+ * blk_rq_bytes - Returns bytes left to complete in the entire request
+ **/
+unsigned int blk_rq_bytes(struct request *rq)
 {
 	if (blk_fs_request(rq))
 		return rq->hard_nr_sectors << 9;
 
 	return rq->data_len;
 }
+EXPORT_SYMBOL_GPL(blk_rq_bytes);
+
+/**
+ * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ **/
+unsigned int blk_rq_cur_bytes(struct request *rq)
+{
+	if (blk_fs_request(rq))
+		return rq->current_nr_sectors << 9;
+
+	if (rq->bio)
+		return rq->bio->bi_size;
+
+	return rq->data_len;
+}
+EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
 /**
  * end_queued_request - end all I/O on a queued request
@@ -3744,7 +3723,7 @@ static unsigned int rq_byte_size(struct request *rq)
  **/
 void end_queued_request(struct request *rq, int uptodate)
 {
-	__end_request(rq, uptodate, rq_byte_size(rq), 1);
+	__end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_queued_request);
 
@@ -3761,7 +3740,7 @@ EXPORT_SYMBOL(end_queued_request);
  **/
 void end_dequeued_request(struct request *rq, int uptodate)
 {
-	__end_request(rq, uptodate, rq_byte_size(rq), 0);
+	__end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_dequeued_request);
 
@@ -3787,10 +3766,159 @@ EXPORT_SYMBOL(end_dequeued_request);
  **/
 void end_request(struct request *req, int uptodate)
 {
-	__end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+	__end_request(req, uptodate, req->hard_cur_sectors << 9);
 }
 EXPORT_SYMBOL(end_request);
 
+/**
+ * blk_end_io - Generic end_io function to complete a request.
+ * @rq:           the request being processed
+ * @error:        0 for success, < 0 for error
+ * @nr_bytes:     number of bytes to complete @rq
+ * @bidi_bytes:   number of bytes to complete @rq->next_rq
+ * @drv_callback: function called between completion of bios in the request
+ *                and completion of the request.
+ *                If the callback returns non 0, this helper returns without
+ *                completion of the request.
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - this request is not freed yet, it still has pending buffers.
+ **/
+static int blk_end_io(struct request *rq, int error, int nr_bytes,
+		      int bidi_bytes, int (drv_callback)(struct request *))
+{
+	struct request_queue *q = rq->q;
+	unsigned long flags = 0UL;
+
+	if (blk_fs_request(rq) || blk_pc_request(rq)) {
+		if (__end_that_request_first(rq, error, nr_bytes))
+			return 1;
+
+		/* Bidi request must be completed as a whole */
+		if (blk_bidi_rq(rq) &&
+		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
+			return 1;
+	}
+
+	/* Special feature for tricky drivers */
+	if (drv_callback && drv_callback(rq))
+		return 1;
+
+	add_disk_randomness(rq->rq_disk);
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	end_that_request_last(rq, error);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	return 0;
+}
+
+/**
+ * blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+	return blk_end_io(rq, error, nr_bytes, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(blk_end_request);
+
+/**
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Must be called with queue lock held unlike blk_end_request().
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int __blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+	if (blk_fs_request(rq) || blk_pc_request(rq)) {
+		if (__end_that_request_first(rq, error, nr_bytes))
+			return 1;
+	}
+
+	add_disk_randomness(rq->rq_disk);
+
+	end_that_request_last(rq, error);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__blk_end_request);
+
+/**
+ * blk_end_bidi_request - Helper function for drivers to complete bidi request.
+ * @rq:         the bidi request being processed
+ * @error:      0 for success, < 0 for error
+ * @nr_bytes:   number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
+			 int bidi_bytes)
+{
+	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
+}
+EXPORT_SYMBOL_GPL(blk_end_bidi_request);
+
+/**
+ * blk_end_request_callback - Special helper function for tricky drivers
+ * @rq:           the request being processed
+ * @error:        0 for success, < 0 for error
+ * @nr_bytes:     number of bytes to complete
+ * @drv_callback: function called between completion of bios in the request
+ *                and completion of the request.
+ *                If the callback returns non 0, this helper returns without
+ *                completion of the request.
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ *     This special helper function is used only for existing tricky drivers.
+ *     (e.g. cdrom_newpc_intr() of ide-cd)
+ *     This interface will be removed when such drivers are rewritten.
+ *     Don't use this interface in other places anymore.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - this request is not freed yet.
+ *         this request still has pending buffers or
+ *         the driver doesn't want to finish this request yet.
+ **/
+int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
+			     int (drv_callback)(struct request *))
+{
+	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
+}
+EXPORT_SYMBOL_GPL(blk_end_request_callback);
+
 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 			    struct bio *bio)
 {
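
The practical split between the two new helpers above is lock context: blk_end_io() takes q->queue_lock around end_that_request_last() itself, so blk_end_request() may be called without the lock held, while __blk_end_request() assumes the caller already holds it (per the kerneldoc above). A rough sketch of how the drivers below pick between them — 'q', 'rq' and 'error' stand in for whatever the surrounding driver provides:

	/* Without the queue lock held (e.g. softirq completion, as in cciss): */
	static void complete_without_lock(struct request *rq, int error)
	{
		if (blk_end_request(rq, error, blk_rq_bytes(rq)))
			BUG();	/* whole-request completion must leave nothing pending */
	}

	/* With the queue lock already held (e.g. under ide_lock in ide-cd): */
	static void complete_with_lock_held(struct request_queue *q,
					    struct request *rq, int error)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
			BUG();
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
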
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 9030c373ce67..cd03473f3547 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3455,19 +3455,12 @@ static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
						 bool SuccessfulIO)
 {
 	struct request *Request = Command->Request;
-	int UpToDate;
-
-	UpToDate = 0;
-	if (SuccessfulIO)
-		UpToDate = 1;
+	int Error = SuccessfulIO ? 0 : -EIO;
 
 	pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
 		Command->SegmentCount, Command->DmaDirection);
 
-	if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
-		add_disk_randomness(Request->rq_disk);
-		end_that_request_last(Request, UpToDate);
-
+	if (!__blk_end_request(Request, Error, Command->BlockCount << 9)) {
 		if (Command->Completion) {
 			complete(Command->Completion);
 			Command->Completion = NULL;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 509b6490413b..ef50068def88 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1187,17 +1187,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
 	}
 }
 
-static inline void complete_buffers(struct bio *bio, int status)
-{
-	while (bio) {
-		struct bio *xbh = bio->bi_next;
-
-		bio->bi_next = NULL;
-		bio_endio(bio, status ? 0 : -EIO);
-		bio = xbh;
-	}
-}
-
 static void cciss_check_queues(ctlr_info_t *h)
 {
 	int start_queue = h->next_to_run;
@@ -1263,21 +1252,14 @@ static void cciss_softirq_done(struct request *rq)
 		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
 	}
 
-	complete_buffers(rq->bio, (rq->errors == 0));
-
-	if (blk_fs_request(rq)) {
-		const int rw = rq_data_dir(rq);
-
-		disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
-	}
-
 #ifdef CCISS_DEBUG
 	printk("Done with %p\n", rq);
 #endif /* CCISS_DEBUG */
 
-	add_disk_randomness(rq->rq_disk);
+	if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
+		BUG();
+
 	spin_lock_irqsave(&h->lock, flags);
-	end_that_request_last(rq, (rq->errors == 0));
 	cmd_free(h, cmd, 1);
 	cciss_check_queues(h);
 	spin_unlock_irqrestore(&h->lock, flags);
@@ -2544,7 +2526,6 @@ after_error_processing:
 	}
 	cmd->rq->data_len = 0;
 	cmd->rq->completion_data = cmd;
-	blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
 	blk_complete_request(cmd->rq);
 }
 
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index c8132d958795..69199185ff4b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -167,7 +167,6 @@ static void start_io(ctlr_info_t *h);
 
 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
-static inline void complete_buffers(struct bio *bio, int ok);
 static inline void complete_command(cmdlist_t *cmd, int timeout);
 
 static irqreturn_t do_ida_intr(int irq, void *dev_id);
@@ -980,26 +979,13 @@ static void start_io(ctlr_info_t *h)
 	}
 }
 
-static inline void complete_buffers(struct bio *bio, int ok)
-{
-	struct bio *xbh;
-
-	while (bio) {
-		xbh = bio->bi_next;
-		bio->bi_next = NULL;
-
-		bio_endio(bio, ok ? 0 : -EIO);
-
-		bio = xbh;
-	}
-}
 /*
  * Mark all buffers that cmd was responsible for
  */
 static inline void complete_command(cmdlist_t *cmd, int timeout)
 {
 	struct request *rq = cmd->rq;
-	int ok=1;
+	int error = 0;
 	int i, ddir;
 
 	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
@@ -1011,16 +997,17 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
 	if (cmd->req.hdr.rcode & RCODE_FATAL) {
 		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
 				cmd->ctlr, cmd->hdr.unit);
-		ok = 0;
+		error = -EIO;
 	}
 	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
 		printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
 				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
 				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
 				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
-		ok = 0;
+		error = -EIO;
 	}
-	if (timeout) ok = 0;
+	if (timeout)
+		error = -EIO;
 	/* unmap the DMA mapping for all the scatter gather elements */
 	if (cmd->req.hdr.cmd == IDA_READ)
 		ddir = PCI_DMA_FROMDEVICE;
@@ -1030,18 +1017,9 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
 		pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
 				cmd->req.sg[i].size, ddir);
 
-	complete_buffers(rq->bio, ok);
-
-	if (blk_fs_request(rq)) {
-		const int rw = rq_data_dir(rq);
-
-		disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
-	}
-
-	add_disk_randomness(rq->rq_disk);
-
 	DBGPX(printk("Done with %p\n", rq););
-	end_that_request_last(rq, ok ? 1 : -EIO);
+	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
+		BUG();
 }
 
 /*
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 639ed14bb08d..32c79a55511b 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2287,21 +2287,19 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
  * =============================
  */
 
-static void floppy_end_request(struct request *req, int uptodate)
+static void floppy_end_request(struct request *req, int error)
 {
 	unsigned int nr_sectors = current_count_sectors;
+	unsigned int drive = (unsigned long)req->rq_disk->private_data;
 
 	/* current_count_sectors can be zero if transfer failed */
-	if (!uptodate)
+	if (error)
 		nr_sectors = req->current_nr_sectors;
-	if (end_that_request_first(req, uptodate, nr_sectors))
+	if (__blk_end_request(req, error, nr_sectors << 9))
 		return;
-	add_disk_randomness(req->rq_disk);
-	floppy_off((long)req->rq_disk->private_data);
-	blkdev_dequeue_request(req);
-	end_that_request_last(req, uptodate);
 
 	/* We're done with the request */
+	floppy_off(drive);
 	current_req = NULL;
 }
 
@@ -2332,7 +2330,7 @@ static void request_done(int uptodate)
 
 		/* unlock chained buffers */
 		spin_lock_irqsave(q->queue_lock, flags);
-		floppy_end_request(req, 1);
+		floppy_end_request(req, 0);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 	} else {
 		if (rq_data_dir(req) == WRITE) {
@@ -2346,7 +2344,7 @@ static void request_done(int uptodate)
 			DRWE->last_error_generation = DRS->generation;
 		}
 		spin_lock_irqsave(q->queue_lock, flags);
-		floppy_end_request(req, 0);
+		floppy_end_request(req, -EIO);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 }
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ba9b17e507e0..ae3106045ee5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -100,17 +100,15 @@ static const char *nbdcmd_to_ascii(int cmd)
 
 static void nbd_end_request(struct request *req)
 {
-	int uptodate = (req->errors == 0) ? 1 : 0;
+	int error = req->errors ? -EIO : 0;
 	struct request_queue *q = req->q;
 	unsigned long flags;
 
 	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
-			req, uptodate? "done": "failed");
+			req, error ? "failed" : "done");
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
-		end_that_request_last(req, uptodate);
-	}
+	__blk_end_request(req, error, req->nr_sectors << 9);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index e354bfc070e1..7483f947f0e9 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -229,7 +229,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
 	struct ps3_storage_device *dev = data;
 	struct ps3disk_private *priv;
 	struct request *req;
-	int res, read, uptodate;
+	int res, read, error;
 	u64 tag, status;
 	unsigned long num_sectors;
 	const char *op;
@@ -270,21 +270,17 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
 	if (status) {
 		dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
 			__LINE__, op, status);
-		uptodate = 0;
+		error = -EIO;
 	} else {
 		dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
 			__LINE__, op);
-		uptodate = 1;
+		error = 0;
 		if (read)
 			ps3disk_scatter_gather(dev, req, 0);
 	}
 
 	spin_lock(&priv->lock);
-	if (!end_that_request_first(req, uptodate, num_sectors)) {
-		add_disk_randomness(req->rq_disk);
-		blkdev_dequeue_request(req);
-		end_that_request_last(req, uptodate);
-	}
+	__blk_end_request(req, error, num_sectors << 9);
 	priv->req = NULL;
 	ps3disk_do_request(dev, priv->queue);
 	spin_unlock(&priv->lock);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index fac4c6cd04f7..66e30155b0ab 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -212,12 +212,9 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
 	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
 }
 
-static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
+static void vdc_end_request(struct request *req, int error, int num_sectors)
 {
-	if (end_that_request_first(req, uptodate, num_sectors))
-		return;
-	add_disk_randomness(req->rq_disk);
-	end_that_request_last(req, uptodate);
+	__blk_end_request(req, error, num_sectors << 9);
 }
 
 static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
@@ -242,7 +239,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 
 	rqe->req = NULL;
 
-	vdc_end_request(req, !desc->status, desc->size >> 9);
+	vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9);
 
 	if (blk_queue_stopped(port->disk->queue))
 		blk_start_queue(port->disk->queue);
@@ -456,7 +453,7 @@ static void do_vdc_request(struct request_queue *q)
 
 		blkdev_dequeue_request(req);
 		if (__send_request(req) < 0)
-			vdc_end_request(req, 0, req->hard_nr_sectors);
+			vdc_end_request(req, -EIO, req->hard_nr_sectors);
 	}
 }
 
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 52dc5e131718..cd5674b63faf 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -744,16 +744,14 @@ static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
 
 static inline void carm_end_request_queued(struct carm_host *host,
 					   struct carm_request *crq,
-					   int uptodate)
+					   int error)
 {
 	struct request *req = crq->rq;
 	int rc;
 
-	rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
+	rc = __blk_end_request(req, error, blk_rq_bytes(req));
 	assert(rc == 0);
 
-	end_that_request_last(req, uptodate);
-
 	rc = carm_put_request(host, crq);
 	assert(rc == 0);
 }
@@ -793,9 +791,9 @@ static inline void carm_round_robin(struct carm_host *host)
 }
 
 static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
-			       int is_ok)
+			       int error)
 {
-	carm_end_request_queued(host, crq, is_ok);
+	carm_end_request_queued(host, crq, error);
 	if (max_queue == 1)
 		carm_round_robin(host);
 	else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&
@@ -873,14 +871,14 @@ queue_one_request:
 	sg = &crq->sg[0];
 	n_elem = blk_rq_map_sg(q, rq, sg);
 	if (n_elem <= 0) {
-		carm_end_rq(host, crq, 0);
+		carm_end_rq(host, crq, -EIO);
 		return;		/* request with no s/g entries? */
 	}
 
 	/* map scatterlist to PCI bus addresses */
 	n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
 	if (n_elem <= 0) {
-		carm_end_rq(host, crq, 0);
+		carm_end_rq(host, crq, -EIO);
 		return;		/* request with no s/g entries? */
 	}
 	crq->n_elem = n_elem;
@@ -941,7 +939,7 @@ queue_one_request:
 
 static void carm_handle_array_info(struct carm_host *host,
 				   struct carm_request *crq, u8 *mem,
-				   int is_ok)
+				   int error)
 {
 	struct carm_port *port;
 	u8 *msg_data = mem + sizeof(struct carm_array_info);
@@ -952,9 +950,9 @@ static void carm_handle_array_info(struct carm_host *host,
 
 	DPRINTK("ENTER\n");
 
-	carm_end_rq(host, crq, is_ok);
+	carm_end_rq(host, crq, error);
 
-	if (!is_ok)
+	if (error)
 		goto out;
 	if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
 		goto out;
@@ -1001,7 +999,7 @@ out:
 
 static void carm_handle_scan_chan(struct carm_host *host,
 				  struct carm_request *crq, u8 *mem,
-				  int is_ok)
+				  int error)
 {
 	u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
 	unsigned int i, dev_count = 0;
@@ -1009,9 +1007,9 @@ static void carm_handle_scan_chan(struct carm_host *host,
 
 	DPRINTK("ENTER\n");
 
-	carm_end_rq(host, crq, is_ok);
+	carm_end_rq(host, crq, error);
 
-	if (!is_ok) {
+	if (error) {
 		new_state = HST_ERROR;
 		goto out;
 	}
@@ -1033,23 +1031,23 @@ out:
 }
 
 static void carm_handle_generic(struct carm_host *host,
-				struct carm_request *crq, int is_ok,
+				struct carm_request *crq, int error,
 				int cur_state, int next_state)
 {
 	DPRINTK("ENTER\n");
 
-	carm_end_rq(host, crq, is_ok);
+	carm_end_rq(host, crq, error);
 
 	assert(host->state == cur_state);
-	if (is_ok)
-		host->state = next_state;
-	else
+	if (error)
 		host->state = HST_ERROR;
+	else
+		host->state = next_state;
 	schedule_work(&host->fsm_task);
 }
 
 static inline void carm_handle_rw(struct carm_host *host,
-				  struct carm_request *crq, int is_ok)
+				  struct carm_request *crq, int error)
 {
 	int pci_dir;
 
@@ -1062,7 +1060,7 @@ static inline void carm_handle_rw(struct carm_host *host,
 
 	pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);
 
-	carm_end_rq(host, crq, is_ok);
+	carm_end_rq(host, crq, error);
 }
 
 static inline void carm_handle_resp(struct carm_host *host,
@@ -1071,7 +1069,7 @@ static inline void carm_handle_resp(struct carm_host *host,
 	u32 handle = le32_to_cpu(ret_handle_le);
 	unsigned int msg_idx;
 	struct carm_request *crq;
-	int is_ok = (status == RMSG_OK);
+	int error = (status == RMSG_OK) ? 0 : -EIO;
 	u8 *mem;
 
 	VPRINTK("ENTER, handle == 0x%x\n", handle);
@@ -1090,7 +1088,7 @@ static inline void carm_handle_resp(struct carm_host *host,
 	/* fast path */
 	if (likely(crq->msg_type == CARM_MSG_READ ||
 		   crq->msg_type == CARM_MSG_WRITE)) {
-		carm_handle_rw(host, crq, is_ok);
+		carm_handle_rw(host, crq, error);
 		return;
 	}
 
@@ -1100,7 +1098,7 @@ static inline void carm_handle_resp(struct carm_host *host,
 	case CARM_MSG_IOCTL: {
 		switch (crq->msg_subtype) {
 		case CARM_IOC_SCAN_CHAN:
-			carm_handle_scan_chan(host, crq, mem, is_ok);
+			carm_handle_scan_chan(host, crq, mem, error);
 			break;
 		default:
 			/* unknown / invalid response */
@@ -1112,21 +1110,21 @@ static inline void carm_handle_resp(struct carm_host *host,
 	case CARM_MSG_MISC: {
 		switch (crq->msg_subtype) {
 		case MISC_ALLOC_MEM:
-			carm_handle_generic(host, crq, is_ok,
+			carm_handle_generic(host, crq, error,
 					    HST_ALLOC_BUF, HST_SYNC_TIME);
 			break;
 		case MISC_SET_TIME:
-			carm_handle_generic(host, crq, is_ok,
+			carm_handle_generic(host, crq, error,
 					    HST_SYNC_TIME, HST_GET_FW_VER);
 			break;
 		case MISC_GET_FW_VER: {
 			struct carm_fw_ver *ver = (struct carm_fw_ver *)
 				mem + sizeof(struct carm_msg_get_fw_ver);
-			if (is_ok) {
+			if (!error) {
 				host->fw_ver = le32_to_cpu(ver->version);
 				host->flags |= (ver->features & FL_FW_VER_MASK);
 			}
-			carm_handle_generic(host, crq, is_ok,
+			carm_handle_generic(host, crq, error,
 					    HST_GET_FW_VER, HST_PORT_SCAN);
 			break;
 		}
@@ -1140,7 +1138,7 @@ static inline void carm_handle_resp(struct carm_host *host,
 	case CARM_MSG_ARRAY: {
 		switch (crq->msg_subtype) {
 		case CARM_ARRAY_INFO:
-			carm_handle_array_info(host, crq, mem, is_ok);
+			carm_handle_array_info(host, crq, mem, error);
 			break;
 		default:
 			/* unknown / invalid response */
@@ -1159,7 +1157,7 @@ static inline void carm_handle_resp(struct carm_host *host,
 err_out:
 	printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
 	       pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
-	carm_end_rq(host, crq, 0);
+	carm_end_rq(host, crq, -EIO);
 }
 
 static inline void carm_handle_responses(struct carm_host *host)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 08e909dc7944..c6179d6ac6e4 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -808,16 +808,16 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 
 static void ub_end_rq(struct request *rq, unsigned int scsi_status)
 {
-	int uptodate;
+	int error;
 
 	if (scsi_status == 0) {
-		uptodate = 1;
+		error = 0;
 	} else {
-		uptodate = 0;
+		error = -EIO;
 		rq->errors = scsi_status;
 	}
-	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-	end_that_request_last(rq, uptodate);
+	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
+		BUG();
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index ab5d404faa11..9e61fca46117 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -229,13 +229,10 @@ static struct block_device_operations viodasd_fops = {
 /*
 * End a request
 */
-static void viodasd_end_request(struct request *req, int uptodate,
+static void viodasd_end_request(struct request *req, int error,
 		int num_sectors)
 {
-	if (end_that_request_first(req, uptodate, num_sectors))
-		return;
-	add_disk_randomness(req->rq_disk);
-	end_that_request_last(req, uptodate);
+	__blk_end_request(req, error, num_sectors << 9);
 }
 
 /*
@@ -374,12 +371,12 @@ static void do_viodasd_request(struct request_queue *q)
 		blkdev_dequeue_request(req);
 		/* check that request contains a valid command */
 		if (!blk_fs_request(req)) {
-			viodasd_end_request(req, 0, req->hard_nr_sectors);
+			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
 			continue;
 		}
 		/* Try sending the request */
 		if (send_request(req) != 0)
-			viodasd_end_request(req, 0, req->hard_nr_sectors);
+			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
 	}
 }
 
@@ -591,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
 	num_req_outstanding--;
 	spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);
 
-	error = event->xRc != HvLpEvent_Rc_Good;
+	error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
 	if (error) {
 		const struct vio_error_entry *err;
 		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
@@ -601,7 +598,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
 	}
 	qlock = req->q->queue_lock;
 	spin_lock_irqsave(qlock, irq_flags);
-	viodasd_end_request(req, !error, num_sect);
+	viodasd_end_request(req, error, num_sect);
 	spin_unlock_irqrestore(qlock, irq_flags);
 
 	/* Finally, try to get more requests off of this device's queue */
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2bdebcb3ff16..8afce67c0aa5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -452,7 +452,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 	RING_IDX i, rp;
 	unsigned long flags;
 	struct blkfront_info *info = (struct blkfront_info *)dev_id;
-	int uptodate;
+	int error;
 
 	spin_lock_irqsave(&blkif_io_lock, flags);
 
@@ -477,13 +477,13 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
 		add_id_to_freelist(info, id);
 
-		uptodate = (bret->status == BLKIF_RSP_OKAY);
+		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
 		switch (bret->operation) {
 		case BLKIF_OP_WRITE_BARRIER:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
 				       info->gd->disk_name);
-				uptodate = -EOPNOTSUPP;
+				error = -EOPNOTSUPP;
 				info->feature_barrier = 0;
 				xlvbd_barrier(info);
 			}
@@ -494,10 +494,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
 					"request: %x\n", bret->status);
 
-			ret = end_that_request_first(req, uptodate,
-				req->hard_nr_sectors);
+			ret = __blk_end_request(req, error, blk_rq_bytes(req));
 			BUG_ON(ret);
-			end_that_request_last(req, uptodate);
 			break;
 		default:
 			BUG();
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 82effce97c51..2c81465fd60c 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -703,7 +703,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
 
 		/* bio finished; is there another one? */
 		i = ace->req->current_nr_sectors;
-		if (end_that_request_first(ace->req, 1, i)) {
+		if (__blk_end_request(ace->req, 0, i)) {
 			/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
 			 *      ace->req->hard_nr_sectors,
 			 *      ace->req->current_nr_sectors);
@@ -718,9 +718,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
 		break;
 
 	case ACE_FSM_STATE_REQ_COMPLETE:
-		/* Complete the block request */
-		blkdev_dequeue_request(ace->req);
-		end_that_request_last(ace->req, 1);
 		ace->req = NULL;
 
 		/* Finished request; go to idle state */
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index d8bb44b98a6a..8473b9f1da96 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -289,7 +289,7 @@ static int send_request(struct request *req)
 	return 0;
 }
 
-static void viocd_end_request(struct request *req, int uptodate)
+static void viocd_end_request(struct request *req, int error)
 {
 	int nsectors = req->hard_nr_sectors;
 
@@ -302,11 +302,8 @@ static void viocd_end_request(struct request *req, int uptodate)
 	if (!nsectors)
 		nsectors = 1;
 
-	if (end_that_request_first(req, uptodate, nsectors))
+	if (__blk_end_request(req, error, nsectors << 9))
 		BUG();
-	add_disk_randomness(req->rq_disk);
-	blkdev_dequeue_request(req);
-	end_that_request_last(req, uptodate);
 }
 
 static int rwreq;
@@ -317,11 +314,11 @@ static void do_viocd_request(struct request_queue *q)
 
 	while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
 		if (!blk_fs_request(req))
-			viocd_end_request(req, 0);
+			viocd_end_request(req, -EIO);
 		else if (send_request(req) < 0) {
 			printk(VIOCD_KERN_WARNING
 					"unable to send message to OS/400!");
-			viocd_end_request(req, 0);
+			viocd_end_request(req, -EIO);
 		} else
 			rwreq++;
 	}
@@ -532,9 +529,9 @@ return_complete:
 					"with rc %d:0x%04X: %s\n",
 					req, event->xRc,
 					bevent->sub_result, err->msg);
-			viocd_end_request(req, 0);
+			viocd_end_request(req, -EIO);
 		} else
-			viocd_end_request(req, 1);
+			viocd_end_request(req, 0);
 
 		/* restart handling of incoming requests */
 		spin_unlock_irqrestore(&viocd_reqlock, flags);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 44b033ec0ab0..74c6087ada38 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
 				BUG();
 		} else {
 			spin_lock_irqsave(&ide_lock, flags);
-			end_that_request_chunk(failed, 0,
-					failed->data_len);
-			end_that_request_last(failed, 0);
+			if (__blk_end_request(failed, -EIO,
+					      failed->data_len))
+				BUG();
 			spin_unlock_irqrestore(&ide_lock, flags);
 		}
 	} else
@@ -1647,6 +1647,17 @@ static int cdrom_write_check_ireason(ide_drive_t *drive, int len, int ireason)
 	return 1;
 }
 
+/*
+ * Called from blk_end_request_callback() after the data of the request
+ * is completed and before the request is completed.
+ * By returning value '1', blk_end_request_callback() returns immediately
+ * without completing the request.
+ */
+static int cdrom_newpc_intr_dummy_cb(struct request *rq)
+{
+	return 1;
+}
+
 typedef void (xfer_func_t)(ide_drive_t *, void *, u32);
 
 /*
@@ -1685,9 +1696,13 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 			return ide_error(drive, "dma error", stat);
 		}
 
-		end_that_request_chunk(rq, 1, rq->data_len);
-		rq->data_len = 0;
-		goto end_request;
+		spin_lock_irqsave(&ide_lock, flags);
+		if (__blk_end_request(rq, 0, rq->data_len))
+			BUG();
+		HWGROUP(drive)->rq = NULL;
+		spin_unlock_irqrestore(&ide_lock, flags);
+
+		return ide_stopped;
 	}
 
 	/*
@@ -1705,8 +1720,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 	/*
 	 * If DRQ is clear, the command has completed.
 	 */
-	if ((stat & DRQ_STAT) == 0)
-		goto end_request;
+	if ((stat & DRQ_STAT) == 0) {
+		spin_lock_irqsave(&ide_lock, flags);
+		if (__blk_end_request(rq, 0, 0))
+			BUG();
+		HWGROUP(drive)->rq = NULL;
+		spin_unlock_irqrestore(&ide_lock, flags);
+
+		return ide_stopped;
+	}
 
 	/*
 	 * check which way to transfer data
@@ -1759,7 +1781,14 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 		rq->data_len -= blen;
 
 		if (rq->bio)
-			end_that_request_chunk(rq, 1, blen);
+			/*
+			 * The request can't be completed until DRQ is cleared.
+			 * So complete the data, but don't complete the request
+			 * using the dummy function for the callback feature
+			 * of blk_end_request_callback().
+			 */
+			blk_end_request_callback(rq, 0, blen,
+						 cdrom_newpc_intr_dummy_cb);
 		else
 			rq->data += blen;
 	}
@@ -1780,14 +1809,6 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 
 	ide_set_handler(drive, cdrom_newpc_intr, rq->timeout, NULL);
 	return ide_started;
-
-end_request:
-	spin_lock_irqsave(&ide_lock, flags);
-	blkdev_dequeue_request(rq);
-	end_that_request_last(rq, 1);
-	HWGROUP(drive)->rq = NULL;
-	spin_unlock_irqrestore(&ide_lock, flags);
-	return ide_stopped;
 }
 
 static ide_startstop_t cdrom_write_intr(ide_drive_t *drive)
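
The ide-cd conversion above is the one user of the special-case helper: cdrom_newpc_intr() must complete transferred bytes while DRQ is still set, yet must not let the request be freed until DRQ clears. Roughly, the callback contract it relies on — function names here are illustrative, not from the patch:

	/* Sketch: returning 1 from the callback makes blk_end_request_callback()
	 * return right after the bios are completed, leaving the request alive. */
	static int keep_request_alive(struct request *rq)
	{
		return 1;	/* do not finish the request yet */
	}

	static void complete_data_only(struct request *rq, unsigned int bytes)
	{
		/* completes 'bytes' of data on rq, but rq stays pending */
		blk_end_request_callback(rq, 0, bytes, keep_request_alive);
	}
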
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 6f8f544392a8..e6bb9cf24e3d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -58,15 +58,19 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
58 int uptodate, unsigned int nr_bytes, int dequeue) 58 int uptodate, unsigned int nr_bytes, int dequeue)
59{ 59{
60 int ret = 1; 60 int ret = 1;
61 int error = 0;
62
63 if (uptodate <= 0)
64 error = uptodate ? uptodate : -EIO;
61 65
62 /* 66 /*
63 * if failfast is set on a request, override number of sectors and 67 * if failfast is set on a request, override number of sectors and
64 * complete the whole request right now 68 * complete the whole request right now
65 */ 69 */
66 if (blk_noretry_request(rq) && end_io_error(uptodate)) 70 if (blk_noretry_request(rq) && error)
67 nr_bytes = rq->hard_nr_sectors << 9; 71 nr_bytes = rq->hard_nr_sectors << 9;
68 72
69 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors) 73 if (!blk_fs_request(rq) && error && !rq->errors)
70 rq->errors = -EIO; 74 rq->errors = -EIO;
71 75
72 /* 76 /*
@@ -78,14 +82,9 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
78 ide_dma_on(drive); 82 ide_dma_on(drive);
79 } 83 }
80 84
81 if (!end_that_request_chunk(rq, uptodate, nr_bytes)) { 85 if (!__blk_end_request(rq, error, nr_bytes)) {
82 add_disk_randomness(rq->rq_disk); 86 if (dequeue)
83 if (dequeue) {
84 if (!list_empty(&rq->queuelist))
85 blkdev_dequeue_request(rq);
86 HWGROUP(drive)->rq = NULL; 87 HWGROUP(drive)->rq = NULL;
87 }
88 end_that_request_last(rq, uptodate);
89 ret = 0; 88 ret = 0;
90 } 89 }
91 90
@@ -290,9 +289,9 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
290 drive->blocked = 0; 289 drive->blocked = 0;
291 blk_start_queue(drive->queue); 290 blk_start_queue(drive->queue);
292 } 291 }
293 blkdev_dequeue_request(rq);
294 HWGROUP(drive)->rq = NULL; 292 HWGROUP(drive)->rq = NULL;
295 end_that_request_last(rq, 1); 293 if (__blk_end_request(rq, 0, 0))
294 BUG();
296 spin_unlock_irqrestore(&ide_lock, flags); 295 spin_unlock_irqrestore(&ide_lock, flags);
297} 296}
298 297
@@ -387,10 +386,10 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
387 } 386 }
388 387
389 spin_lock_irqsave(&ide_lock, flags); 388 spin_lock_irqsave(&ide_lock, flags);
390 blkdev_dequeue_request(rq);
391 HWGROUP(drive)->rq = NULL; 389 HWGROUP(drive)->rq = NULL;
392 rq->errors = err; 390 rq->errors = err;
393 end_that_request_last(rq, !rq->errors); 391 if (__blk_end_request(rq, (rq->errors ? -EIO : 0), 0))
392 BUG();
394 spin_unlock_irqrestore(&ide_lock, flags); 393 spin_unlock_irqrestore(&ide_lock, flags);
395} 394}
396 395
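
The __ide_end_request() hunk folds the old tri-state uptodate argument into the single errno the new interface takes. A hedged sketch of that mapping as a standalone helper; the helper name is hypothetical, but the logic mirrors the lines added above.

#include <linux/errno.h>

static inline int uptodate_to_error(int uptodate)
{
	/* > 0 meant success; 0 meant a generic failure, which the new
	 * interface spells -EIO; a negative value was already a
	 * specific errno and passes through unchanged. */
	if (uptodate > 0)
		return 0;
	return uptodate ? uptodate : -EIO;
}
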
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index e4ad7a1c4fbd..a95314897402 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -412,13 +412,13 @@ static void i2o_block_delayed_request_fn(struct work_struct *work)
412/** 412/**
413 * i2o_block_end_request - Post-processing of completed commands 413 * i2o_block_end_request - Post-processing of completed commands
414 * @req: request which should be completed 414 * @req: request which should be completed
415 * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error 415 * @error: 0 for success, < 0 for error
416 * @nr_bytes: number of bytes to complete 416 * @nr_bytes: number of bytes to complete
417 * 417 *
418 * Mark the request as complete. The lock must not be held when entering. 418 * Mark the request as complete. The lock must not be held when entering.
419 * 419 *
420 */ 420 */
421static void i2o_block_end_request(struct request *req, int uptodate, 421static void i2o_block_end_request(struct request *req, int error,
422 int nr_bytes) 422 int nr_bytes)
423{ 423{
424 struct i2o_block_request *ireq = req->special; 424 struct i2o_block_request *ireq = req->special;
@@ -426,22 +426,18 @@ static void i2o_block_end_request(struct request *req, int uptodate,
426 struct request_queue *q = req->q; 426 struct request_queue *q = req->q;
427 unsigned long flags; 427 unsigned long flags;
428 428
429 if (end_that_request_chunk(req, uptodate, nr_bytes)) { 429 if (blk_end_request(req, error, nr_bytes)) {
430 int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT); 430 int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
431 431
432 if (blk_pc_request(req)) 432 if (blk_pc_request(req))
433 leftover = req->data_len; 433 leftover = req->data_len;
434 434
435 if (end_io_error(uptodate)) 435 if (error)
436 end_that_request_chunk(req, 0, leftover); 436 blk_end_request(req, -EIO, leftover);
437 } 437 }
438 438
439 add_disk_randomness(req->rq_disk);
440
441 spin_lock_irqsave(q->queue_lock, flags); 439 spin_lock_irqsave(q->queue_lock, flags);
442 440
443 end_that_request_last(req, uptodate);
444
445 if (likely(dev)) { 441 if (likely(dev)) {
446 dev->open_queue_depth--; 442 dev->open_queue_depth--;
447 list_del(&ireq->queue); 443 list_del(&ireq->queue);
@@ -468,7 +464,7 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
468 struct i2o_message *msg) 464 struct i2o_message *msg)
469{ 465{
470 struct request *req; 466 struct request *req;
471 int uptodate = 1; 467 int error = 0;
472 468
473 req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); 469 req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
474 if (unlikely(!req)) { 470 if (unlikely(!req)) {
@@ -501,10 +497,10 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
501 497
502 req->errors++; 498 req->errors++;
503 499
504 uptodate = 0; 500 error = -EIO;
505 } 501 }
506 502
507 i2o_block_end_request(req, uptodate, le32_to_cpu(msg->body[1])); 503 i2o_block_end_request(req, error, le32_to_cpu(msg->body[1]));
508 504
509 return 1; 505 return 1;
510}; 506};
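
i2o_block_end_request() shows the idiom for failed requests with leftover bytes: blk_end_request() returns non-zero while the request is still incomplete, and a second call retires the residue. A sketch under the same assumptions (mydrv_end_request is a hypothetical name):

#include <linux/blkdev.h>

static void mydrv_end_request(struct request *req, int error,
			      int nr_bytes)
{
	/* Non-zero return: bytes are still outstanding. */
	if (blk_end_request(req, error, nr_bytes)) {
		/* For packet commands the residue is in data_len; for
		 * fs requests derive it from hard_nr_sectors. */
		int leftover = req->hard_nr_sectors << 9;

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* On error, retire the remainder as failed. */
		if (error)
			blk_end_request(req, -EIO, leftover);
	}
}
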
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index aeb32a93f6a0..91ded3e82401 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -348,15 +348,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
348 * A block was successfully transferred. 348 * A block was successfully transferred.
349 */ 349 */
350 spin_lock_irq(&md->lock); 350 spin_lock_irq(&md->lock);
351 ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered); 351 ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
352 if (!ret) {
353 /*
354 * The whole request completed successfully.
355 */
356 add_disk_randomness(req->rq_disk);
357 blkdev_dequeue_request(req);
358 end_that_request_last(req, 1);
359 }
360 spin_unlock_irq(&md->lock); 352 spin_unlock_irq(&md->lock);
361 } while (ret); 353 } while (ret);
362 354
@@ -386,27 +378,21 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
386 else 378 else
387 bytes = blocks << 9; 379 bytes = blocks << 9;
388 spin_lock_irq(&md->lock); 380 spin_lock_irq(&md->lock);
389 ret = end_that_request_chunk(req, 1, bytes); 381 ret = __blk_end_request(req, 0, bytes);
390 spin_unlock_irq(&md->lock); 382 spin_unlock_irq(&md->lock);
391 } 383 }
392 } else if (rq_data_dir(req) != READ && 384 } else if (rq_data_dir(req) != READ &&
393 (card->host->caps & MMC_CAP_MULTIWRITE)) { 385 (card->host->caps & MMC_CAP_MULTIWRITE)) {
394 spin_lock_irq(&md->lock); 386 spin_lock_irq(&md->lock);
395 ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered); 387 ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
396 spin_unlock_irq(&md->lock); 388 spin_unlock_irq(&md->lock);
397 } 389 }
398 390
399 mmc_release_host(card->host); 391 mmc_release_host(card->host);
400 392
401 spin_lock_irq(&md->lock); 393 spin_lock_irq(&md->lock);
402 while (ret) { 394 while (ret)
403 ret = end_that_request_chunk(req, 0, 395 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
404 req->current_nr_sectors << 9);
405 }
406
407 add_disk_randomness(req->rq_disk);
408 blkdev_dequeue_request(req);
409 end_that_request_last(req, 0);
410 spin_unlock_irq(&md->lock); 396 spin_unlock_irq(&md->lock);
411 397
412 return 0; 398 return 0;
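
The mmc conversion keeps its per-chunk loop but now spells it with __blk_end_request() and blk_rq_cur_bytes(). A minimal sketch of the error path, assuming the caller's spinlock is the lock the queue was initialized with (the function name is hypothetical):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* 'queue_lock' must be the lock the request queue was set up with,
 * since __blk_end_request() relies on it being held. */
static void mydrv_fail_rest(spinlock_t *queue_lock, struct request *req)
{
	int ret = 1;

	spin_lock_irq(queue_lock);
	/* __blk_end_request() returns non-zero while the request still
	 * has bytes outstanding, so retire it a segment at a time. */
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(queue_lock);
}
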
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 30cd13b13ac3..7731ddefdc1b 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -94,8 +94,8 @@ static void mmc_request(struct request_queue *q)
94 printk(KERN_ERR "MMC: killing requests for dead queue\n"); 94 printk(KERN_ERR "MMC: killing requests for dead queue\n");
95 while ((req = elv_next_request(q)) != NULL) { 95 while ((req = elv_next_request(q)) != NULL) {
96 do { 96 do {
97 ret = end_that_request_chunk(req, 0, 97 ret = __blk_end_request(req, -EIO,
98 req->current_nr_sectors << 9); 98 blk_rq_cur_bytes(req));
99 } while (ret); 99 } while (ret);
100 } 100 }
101 return; 101 return;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1db15f3e5d20..d640427c74c8 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1595,12 +1595,10 @@ void dasd_block_clear_timer(struct dasd_block *block)
1595/* 1595/*
1596 * notifies the buffer cache about a finalized request 1596
1597 */ 1597 */
1598static inline void dasd_end_request(struct request *req, int uptodate) 1598static inline void dasd_end_request(struct request *req, int error)
1599{ 1599{
1600 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 1600 if (__blk_end_request(req, error, blk_rq_bytes(req)))
1601 BUG(); 1601 BUG();
1602 add_disk_randomness(req->rq_disk);
1603 end_that_request_last(req, uptodate);
1604} 1602}
1605 1603
1606/* 1604/*
@@ -1657,7 +1655,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1657 "Rejecting write request %p", 1655 "Rejecting write request %p",
1658 req); 1656 req);
1659 blkdev_dequeue_request(req); 1657 blkdev_dequeue_request(req);
1660 dasd_end_request(req, 0); 1658 dasd_end_request(req, -EIO);
1661 continue; 1659 continue;
1662 } 1660 }
1663 cqr = basedev->discipline->build_cp(basedev, block, req); 1661 cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1686,7 +1684,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1686 "on request %p", 1684 "on request %p",
1687 PTR_ERR(cqr), req); 1685 PTR_ERR(cqr), req);
1688 blkdev_dequeue_request(req); 1686 blkdev_dequeue_request(req);
1689 dasd_end_request(req, 0); 1687 dasd_end_request(req, -EIO);
1690 continue; 1688 continue;
1691 } 1689 }
1692 /* 1690 /*
@@ -1705,11 +1703,14 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1705{ 1703{
1706 struct request *req; 1704 struct request *req;
1707 int status; 1705 int status;
1706 int error = 0;
1708 1707
1709 req = (struct request *) cqr->callback_data; 1708 req = (struct request *) cqr->callback_data;
1710 dasd_profile_end(cqr->block, cqr, req); 1709 dasd_profile_end(cqr->block, cqr, req);
1711 status = cqr->memdev->discipline->free_cp(cqr, req); 1710 status = cqr->memdev->discipline->free_cp(cqr, req);
1712 dasd_end_request(req, status); 1711 if (status <= 0)
1712 error = status ? status : -EIO;
1713 dasd_end_request(req, error);
1713} 1714}
1714 1715
1715/* 1716/*
@@ -2009,7 +2010,7 @@ static void dasd_flush_request_queue(struct dasd_block *block)
2009 spin_lock_irq(&block->request_queue_lock); 2010 spin_lock_irq(&block->request_queue_lock);
2010 while ((req = elv_next_request(block->request_queue))) { 2011 while ((req = elv_next_request(block->request_queue))) {
2011 blkdev_dequeue_request(req); 2012 blkdev_dequeue_request(req);
2012 dasd_end_request(req, 0); 2013 dasd_end_request(req, -EIO);
2013 } 2014 }
2014 spin_unlock_irq(&block->request_queue_lock); 2015 spin_unlock_irq(&block->request_queue_lock);
2015} 2016}
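
dasd_end_request() completes the whole request in one call by passing blk_rq_bytes(), so any non-zero return means unaccounted bytes and is treated as a bug. The same whole-request idiom, sketched under the patch's API (the wrapper name is hypothetical):

#include <linux/blkdev.h>

static inline void mydrv_end_whole_request(struct request *req, int error)
{
	/* blk_rq_bytes() covers everything left in the request, so a
	 * non-zero return (bytes still pending) cannot legally happen;
	 * the queue lock must be held by the caller. */
	if (__blk_end_request(req, error, blk_rq_bytes(req)))
		BUG();
}
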
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index eeb92e2ed0cc..ddc4a114e7f4 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -74,11 +74,10 @@ tapeblock_trigger_requeue(struct tape_device *device)
74 * Post finished request. 74 * Post finished request.
75 */ 75 */
76static void 76static void
77tapeblock_end_request(struct request *req, int uptodate) 77tapeblock_end_request(struct request *req, int error)
78{ 78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 79 if (__blk_end_request(req, error, blk_rq_bytes(req)))
80 BUG(); 80 BUG();
81 end_that_request_last(req, uptodate);
82} 81}
83 82
84static void 83static void
@@ -91,7 +90,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
91 90
92 device = ccw_req->device; 91 device = ccw_req->device;
93 req = (struct request *) data; 92 req = (struct request *) data;
94 tapeblock_end_request(req, ccw_req->rc == 0); 93 tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
95 if (ccw_req->rc == 0) 94 if (ccw_req->rc == 0)
96 /* Update position. */ 95 /* Update position. */
97 device->blk_data.block_position = 96 device->blk_data.block_position =
@@ -119,7 +118,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
119 ccw_req = device->discipline->bread(device, req); 118 ccw_req = device->discipline->bread(device, req);
120 if (IS_ERR(ccw_req)) { 119 if (IS_ERR(ccw_req)) {
121 DBF_EVENT(1, "TBLOCK: bread failed\n"); 120 DBF_EVENT(1, "TBLOCK: bread failed\n");
122 tapeblock_end_request(req, 0); 121 tapeblock_end_request(req, -EIO);
123 return PTR_ERR(ccw_req); 122 return PTR_ERR(ccw_req);
124 } 123 }
125 ccw_req->callback = __tapeblock_end_request; 124 ccw_req->callback = __tapeblock_end_request;
@@ -132,7 +131,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
132 * Start/enqueueing failed. No retries in 131 * Start/enqueueing failed. No retries in
133 * this case. 132 * this case.
134 */ 133 */
135 tapeblock_end_request(req, 0); 134 tapeblock_end_request(req, -EIO);
136 device->discipline->free_bread(ccw_req); 135 device->discipline->free_bread(ccw_req);
137 } 136 }
138 137
@@ -177,7 +176,7 @@ tapeblock_requeue(struct work_struct *work) {
177 if (rq_data_dir(req) == WRITE) { 176 if (rq_data_dir(req) == WRITE) {
178 DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); 177 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
179 blkdev_dequeue_request(req); 178 blkdev_dequeue_request(req);
180 tapeblock_end_request(req, 0); 179 tapeblock_end_request(req, -EIO);
181 continue; 180 continue;
182 } 181 }
183 spin_unlock_irq(&device->blk_data.request_queue_lock); 182 spin_unlock_irq(&device->blk_data.request_queue_lock);
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 02e91893064d..db8bc20539e1 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -919,8 +919,8 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
919 } 919 }
920 920
921 /* kill current request */ 921 /* kill current request */
922 blkdev_dequeue_request(req); 922 if (__blk_end_request(req, -EIO, 0))
923 end_that_request_last(req, 0); 923 BUG();
924 if (blk_sense_request(req)) 924 if (blk_sense_request(req))
925 kfree(scsi->pc->buffer); 925 kfree(scsi->pc->buffer);
926 kfree(scsi->pc); 926 kfree(scsi->pc);
@@ -929,8 +929,8 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
929 929
930 /* now nuke the drive queue */ 930 /* now nuke the drive queue */
931 while ((req = elv_next_request(drive->queue))) { 931 while ((req = elv_next_request(drive->queue))) {
932 blkdev_dequeue_request(req); 932 if (__blk_end_request(req, -EIO, 0))
933 end_that_request_last(req, 0); 933 BUG();
934 } 934 }
935 935
936 HWGROUP(drive)->rq = NULL; 936 HWGROUP(drive)->rq = NULL;
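
The ide-scsi hunks drain a queue by failing each request with zero bytes: for requests carrying no outstanding bio data, __blk_end_request(req, -EIO, 0) both dequeues and finishes them, replacing the old blkdev_dequeue_request()/end_that_request_last() pair. A sketch of that drain loop, assuming the caller already holds the queue lock as both calls require:

#include <linux/blkdev.h>

static void mydrv_nuke_queue(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		/* 0 bytes: nothing left to account for requests without
		 * bio data; the call dequeues and fails the request. */
		if (__blk_end_request(req, -EIO, 0))
			BUG();
	}
}
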
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index eb4911a61641..7c4c889c5221 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -634,7 +634,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
634 * of upper level post-processing and scsi_io_completion). 634 * of upper level post-processing and scsi_io_completion).
635 * 635 *
636 * Arguments: cmd - command that is complete. 636 * Arguments: cmd - command that is complete.
637 * uptodate - 1 if I/O indicates success, <= 0 for I/O error. 637 * error - 0 if I/O indicates success, < 0 for I/O error.
638 * bytes - number of bytes of completed I/O 638 * bytes - number of bytes of completed I/O
639 * requeue - indicates whether we should requeue leftovers. 639 * requeue - indicates whether we should requeue leftovers.
640 * 640 *
@@ -649,26 +649,25 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
649 * at some point during this call. 649 * at some point during this call.
650 * Notes: If cmd was requeued, upon return it will be a stale pointer. 650 * Notes: If cmd was requeued, upon return it will be a stale pointer.
651 */ 651 */
652static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, 652static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
653 int bytes, int requeue) 653 int bytes, int requeue)
654{ 654{
655 struct request_queue *q = cmd->device->request_queue; 655 struct request_queue *q = cmd->device->request_queue;
656 struct request *req = cmd->request; 656 struct request *req = cmd->request;
657 unsigned long flags;
658 657
659 /* 658 /*
660 * If there are blocks left over at the end, set up the command 659 * If there are blocks left over at the end, set up the command
661 * to queue the remainder of them. 660 * to queue the remainder of them.
662 */ 661 */
663 if (end_that_request_chunk(req, uptodate, bytes)) { 662 if (blk_end_request(req, error, bytes)) {
664 int leftover = (req->hard_nr_sectors << 9); 663 int leftover = (req->hard_nr_sectors << 9);
665 664
666 if (blk_pc_request(req)) 665 if (blk_pc_request(req))
667 leftover = req->data_len; 666 leftover = req->data_len;
668 667
669 /* kill remainder if no retries */ 668
670 if (!uptodate && blk_noretry_request(req)) 669 if (error && blk_noretry_request(req))
671 end_that_request_chunk(req, 0, leftover); 670 blk_end_request(req, error, leftover);
672 else { 671 else {
673 if (requeue) { 672 if (requeue) {
674 /* 673 /*
@@ -683,14 +682,6 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
683 } 682 }
684 } 683 }
685 684
686 add_disk_randomness(req->rq_disk);
687
688 spin_lock_irqsave(q->queue_lock, flags);
689 if (blk_rq_tagged(req))
690 blk_queue_end_tag(q, req);
691 end_that_request_last(req, uptodate);
692 spin_unlock_irqrestore(q->queue_lock, flags);
693
694 /* 685 /*
695 * This will goose the queue request function at the end, so we don't 686 * This will goose the queue request function at the end, so we don't
696 * need to worry about launching another command. 687 * need to worry about launching another command.
@@ -892,7 +883,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
892 * are leftovers and there is some kind of error 883 * are leftovers and there is some kind of error
893 * (result != 0), retry the rest. 884 * (result != 0), retry the rest.
894 */ 885 */
895 if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL) 886 if (scsi_end_request(cmd, 0, good_bytes, result == 0) == NULL)
896 return; 887 return;
897 888
898 /* good_bytes = 0, or (inclusive) there were leftovers and 889 /* good_bytes = 0, or (inclusive) there were leftovers and
@@ -906,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
906 * and quietly refuse further access. 897 * and quietly refuse further access.
907 */ 898 */
908 cmd->device->changed = 1; 899 cmd->device->changed = 1;
909 scsi_end_request(cmd, 0, this_count, 1); 900 scsi_end_request(cmd, -EIO, this_count, 1);
910 return; 901 return;
911 } else { 902 } else {
912 /* Must have been a power glitch, or a 903 /* Must have been a power glitch, or a
@@ -938,7 +929,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
938 scsi_requeue_command(q, cmd); 929 scsi_requeue_command(q, cmd);
939 return; 930 return;
940 } else { 931 } else {
941 scsi_end_request(cmd, 0, this_count, 1); 932 scsi_end_request(cmd, -EIO, this_count, 1);
942 return; 933 return;
943 } 934 }
944 break; 935 break;
@@ -966,7 +957,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
966 "Device not ready", 957 "Device not ready",
967 &sshdr); 958 &sshdr);
968 959
969 scsi_end_request(cmd, 0, this_count, 1); 960 scsi_end_request(cmd, -EIO, this_count, 1);
970 return; 961 return;
971 case VOLUME_OVERFLOW: 962 case VOLUME_OVERFLOW:
972 if (!(req->cmd_flags & REQ_QUIET)) { 963 if (!(req->cmd_flags & REQ_QUIET)) {
@@ -976,7 +967,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
976 scsi_print_sense("", cmd); 967 scsi_print_sense("", cmd);
977 } 968 }
978 /* See SSC3rXX or current. */ 969 /* See SSC3rXX or current. */
979 scsi_end_request(cmd, 0, this_count, 1); 970 scsi_end_request(cmd, -EIO, this_count, 1);
980 return; 971 return;
981 default: 972 default:
982 break; 973 break;
@@ -997,7 +988,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
997 scsi_print_sense("", cmd); 988 scsi_print_sense("", cmd);
998 } 989 }
999 } 990 }
1000 scsi_end_request(cmd, 0, this_count, !result); 991 scsi_end_request(cmd, -EIO, this_count, !result);
1001} 992}
1002 993
1003/* 994/*
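
The scsi_lib hunk drops the explicit queue_lock section because blk_end_request() takes the lock internally; only the double-underscore variant still expects the caller to hold it. A hedged sketch of the two locking contracts (the mydrv_* names are hypothetical):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Safe without the queue lock: blk_end_request() acquires it. */
static void mydrv_complete_unlocked(struct request *req, int error,
				    int bytes)
{
	blk_end_request(req, error, bytes);
}

/* The __-prefixed variant expects the caller to hold the lock. */
static void mydrv_complete_locked(struct request_queue *q,
				  struct request *req, int error,
				  int bytes)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request(req, error, bytes);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
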
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2483a05231c7..b71c3900810d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -464,6 +464,8 @@ enum {
464#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) 464#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
465#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 465#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
466#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) 466#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
467/* rq->queuelist of dequeued request must be list_empty() */
468#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
467 469
468#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 470#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
469 471
@@ -643,29 +645,32 @@ static inline void blk_run_address_space(struct address_space *mapping)
643} 645}
644 646
645/* 647/*
646 * end_request() and friends. Must be called with the request queue spinlock 648 * blk_end_request() and friends.
647 * acquired. All functions called within end_request() _must_be_ atomic. 649 * __blk_end_request() and end_request() must be called with
650 * the request queue spinlock acquired.
648 * 651 *
649 * Several drivers define their own end_request and call 652 * Several drivers define their own end_request and call
650 * end_that_request_first() and end_that_request_last() 653 * blk_end_request() for parts of the original function.
651 * for parts of the original function. This prevents 654 * This prevents code duplication in drivers.
652 * code duplication in drivers.
653 */ 655 */
654extern int end_that_request_first(struct request *, int, int); 656extern int blk_end_request(struct request *rq, int error, int nr_bytes);
655extern int end_that_request_chunk(struct request *, int, int); 657extern int __blk_end_request(struct request *rq, int error, int nr_bytes);
656extern void end_that_request_last(struct request *, int); 658extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
659 int bidi_bytes);
657extern void end_request(struct request *, int); 660extern void end_request(struct request *, int);
658extern void end_queued_request(struct request *, int); 661extern void end_queued_request(struct request *, int);
659extern void end_dequeued_request(struct request *, int); 662extern void end_dequeued_request(struct request *, int);
663extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
664 int (drv_callback)(struct request *));
660extern void blk_complete_request(struct request *); 665extern void blk_complete_request(struct request *);
661 666
662/* 667/*
663 * end_that_request_first/chunk() takes an uptodate argument. we account 668 * blk_end_request() takes bytes instead of sectors as a complete size.
664 * any value <= as an io error. 0 means -EIO for compatability reasons, 669 * blk_rq_bytes() returns bytes left to complete in the entire request.
665 * any other < 0 value is the direct error type. An uptodate value of 670 * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
666 * 1 indicates successful io completion
667 */ 671 */
668#define end_io_error(uptodate) (unlikely((uptodate) <= 0)) 672extern unsigned int blk_rq_bytes(struct request *rq);
673extern unsigned int blk_rq_cur_bytes(struct request *rq);
669 674
670static inline void blkdev_dequeue_request(struct request *req) 675static inline void blkdev_dequeue_request(struct request *req)
671{ 676{