Diffstat (limited to 'block/ll_rw_blk.c'):

 -rw-r--r--  block/ll_rw_blk.c | 298
 1 file changed, 213 insertions(+), 85 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b901db63f6ae..c16fdfed8c62 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -347,7 +347,6 @@ unsigned blk_ordered_req_seq(struct request *rq)
 void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
         struct request *rq;
-        int uptodate;
 
         if (error && !q->orderr)
                 q->orderr = error;
@@ -361,15 +360,11 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
         /*
          * Okay, sequence complete.
          */
-        uptodate = 1;
-        if (q->orderr)
-                uptodate = q->orderr;
-
         q->ordseq = 0;
         rq = q->orig_bar_rq;
 
-        end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-        end_that_request_last(rq, uptodate);
+        if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
+                BUG();
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
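The hunk above is the core of the conversion: the old two-step completion
(end_that_request_first() followed by end_that_request_last()) collapses into
a single call, and the "uptodate" flag gives way to an errno-style error
(q->orderr is 0 on success, a negative errno otherwise). A minimal sketch of
the pattern, with the old form on top and the new form below:

        /* Before: two calls, int uptodate (1 = ok, 0 or negative = error) */
        end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
        end_that_request_last(rq, uptodate);

        /* After: one call, int error (0 = ok, negative errno = error).
         * __blk_end_request() returns non-zero only if bytes are still
         * pending, which cannot happen when all of blk_rq_bytes(rq) is
         * completed, hence the BUG() guard.
         */
        if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
                BUG();

The -EOPNOTSUPP conversion in the next hunk follows the same pattern.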
@@ -486,9 +481,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
                  * ORDERED_NONE while this request is on it.
                  */
                 blkdev_dequeue_request(rq);
-                end_that_request_first(rq, -EOPNOTSUPP,
-                                       rq->hard_nr_sectors);
-                end_that_request_last(rq, -EOPNOTSUPP);
+                if (__blk_end_request(rq, -EOPNOTSUPP,
+                                      blk_rq_bytes(rq)))
+                        BUG();
                 *rqp = NULL;
                 return 0;
         }
@@ -3437,29 +3432,36 @@ static void blk_recalc_rq_sectors(struct request *rq, int nsect)
         }
 }
 
-static int __end_that_request_first(struct request *req, int uptodate,
+/**
+ * __end_that_request_first - end I/O on a request
+ * @req:      the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @req, and sets it up
+ *     for the next range of segments (if any) in the cluster.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+static int __end_that_request_first(struct request *req, int error,
                                     int nr_bytes)
 {
-        int total_bytes, bio_nbytes, error, next_idx = 0;
+        int total_bytes, bio_nbytes, next_idx = 0;
         struct bio *bio;
 
         blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
 
         /*
-         * extend uptodate bool to allow < 0 value to be direct io error
-         */
-        error = 0;
-        if (end_io_error(uptodate))
-                error = !uptodate ? -EIO : uptodate;
-
-        /*
          * for a REQ_BLOCK_PC request, we want to carry any eventual
          * sense key with us all the way through
          */
         if (!blk_pc_request(req))
                 req->errors = 0;
 
-        if (!uptodate) {
+        if (error) {
                 if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
                         printk("end_request: I/O error, dev %s, sector %llu\n",
                                 req->rq_disk ? req->rq_disk->disk_name : "?",
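Note the interface convention change in this hunk: @uptodate (1 = success,
0 = generic I/O error, < 0 = specific error) becomes @error (0 = success,
< 0 = errno), so the end_io_error() translation step disappears from the
core path. The compatibility shim __end_request() later in this patch still
performs the mapping for the old-style wrappers; it amounts to:

        int error = 0;

        if (uptodate <= 0)
                error = uptodate ? uptodate : -EIO;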
@@ -3553,49 +3555,6 @@ static int __end_that_request_first(struct request *req, int uptodate,
         return 1;
 }
 
-/**
- * end_that_request_first - end I/O on a request
- * @req:      the request being processed
- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
- * @nr_sectors: number of sectors to end I/O on
- *
- * Description:
- *     Ends I/O on a number of sectors attached to @req, and sets it up
- *     for the next range of segments (if any) in the cluster.
- *
- * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
- **/
-int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
-{
-        return __end_that_request_first(req, uptodate, nr_sectors << 9);
-}
-
-EXPORT_SYMBOL(end_that_request_first);
-
-/**
- * end_that_request_chunk - end I/O on a request
- * @req:      the request being processed
- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @req, and sets it up
- *     for the next range of segments (if any). Like end_that_request_first(),
- *     but deals with bytes instead of sectors.
- *
- * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
- **/
-int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
-{
-        return __end_that_request_first(req, uptodate, nr_bytes);
-}
-
-EXPORT_SYMBOL(end_that_request_chunk);
-
 /*
  * splice the completion data to a local structure and hand off to
  * process_completion_queue() to complete the requests
@@ -3675,17 +3634,15 @@ EXPORT_SYMBOL(blk_complete_request);
 /*
  * queue lock must be held
  */
-void end_that_request_last(struct request *req, int uptodate)
+static void end_that_request_last(struct request *req, int error)
 {
         struct gendisk *disk = req->rq_disk;
-        int error;
 
-        /*
-         * extend uptodate bool to allow < 0 value to be direct io error
-         */
-        error = 0;
-        if (end_io_error(uptodate))
-                error = !uptodate ? -EIO : uptodate;
+        if (blk_rq_tagged(req))
+                blk_queue_end_tag(req->q, req);
+
+        if (blk_queued_rq(req))
+                blkdev_dequeue_request(req);
 
         if (unlikely(laptop_mode) && blk_fs_request(req))
                 laptop_io_completion();
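Besides becoming static, end_that_request_last() now absorbs two chores that
callers previously handled themselves under the queue lock: releasing a
tagged command and dequeueing a still-queued request. Drivers converted to
the new helpers can therefore drop code along these lines (a sketch of the
pattern being retired, not a quote from a specific driver):

        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);       /* now done by the core */
        blkdev_dequeue_request(rq);             /* now done by the core */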
@@ -3704,32 +3661,54 @@ void end_that_request_last(struct request *req, int uptodate)
                 disk_round_stats(disk);
                 disk->in_flight--;
         }
+
         if (req->end_io)
                 req->end_io(req, error);
-        else
+        else {
+                if (blk_bidi_rq(req))
+                        __blk_put_request(req->next_rq->q, req->next_rq);
+
                 __blk_put_request(req->q, req);
+        }
 }
 
-EXPORT_SYMBOL(end_that_request_last);
-
 static inline void __end_request(struct request *rq, int uptodate,
-                                 unsigned int nr_bytes, int dequeue)
+                                 unsigned int nr_bytes)
 {
-        if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-                if (dequeue)
-                        blkdev_dequeue_request(rq);
-                add_disk_randomness(rq->rq_disk);
-                end_that_request_last(rq, uptodate);
-        }
+        int error = 0;
+
+        if (uptodate <= 0)
+                error = uptodate ? uptodate : -EIO;
+
+        __blk_end_request(rq, error, nr_bytes);
 }
 
-static unsigned int rq_byte_size(struct request *rq)
+/**
+ * blk_rq_bytes - Returns bytes left to complete in the entire request
+ **/
+unsigned int blk_rq_bytes(struct request *rq)
 {
         if (blk_fs_request(rq))
                 return rq->hard_nr_sectors << 9;
 
         return rq->data_len;
 }
+EXPORT_SYMBOL_GPL(blk_rq_bytes);
+
+/**
+ * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ **/
+unsigned int blk_rq_cur_bytes(struct request *rq)
+{
+        if (blk_fs_request(rq))
+                return rq->current_nr_sectors << 9;
+
+        if (rq->bio)
+                return rq->bio->bi_size;
+
+        return rq->data_len;
+}
+EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
 /**
  * end_queued_request - end all I/O on a queued request
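blk_rq_bytes() and blk_rq_cur_bytes() generalize the old private
rq_byte_size(): for fs requests they derive the size from the hard sector
counts, for everything else from data_len (or the current bio). Together with
the new completion helpers, they let a driver complete either the current
segment or the whole remainder without sector arithmetic. A hedged usage
sketch (error values are illustrative):

        /* complete just the current segment, leaving the rest queued ... */
        blk_end_request(rq, 0, blk_rq_cur_bytes(rq));

        /* ... or fail everything still outstanding on the request */
        blk_end_request(rq, -EIO, blk_rq_bytes(rq));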
@@ -3744,7 +3723,7 @@ static unsigned int rq_byte_size(struct request *rq)
  **/
 void end_queued_request(struct request *rq, int uptodate)
 {
-        __end_request(rq, uptodate, rq_byte_size(rq), 1);
+        __end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_queued_request);
 
@@ -3761,7 +3740,7 @@ EXPORT_SYMBOL(end_queued_request);
  **/
 void end_dequeued_request(struct request *rq, int uptodate)
 {
-        __end_request(rq, uptodate, rq_byte_size(rq), 0);
+        __end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_dequeued_request);
 
@@ -3787,10 +3766,159 @@ EXPORT_SYMBOL(end_dequeued_request);
  **/
 void end_request(struct request *req, int uptodate)
 {
-        __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+        __end_request(req, uptodate, req->hard_cur_sectors << 9);
 }
 EXPORT_SYMBOL(end_request);
 
+/**
+ * blk_end_io - Generic end_io function to complete a request.
+ * @rq:           the request being processed
+ * @error:        0 for success, < 0 for error
+ * @nr_bytes:     number of bytes to complete @rq
+ * @bidi_bytes:   number of bytes to complete @rq->next_rq
+ * @drv_callback: function called between completion of bios in the request
+ *                and completion of the request.
+ *                If the callback returns non 0, this helper returns without
+ *                completion of the request.
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - this request is not freed yet, it still has pending buffers.
+ **/
+static int blk_end_io(struct request *rq, int error, int nr_bytes,
+                      int bidi_bytes, int (drv_callback)(struct request *))
+{
+        struct request_queue *q = rq->q;
+        unsigned long flags = 0UL;
+
+        if (blk_fs_request(rq) || blk_pc_request(rq)) {
+                if (__end_that_request_first(rq, error, nr_bytes))
+                        return 1;
+
+                /* Bidi request must be completed as a whole */
+                if (blk_bidi_rq(rq) &&
+                    __end_that_request_first(rq->next_rq, error, bidi_bytes))
+                        return 1;
+        }
+
+        /* Special feature for tricky drivers */
+        if (drv_callback && drv_callback(rq))
+                return 1;
+
+        add_disk_randomness(rq->rq_disk);
+
+        spin_lock_irqsave(q->queue_lock, flags);
+        end_that_request_last(rq, error);
+        spin_unlock_irqrestore(q->queue_lock, flags);
+
+        return 0;
+}
+
+/**
+ * blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+        return blk_end_io(rq, error, nr_bytes, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(blk_end_request);
+
+/**
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Must be called with queue lock held unlike blk_end_request().
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int __blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+        if (blk_fs_request(rq) || blk_pc_request(rq)) {
+                if (__end_that_request_first(rq, error, nr_bytes))
+                        return 1;
+        }
+
+        add_disk_randomness(rq->rq_disk);
+
+        end_that_request_last(rq, error);
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(__blk_end_request);
+
+/**
+ * blk_end_bidi_request - Helper function for drivers to complete bidi request.
+ * @rq:         the bidi request being processed
+ * @error:      0 for success, < 0 for error
+ * @nr_bytes:   number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
+                         int bidi_bytes)
+{
+        return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
+}
+EXPORT_SYMBOL_GPL(blk_end_bidi_request);
+
+/**
+ * blk_end_request_callback - Special helper function for tricky drivers
+ * @rq:           the request being processed
+ * @error:        0 for success, < 0 for error
+ * @nr_bytes:     number of bytes to complete
+ * @drv_callback: function called between completion of bios in the request
+ *                and completion of the request.
+ *                If the callback returns non 0, this helper returns without
+ *                completion of the request.
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ *     This special helper function is used only for existing tricky drivers.
+ *     (e.g. cdrom_newpc_intr() of ide-cd)
+ *     This interface will be removed when such drivers are rewritten.
+ *     Don't use this interface in other places anymore.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - this request is not freed yet.
+ *         this request still has pending buffers or
+ *         the driver doesn't want to finish this request yet.
+ **/
+int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
+                             int (drv_callback)(struct request *))
+{
+        return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
+}
+EXPORT_SYMBOL_GPL(blk_end_request_callback);
+
 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                             struct bio *bio)
 {
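Taken together, the new family gives drivers one entry point per locking
context: blk_end_request() when the queue lock is not held (blk_end_io()
takes the lock itself around the final completion), __blk_end_request() when
it already is, plus the bidi and callback variants built on the same core. A
hedged sketch of a completion path using the unlocked variant (the driver
structure and function names are invented for illustration):

        #include <linux/blkdev.h>
        #include <linux/interrupt.h>

        /* Hypothetical driver state; only what the sketch needs. */
        struct mydrv {
                struct request *cur_rq;         /* request being serviced */
                unsigned int done_bytes;        /* bytes the hardware finished */
        };

        /* Interrupt handler of a hypothetical driver; the queue lock is
         * NOT held here, so the unlocked blk_end_request() is the right
         * call (it acquires the lock internally for the final step).
         */
        static irqreturn_t mydrv_isr(int irq, void *data)
        {
                struct mydrv *drv = data;
                int error = 0;  /* 0 on success, negative errno on failure */

                /* blk_end_request() returns 0 once the request is fully
                 * completed (dequeued, tag released, freed by the core),
                 * and 1 while bytes remain pending.
                 */
                if (!blk_end_request(drv->cur_rq, error, drv->done_bytes))
                        drv->cur_rq = NULL;

                return IRQ_HANDLED;
        }

From a request_fn or any other context that already holds the queue lock, the
same completion would use __blk_end_request() instead.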