author		Jens Axboe <jens.axboe@oracle.com>	2007-09-21 04:41:07 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2007-10-16 05:03:53 -0400
commit		a0cd128542cd9c67f27458a08e989db486a293ce (patch)
tree		bc4ca6dd3cbfa230de486d79480f7a99bbfb6232 /block
parent		992c5ddaf1b8b85d2252339c4c89adf7469c09ca (diff)
block: add end_queued_request() and end_dequeued_request() helpers
We can use the new end_queued_request() helper in the elevator core for
BLKPREP_KILL, and it'll also be useful for the empty barrier patch.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
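
[Editor's note: for context, a minimal sketch of the intended driver-side
usage, based on the kerneldoc comments added below. my_driver_complete()
and its error parameter are hypothetical; only end_queued_request() and
end_dequeued_request() come from this patch.]

	/*
	 * Hypothetical completion path (illustration only, not part of
	 * this patch). A driver that has already taken the request off
	 * the queue with blkdev_dequeue_request() finishes it with
	 * end_dequeued_request(); a request still sitting on the queue
	 * (e.g. one killed at prep time) goes through
	 * end_queued_request(), which also dequeues it.
	 */
	static void my_driver_complete(struct request *rq, int error)
	{
		/* uptodate convention: 1 = success, 0 or negative errno = failure */
		int uptodate = error ? error : 1;

		end_dequeued_request(rq, uptodate);
	}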
Diffstat (limited to 'block')

 block/elevator.c  |  9 +--------
 block/ll_rw_blk.c | 78 ++++++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 74 insertions(+), 13 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index b9c518afe1f8..ec23ca02f2fe 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -751,15 +751,8 @@ struct request *elv_next_request(struct request_queue *q)
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL) {
-			int nr_bytes = rq->hard_nr_sectors << 9;
-
-			if (!nr_bytes)
-				nr_bytes = rq->data_len;
-
-			blkdev_dequeue_request(rq);
 			rq->cmd_flags |= REQ_QUIET;
-			end_that_request_chunk(rq, 0, nr_bytes);
-			end_that_request_last(rq, 0);
+			end_queued_request(rq, 0);
 		} else {
 			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
 								ret);
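
[Editor's note: the nine open-coded lines above collapse into one call.
Based on the __end_request() and rq_byte_size() implementation in the
ll_rw_blk.c hunk below, end_queued_request(rq, 0) expands to roughly:]

	unsigned int nr_bytes = blk_fs_request(rq) ? rq->hard_nr_sectors << 9
						   : rq->data_len;

	if (!end_that_request_chunk(rq, 0, nr_bytes)) {
		blkdev_dequeue_request(rq);	/* dequeue == 1 for the queued variant */
		add_disk_randomness(rq->rq_disk);
		end_that_request_last(rq, 0);
	}

[Two nuances of the conversion: the old elevator code dequeued the request
unconditionally, while __end_request() dequeues only once
end_that_request_chunk() reports the request fully done; and the byte count
now keys off blk_fs_request() rather than falling back to rq->data_len only
when hard_nr_sectors is zero.]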
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0fa5d3d556d9..8904f8b1f417 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3630,15 +3630,83 @@ void end_that_request_last(struct request *req, int uptodate)
 
 EXPORT_SYMBOL(end_that_request_last);
 
-void end_request(struct request *req, int uptodate)
+static inline void __end_request(struct request *rq, int uptodate,
+				 unsigned int nr_bytes, int dequeue)
 {
-	if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
-		add_disk_randomness(req->rq_disk);
-		blkdev_dequeue_request(req);
-		end_that_request_last(req, uptodate);
+	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+		if (dequeue)
+			blkdev_dequeue_request(rq);
+		add_disk_randomness(rq->rq_disk);
+		end_that_request_last(rq, uptodate);
 	}
 }
 
+static unsigned int rq_byte_size(struct request *rq)
+{
+	if (blk_fs_request(rq))
+		return rq->hard_nr_sectors << 9;
+
+	return rq->data_len;
+}
+
+/**
+ * end_queued_request - end all I/O on a queued request
+ * @rq:		the request being processed
+ * @uptodate:	error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request, and removes it from the block layer queues.
+ *     Not suitable for normal IO completion, unless the driver still has
+ *     the request attached to the block layer.
+ *
+ **/
+void end_queued_request(struct request *rq, int uptodate)
+{
+	__end_request(rq, uptodate, rq_byte_size(rq), 1);
+}
+EXPORT_SYMBOL(end_queued_request);
+
+/**
+ * end_dequeued_request - end all I/O on a dequeued request
+ * @rq:		the request being processed
+ * @uptodate:	error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends all I/O on a request. The request must already have been
+ *     dequeued using blkdev_dequeue_request(), as is normally the case
+ *     for most drivers.
+ *
+ **/
+void end_dequeued_request(struct request *rq, int uptodate)
+{
+	__end_request(rq, uptodate, rq_byte_size(rq), 0);
+}
+EXPORT_SYMBOL(end_dequeued_request);
+
+
+/**
+ * end_request - end I/O on the current segment of the request
+ * @rq:		the request being processed
+ * @uptodate:	error value or 0/1 uptodate flag
+ *
+ * Description:
+ *     Ends I/O on the current segment of a request. If that is the only
+ *     remaining segment, the request is also completed and freed.
+ *
+ *     This is a remnant of how older block drivers handled IO completions.
+ *     Modern drivers typically end IO on the full request in one go, unless
+ *     they have a residual value to account for. For that case this function
+ *     isn't really useful, unless the residual just happens to be the
+ *     full current segment. In other words, don't use this function in new
+ *     code. Either use end_queued_request() or end_dequeued_request(), or
+ *     use end_that_request_chunk() (along with end_that_request_last()) for
+ *     partial completions.
+ *
+ **/
+void end_request(struct request *req, int uptodate)
+{
+	__end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+}
 EXPORT_SYMBOL(end_request);
 
 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
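
[Editor's note: the end_request() comment added above warns against
per-segment completion in new code. For illustration, the legacy pattern it
describes looked roughly like this; the ISR and current_request() are made
up for the sketch, and queue locking is elided.]

	/*
	 * Old-style per-segment completion (illustration only, not from
	 * this patch). One segment is finished per hardware interrupt;
	 * end_request() completes and frees the request once the last
	 * segment is done.
	 */
	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct request *rq = current_request(dev_id);	/* hypothetical helper */

		/* completes rq->hard_cur_sectors worth of the request */
		end_request(rq, 1);
		return IRQ_HANDLED;
	}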