author    Kiyoshi Ueda <k-ueda@ct.jp.nec.com>    2007-12-11 17:40:30 -0500
committer Jens Axboe <jens.axboe@oracle.com>     2008-01-28 04:35:53 -0500
commit    336cdb4003200a90f4fc52a4e9ccc2baa570fffb (patch)
tree      dcb7f736738232c0e853a1ba796ca35d5b18c503
parent    91525300baf162e83e923b09ca286f9205e21522 (diff)
blk_end_request: add new request completion interface (take 4)
This patch adds 2 new interfaces for request completion:

  o blk_end_request()   : called without queue lock
  o __blk_end_request() : called with queue lock held

blk_end_request takes 'error' as an argument instead of 'uptodate',
which the current end_that_request_* interfaces take. The meaning of
the value, which is also used when the bio is completed, is:

    0 : success
  < 0 : error

Some device drivers call the generic functions below between
end_that_request_{first/chunk} and end_that_request_last():

  o add_disk_randomness()
  o blk_queue_end_tag()
  o blkdev_dequeue_request()

These are now called inside the blk_end_request interfaces as part of
generic request completion, so all device drivers get them for free.

To decide whether to call blkdev_dequeue_request(), blk_end_request
checks list_empty(&rq->queuelist) (the blk_queued_rq() macro is added
for this). Drivers that use rq->queuelist for their own purposes must
therefore re-initialize it (e.g. with INIT_LIST_HEAD()) before calling
blk_end_request. (Currently, no driver completes a request without
re-initializing the queuelist after using it, so rq->queuelist can
safely be reused this way.)

"Normal" drivers can be converted to blk_end_request() following the
standard patterns below; a minimal code sketch of pattern a) is shown
after the sign-offs.

 a) end_that_request_{chunk/first}
    spin_lock_irqsave()
    (add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
    end_that_request_last()
    spin_unlock_irqrestore()
    => blk_end_request()

 b) spin_lock_irqsave()
    end_that_request_{chunk/first}
    (add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
    end_that_request_last()
    spin_unlock_irqrestore()
    => spin_lock_irqsave()
       __blk_end_request()
       spin_unlock_irqrestore()

 c) spin_lock_irqsave()
    (add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
    end_that_request_last()
    spin_unlock_irqrestore()
    => blk_end_request()
       or
       spin_lock_irqsave()
       __blk_end_request()
       spin_unlock_irqrestore()

Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
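A minimal before/after sketch of conversion pattern a), assuming a
hypothetical driver completion routine (the name my_complete_rq and the
use of rq->hard_nr_sectors to size the completion are illustrative, not
part of the patch):

	/* Before: open-coded completion with the old interfaces. */
	static void my_complete_rq(struct request *rq, int uptodate)
	{
		unsigned long flags;

		/* Returns 1 while buffers are still pending. */
		if (end_that_request_chunk(rq, uptodate,
					   rq->hard_nr_sectors << 9))
			return;

		spin_lock_irqsave(rq->q->queue_lock, flags);
		add_disk_randomness(rq->rq_disk);
		blkdev_dequeue_request(rq);
		end_that_request_last(rq, uptodate);
		spin_unlock_irqrestore(rq->q->queue_lock, flags);
	}

	/* After: one call; 'error' (0 or a negative errno) replaces
	 * 'uptodate'. All remaining bytes are completed here, so the
	 * return value is 0 and can be ignored. */
	static void my_complete_rq(struct request *rq, int error)
	{
		blk_end_request(rq, error, rq->hard_nr_sectors << 9);
	}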
-rw-r--r--  block/ll_rw_blk.c        96
-rw-r--r--  include/linux/blkdev.h    4
2 files changed, 100 insertions(+), 0 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 3d0422f48453..5c01911af47c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3791,6 +3791,102 @@ void end_request(struct request *req, int uptodate)
 }
 EXPORT_SYMBOL(end_request);
 
+static void complete_request(struct request *rq, int error)
+{
+	/*
+	 * REMOVEME: This conversion is transitional and will be removed
+	 * when old end_that_request_* are unexported.
+	 */
+	int uptodate = 1;
+	if (error)
+		uptodate = (error == -EIO) ? 0 : error;
+
+	if (blk_rq_tagged(rq))
+		blk_queue_end_tag(rq->q, rq);
+
+	if (blk_queued_rq(rq))
+		blkdev_dequeue_request(rq);
+
+	end_that_request_last(rq, uptodate);
+}
+
+/**
+ * blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+	struct request_queue *q = rq->q;
+	unsigned long flags = 0UL;
+	/*
+	 * REMOVEME: This conversion is transitional and will be removed
+	 * when old end_that_request_* are unexported.
+	 */
+	int uptodate = 1;
+	if (error)
+		uptodate = (error == -EIO) ? 0 : error;
+
+	if (blk_fs_request(rq) || blk_pc_request(rq)) {
+		if (__end_that_request_first(rq, uptodate, nr_bytes))
+			return 1;
+	}
+
+	add_disk_randomness(rq->rq_disk);
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	complete_request(rq, error);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(blk_end_request);
+
+/**
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Must be called with queue lock held unlike blk_end_request().
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int __blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+	/*
+	 * REMOVEME: This conversion is transitional and will be removed
+	 * when old end_that_request_* are unexported.
+	 */
+	int uptodate = 1;
+	if (error)
+		uptodate = (error == -EIO) ? 0 : error;
+
+	if (blk_fs_request(rq) || blk_pc_request(rq)) {
+		if (__end_that_request_first(rq, uptodate, nr_bytes))
+			return 1;
+	}
+
+	add_disk_randomness(rq->rq_disk);
+
+	complete_request(rq, error);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__blk_end_request);
+
 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 			    struct bio *bio)
 {
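Note on the transitional 'uptodate' conversion above: callers of the
new interfaces express status only as 0 or a negative errno, and the
helpers translate that for the legacy end_that_request_* path. An
illustrative call site (dev_error and nr_bytes are hypothetical):

	if (!dev_error)
		blk_end_request(rq, 0, nr_bytes);	/* success */
	else
		blk_end_request(rq, -EIO, nr_bytes);	/* I/O error */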
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 49b7a4c31a6d..3b212f02db8d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -537,6 +537,8 @@ enum {
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_bidi_rq(rq)	((rq)->next_rq != NULL)
 #define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
+/* rq->queuelist of dequeued request must be list_empty() */
+#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
@@ -724,6 +726,8 @@ static inline void blk_run_address_space(struct address_space *mapping)
  * for parts of the original function. This prevents
  * code duplication in drivers.
  */
+extern int blk_end_request(struct request *rq, int error, int nr_bytes);
+extern int __blk_end_request(struct request *rq, int error, int nr_bytes);
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *, int);
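Two usage notes implied by the patch, sketched with hypothetical driver
code (q, flags, error and nr_bytes assumed in scope). First, because
blk_end_request() keys the implicit dequeue off blk_queued_rq(), a
driver that borrowed rq->queuelist for private bookkeeping must leave
it empty again before completing:

	list_del_init(&rq->queuelist);	/* re-initializes, so list_empty() is true */
	blk_end_request(rq, 0, nr_bytes);

Second, a completion path that already holds the queue lock (conversion
pattern b) uses the locked variant instead of dropping and retaking the
lock:

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request(rq, error, nr_bytes);
	spin_unlock_irqrestore(q->queue_lock, flags);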