diff options
author | Boaz Harrosh <bharrosh@panasas.com> | 2009-05-17 11:57:15 -0400 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-05-19 06:14:56 -0400 |
commit | 79eb63e9e5875b84341a3a05f8e6ae9cdb4bb6f6 (patch) | |
tree | bdadc2037c1d0f24c6c73b71ae82fdd7b907e610 /block | |
parent | bc38bf106c967389a465d926be22c7371abba69d (diff) |
block: Add blk_make_request(), takes bio, returns a request
New block API:
Given a struct bio, allocates a new request. This is the parallel of
generic_make_request for users of BLOCK_PC commands.
The passed bio may be a chained bio. The bio is bounced if needed
inside the call to this function.
This is in the effort of un-exporting blk_rq_append_bio().
Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
CC: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c | 45 |
1 file changed, 45 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c index e3f7e6a3a095..bec1d69952d0 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -891,6 +891,51 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) | |||
891 | EXPORT_SYMBOL(blk_get_request); | 891 | EXPORT_SYMBOL(blk_get_request); |
892 | 892 | ||
/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: the request queue the new request is allocated from and destined for
 * @bio: The bio describing the memory mappings that will be submitted for IO.
 *       It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for the request allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands. Where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers. That bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * Returns the allocated request on success, or ERR_PTR(-ENOMEM) /
 * ERR_PTR(error from blk_rq_append_bio) on failure.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	/* Direction of the request follows the direction of the lead bio. */
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);

	/* Walk the (possibly chained) bio list; bounce and append each bio. */
	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		/* May substitute a bounce buffer for high-memory pages. */
		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			/* Append failed: drop the request and propagate. */
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);
937 | |||
938 | /** | ||
894 | * blk_requeue_request - put a request back on queue | 939 | * blk_requeue_request - put a request back on queue |
895 | * @q: request queue where request should be inserted | 940 | * @q: request queue where request should be inserted |
896 | * @rq: request to be inserted | 941 | * @rq: request to be inserted |