path: root/block
author    Tejun Heo <tj@kernel.org>      2012-04-19 19:29:21 -0400
committer Jens Axboe <axboe@kernel.dk>  2012-04-20 04:06:40 -0400
commit    29e2b09ab5fa790514d47838f3c05497130908b3 (patch)
tree      aa430587f78d90d3108c1885f8049da484631935 /block
parent    f9fcc2d3919b8eb575b3cee9274feefafb641bca (diff)
block: collapse blk_alloc_request() into get_request()
Allocation failure handling in get_request() is about to be updated. To
ease the update, collapse blk_alloc_request() into get_request(). This
patch doesn't introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
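The shape of the change can be seen in miniature below. This is an illustrative userspace sketch, not the kernel code: get_request_sketch, REQ_PRIV, and the malloc() stand-ins for mempool_alloc()/elv_set_request() are assumptions for the example. What it demonstrates is why the collapse eases the coming update: once allocation and elevator setup are inline in the caller, every failure path can funnel into the caller's single cleanup label instead of the helper returning NULL for the caller to decode.

#include <stdlib.h>
#include <string.h>

#define REQ_PRIV 0x1	/* stand-in for REQ_ELVPRIV */

struct request {
	unsigned int flags;
	void *elv_priv;
};

/* After the collapse: allocation, init, and the elevator-private setup
 * happen inline, so every failure funnels into one cleanup label. */
static struct request *get_request_sketch(unsigned int flags)
{
	struct request *rq;

	rq = malloc(sizeof(*rq));	/* analogue of mempool_alloc() */
	if (!rq)
		goto fail_alloc;

	memset(rq, 0, sizeof(*rq));	/* analogue of blk_rq_init() */
	rq->flags = flags;

	if (flags & REQ_PRIV) {
		rq->elv_priv = malloc(64);	/* analogue of elv_set_request() */
		if (!rq->elv_priv) {
			free(rq);
			goto fail_alloc;
		}
	}
	return rq;

fail_alloc:
	/* shared failure handling (accounting, wakeups, ...) lives here */
	return NULL;
}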
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c | 46
1 file changed, 17 insertions(+), 29 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3b02ba351f8c..f6f68b0c8302 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -719,33 +719,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	mempool_free(rq, q->rq.rq_pool);
 }
 
-static struct request *
-blk_alloc_request(struct request_queue *q, struct bio *bio, struct io_cq *icq,
-		  unsigned int flags, gfp_t gfp_mask)
-{
-	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
-	if (!rq)
-		return NULL;
-
-	blk_rq_init(q, rq);
-
-	rq->cmd_flags = flags | REQ_ALLOCED;
-
-	if (flags & REQ_ELVPRIV) {
-		rq->elv.icq = icq;
-		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
-			mempool_free(rq, q->rq.rq_pool);
-			return NULL;
-		}
-		/* @rq->elv.icq holds on to io_context until @rq is freed */
-		if (icq)
-			get_io_context(icq->ioc);
-	}
-
-	return rq;
-}
-
 /*
  * ioc_batching returns true if the ioc is a valid batching request and
  * should be given priority access to a request.
@@ -968,10 +941,25 @@ retry:
 		goto fail_alloc;
 	}
 
-	rq = blk_alloc_request(q, bio, icq, rw_flags, gfp_mask);
-	if (unlikely(!rq))
+	/* allocate and init request */
+	rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+	if (!rq)
 		goto fail_alloc;
 
+	blk_rq_init(q, rq);
+	rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+	if (rw_flags & REQ_ELVPRIV) {
+		rq->elv.icq = icq;
+		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+			mempool_free(rq, q->rq.rq_pool);
+			goto fail_alloc;
+		}
+		/* @rq->elv.icq holds on to io_context until @rq is freed */
+		if (icq)
+			get_io_context(icq->ioc);
+	}
+
 	/*
 	 * ioc may be NULL here, and ioc_batching will be false. That's
 	 * OK, if the queue is under the request limit then requests need
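The get_io_context() call carried over into the new hunk implements a hold: the request pins the io_context through rq->elv.icq until the request is freed. Below is a simplified userspace analogue of that refcount pattern, assuming a plain atomic counter; the *_sketch names are hypothetical, and the kernel's real get_io_context()/put_io_context() do considerably more.

#include <stdatomic.h>
#include <stdlib.h>

struct io_context {
	atomic_int refcount;
};

/* Take a reference so the io_context outlives the request using it. */
static void get_io_context_sketch(struct io_context *ioc)
{
	atomic_fetch_add(&ioc->refcount, 1);
}

/* Drop the reference at request-free time; the last put releases it. */
static void put_io_context_sketch(struct io_context *ioc)
{
	if (atomic_fetch_sub(&ioc->refcount, 1) == 1)
		free(ioc);
}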