author     Tejun Heo <tj@kernel.org>      2012-03-05 16:15:23 -0500
committer  Jens Axboe <axboe@kernel.dk>   2012-03-06 15:27:24 -0500
commit     b679281a6410676a41b175c5a185150a1ae42f9d (patch)
tree       f3966a921a4c07ebf80d57a46a80ef0a71e8354a /block
parent     c875f4d0250a1f070fa26087a73bdd8f54c48100 (diff)
block: restructure get_request()
get_request() is structured a bit unusually in that the failure path is
inlined in the usual flow with goto labels atop and inside it. Relocate
the error path to the end of the function.

This is to prepare for icq handling changes in get_request() and doesn't
introduce any behavior change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
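To make the restructuring concrete before reading the diff, here is a minimal,
self-contained sketch of the before/after shape of the error path. It is not
the kernel code: struct ctx, struct foo, alloc_ctx(), alloc_foo() and
undo_accounting() are hypothetical stand-ins for the icq/request allocations
and for the accounting that must be undone on failure.

#include <stddef.h>

struct ctx { int id; };
struct foo { struct ctx *c; };

/* Hypothetical stand-ins for ioc_create_icq()/blk_alloc_request()-style
 * allocations and for the accounting undone in the failure path. */
static struct ctx *alloc_ctx(void)          { static struct ctx c; return &c; }
static struct foo *alloc_foo(struct ctx *c) { static struct foo f; f.c = c; return &f; }
static void undo_accounting(void)           { /* drop counters taken earlier */ }

/* Before: the failure branch is inlined in the normal flow; an earlier
 * failure jumps *into* it via a label sitting atop the !f check. */
static struct foo *get_foo_before(void)
{
	struct foo *f = NULL;
	struct ctx *c;

	c = alloc_ctx();
	if (!c)
		goto fail_ctx;

	f = alloc_foo(c);
fail_ctx:
	if (!f) {
		undo_accounting();	/* unwind whatever was set up so far */
		goto out;
	}
out:
	return f;
}

/* After: the success path reads straight through and returns directly;
 * every failure jumps forward to a single unwind block at the end. */
static struct foo *get_foo_after(void)
{
	struct foo *f;
	struct ctx *c;

	c = alloc_ctx();
	if (!c)
		goto fail_alloc;

	f = alloc_foo(c);
	if (!f)
		goto fail_alloc;
	return f;

fail_alloc:
	undo_accounting();	/* unwind whatever was set up so far */
	return NULL;
}

int main(void)
{
	/* Both variants behave identically; only the control flow differs. */
	return (get_foo_before() && get_foo_after()) ? 0 : 1;
}

The point of the change is that the common path in get_foo_after() reads top
to bottom with no labels in it, while all unwinding lives in one place at the
end; the real patch does the same with the fail_alloc/rq_starved block in
get_request().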
Diffstat (limited to 'block')
-rw-r--r--   block/blk-core.c   60
1 file changed, 29 insertions, 31 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 05693f403e46..792a384a8e35 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -826,7 +826,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
 {
-	struct request *rq = NULL;
+	struct request *rq;
 	struct request_list *rl = &q->rq;
 	struct elevator_type *et;
 	struct io_context *ioc;
@@ -878,7 +878,7 @@ retry:
 				 * process is not a "batcher", and not
 				 * exempted by the IO scheduler
 				 */
-				goto out;
+				return NULL;
 			}
 		}
 	}
@@ -891,7 +891,7 @@ retry:
 	 * allocated with any setting of ->nr_requests
 	 */
 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
-		goto out;
+		return NULL;
 
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
@@ -921,36 +921,12 @@ retry:
 	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
 		icq = ioc_create_icq(q, gfp_mask);
 		if (!icq)
-			goto fail_icq;
+			goto fail_alloc;
 	}
 
 	rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
-
-fail_icq:
-	if (unlikely(!rq)) {
-		/*
-		 * Allocation failed presumably due to memory. Undo anything
-		 * we might have messed up.
-		 *
-		 * Allocating task should really be put onto the front of the
-		 * wait queue, but this is pretty rare.
-		 */
-		spin_lock_irq(q->queue_lock);
-		freed_request(q, rw_flags);
-
-		/*
-		 * in the very unlikely event that allocation failed and no
-		 * requests for this direction was pending, mark us starved
-		 * so that freeing of a request in the other direction will
-		 * notice us. another possible fix would be to split the
-		 * rq mempool into READ and WRITE
-		 */
-rq_starved:
-		if (unlikely(rl->count[is_sync] == 0))
-			rl->starved[is_sync] = 1;
-
-		goto out;
-	}
+	if (unlikely(!rq))
+		goto fail_alloc;
 
 	/*
 	 * ioc may be NULL here, and ioc_batching will be false. That's
@@ -962,8 +938,30 @@ rq_starved:
 	ioc->nr_batch_requests--;
 
 	trace_block_getrq(q, bio, rw_flags & 1);
-out:
 	return rq;
+
+fail_alloc:
+	/*
+	 * Allocation failed presumably due to memory. Undo anything we
+	 * might have messed up.
+	 *
+	 * Allocating task should really be put onto the front of the wait
+	 * queue, but this is pretty rare.
+	 */
+	spin_lock_irq(q->queue_lock);
+	freed_request(q, rw_flags);
+
+	/*
+	 * in the very unlikely event that allocation failed and no
+	 * requests for this direction was pending, mark us starved so that
+	 * freeing of a request in the other direction will notice
+	 * us. another possible fix would be to split the rq mempool into
+	 * READ and WRITE
+	 */
+rq_starved:
+	if (unlikely(rl->count[is_sync] == 0))
+		rl->starved[is_sync] = 1;
+	return NULL;
 }
 
 /**