author     Zhang, Yanmin <yanmin_zhang@linux.intel.com>    2008-05-22 09:13:29 -0400
committer  Jens Axboe <jens.axboe@oracle.com>              2008-05-28 08:49:27 -0400
commit     05caf8dbc1880415df3378cfd114d832c9618b60 (patch)
tree       71b2a0839739c4a3e54e7d40d1a2358d61c8b279 /block/blk-core.c
parent     ca39d651d17df49b6d11f851d56c0ce0ce01ea1a (diff)
block: Move the second call to get_request to the end of the loop
In get_request_wait(), the second call to get_request() can be moved to
the end of the while loop: if the first call fails, the second call,
made before the task has slept, will fail as well.
Signed-off-by: Zhang Yanmin <yanmin.zhang@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
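The net effect is that each pass through the loop now sleeps unconditionally before retrying the allocation. A condensed sketch of the new control flow, with the queue-lock handling and the blktrace call elided (a paraphrase of the diff below, not the verbatim source):

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);

		/* wait exclusively on the request_list waitqueue for this direction */
		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
				TASK_UNINTERRUPTIBLE);

		__generic_unplug_device(q);	/* kick the queue so in-flight requests complete */
		io_schedule();			/* always sleep before retrying */

		/* after sleeping we are a "batching" task and may allocate */
		ioc_set_batching(q, current_io_context(GFP_NOIO, q->node));
		finish_wait(&rl->wait[rw], &wait);

		/* the retry now sits at the end of the loop */
		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}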
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--   block/blk-core.c   37 +++++++++++++++++--------------------
1 file changed, 17 insertions(+), 20 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 6a9cc0d22a61..1905aaba49fb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -806,35 +806,32 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
+		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw_flags, bio, GFP_NOIO);
-
-		if (!rq) {
-			struct io_context *ioc;
-
-			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 
-			__generic_unplug_device(q);
-			spin_unlock_irq(q->queue_lock);
-			io_schedule();
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+		io_schedule();
 
-			/*
-			 * After sleeping, we become a "batching" process and
-			 * will be able to allocate at least one request, and
-			 * up to a big batch of them for a small period time.
-			 * See ioc_batching, ioc_set_batching
-			 */
-			ioc = current_io_context(GFP_NOIO, q->node);
-			ioc_set_batching(q, ioc);
+		/*
+		 * After sleeping, we become a "batching" process and
+		 * will be able to allocate at least one request, and
+		 * up to a big batch of them for a small period time.
+		 * See ioc_batching, ioc_set_batching
+		 */
+		ioc = current_io_context(GFP_NOIO, q->node);
+		ioc_set_batching(q, ioc);
 
-			spin_lock_irq(q->queue_lock);
-		}
+		spin_lock_irq(q->queue_lock);
 		finish_wait(&rl->wait[rw], &wait);
-	}
+
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
+	};
 
 	return rq;
 }