author     Tejun Heo <tj@kernel.org>        2012-06-04 23:40:55 -0400
committer  Jens Axboe <axboe@kernel.dk>     2012-06-25 05:53:49 -0400
commit     a06e05e6afab70b4b23c0a7975aaeae24b195cd6 (patch)
tree       ec20f68e82542ebee27e914b04f356dc164bbe48 /block
parent     86072d8112595ea1b6beeb33f578e7c2839e014e (diff)
block: refactor get_request[_wait]()
Currently, there are two request allocation functions - get_request()
and get_request_wait(). The former tries to allocate a request once;
the latter wraps it and keeps retrying until allocation succeeds.
The combination of the two functions delivers fallible non-wait
allocation, fallible wait allocation and unfailing wait allocation.
However, given that forward progress is guaranteed, fallible wait
allocation isn't all that useful and in fact nobody uses it.
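For reference, this is the pre-patch dispatch in blk_get_request() (it
reappears as removed lines in the second-to-last hunk below); every
caller that needed the unfailing behaviour had to pick the right
function by hand:

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT)
		/* retries under memory pressure; fails iff @q is dead */
		rq = get_request_wait(q, rw, NULL);
	else
		/* single attempt; may return NULL */
		rq = get_request(q, rw, NULL, gfp_mask);
	if (!rq)
		spin_unlock_irq(q->queue_lock);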
This patch simplifies the interface as follows.
* get_request() is renamed to __get_request() and is only used by the
wrapper function.
* get_request_wait() is renamed to get_request(). It now takes
@gfp_mask and retries iff it contains %__GFP_WAIT.
This patch doesn't introduce any functional change and prepares for
further updates to the request allocation path.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
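Condensed, the post-patch shape is a single entry point whose retry
behaviour is keyed off @gfp_mask. The bodies below are a sketch that
elides the details visible in the diff that follows:

static struct request *__get_request(struct request_queue *q, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
{
	/* one allocation attempt (elided); returns NULL on failure */
}

static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	struct request *rq;
retry:
	rq = __get_request(q, rw_flags, bio, gfp_mask);
	if (rq)
		return rq;

	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q)))
		return NULL;

	/* sleep on the request_list wait queue, become a batcher, then */
	goto retry;
}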
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c | 74
1 file changed, 35 insertions(+), 39 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 64f9a8668253..080204a10fcf 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -837,7 +837,7 @@ static struct io_context *rq_ioc(struct bio *bio)
 }
 
 /**
- * get_request - get a free request
+ * __get_request - get a free request
  * @q: request_queue to allocate request from
  * @rw_flags: RW and SYNC flags
  * @bio: bio to allocate request for (can be %NULL)
@@ -850,8 +850,8 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns %NULL on failure, with @q->queue_lock held.
  * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-				   struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_queue *q, int rw_flags,
+				     struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq;
 	struct request_list *rl = &q->rq;
@@ -1029,56 +1029,55 @@ rq_starved:
 }
 
 /**
- * get_request_wait - get a free request with retry
+ * get_request - get a free request
  * @q: request_queue to allocate request from
  * @rw_flags: RW and SYNC flags
  * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
  *
- * Get a free request from @q.  This function keeps retrying under memory
- * pressure and fails iff @q is dead.
+ * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
+ * function keeps retrying under memory pressure and fails iff @q is dead.
  *
  * Must be callled with @q->queue_lock held and,
  * Returns %NULL on failure, with @q->queue_lock held.
  * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request_wait(struct request_queue *q, int rw_flags,
-					struct bio *bio)
+static struct request *get_request(struct request_queue *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	DEFINE_WAIT(wait);
+	struct request_list *rl = &q->rq;
 	struct request *rq;
+retry:
+	rq = __get_request(q, rw_flags, bio, gfp_mask);
+	if (rq)
+		return rq;
 
-	rq = get_request(q, rw_flags, bio, GFP_NOIO);
-	while (!rq) {
-		DEFINE_WAIT(wait);
-		struct request_list *rl = &q->rq;
-
-		if (unlikely(blk_queue_dead(q)))
-			return NULL;
-
-		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
-				TASK_UNINTERRUPTIBLE);
+	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q)))
+		return NULL;
 
-		trace_block_sleeprq(q, bio, rw_flags & 1);
+	/* wait on @rl and retry */
+	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+				  TASK_UNINTERRUPTIBLE);
 
-		spin_unlock_irq(q->queue_lock);
-		io_schedule();
+	trace_block_sleeprq(q, bio, rw_flags & 1);
 
-		/*
-		 * After sleeping, we become a "batching" process and
-		 * will be able to allocate at least one request, and
-		 * up to a big batch of them for a small period time.
-		 * See ioc_batching, ioc_set_batching
-		 */
-		create_io_context(GFP_NOIO, q->node);
-		ioc_set_batching(q, current->io_context);
+	spin_unlock_irq(q->queue_lock);
+	io_schedule();
 
-		spin_lock_irq(q->queue_lock);
-		finish_wait(&rl->wait[is_sync], &wait);
+	/*
+	 * After sleeping, we become a "batching" process and will be able
+	 * to allocate at least one request, and up to a big batch of them
+	 * for a small period time.  See ioc_batching, ioc_set_batching
+	 */
+	create_io_context(GFP_NOIO, q->node);
+	ioc_set_batching(q, current->io_context);
 
-		rq = get_request(q, rw_flags, bio, GFP_NOIO);
-	};
+	spin_lock_irq(q->queue_lock);
+	finish_wait(&rl->wait[is_sync], &wait);
 
-	return rq;
+	goto retry;
 }
 
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
@@ -1088,10 +1087,7 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 	BUG_ON(rw != READ && rw != WRITE);
 
 	spin_lock_irq(q->queue_lock);
-	if (gfp_mask & __GFP_WAIT)
-		rq = get_request_wait(q, rw, NULL);
-	else
-		rq = get_request(q, rw, NULL, gfp_mask);
+	rq = get_request(q, rw, NULL, gfp_mask);
 	if (!rq)
 		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
@@ -1481,7 +1477,7 @@ get_rq:
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, rw_flags, bio);
+	req = get_request(q, rw_flags, bio, GFP_NOIO);
 	if (unlikely(!req)) {
 		bio_endio(bio, -ENODEV);	/* @q is dead */
 		goto out_unlock;
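Caller-visible behaviour after the refactor, sketched for a hypothetical
submitter (alloc_example() is illustrative, not part of the patch): a
waiting mask such as GFP_NOIO sleeps under pressure and only fails if
the queue is dead, while a mask without %__GFP_WAIT gets a single
attempt and may legitimately return NULL.

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/gfp.h>

static struct request *alloc_example(struct request_queue *q)
{
	/* GFP_NOIO contains __GFP_WAIT: may sleep, NULL only if @q is dead */
	struct request *rq = blk_get_request(q, READ, GFP_NOIO);

	if (!rq)
		return NULL;	/* @q is dead */

	/*
	 * A non-waiting mask (e.g. GFP_ATOMIC) would make a single
	 * attempt instead; NULL is then an ordinary out-of-requests
	 * result that the caller must handle.
	 */
	return rq;
}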