diff options
author | Ming Lei <ming.lei@redhat.com> | 2017-10-27 00:43:30 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2017-11-01 10:20:34 -0400 |
commit | 1f460b63d4b37f504d8d0affc2cd492eb005ea97 (patch) | |
tree | 829611af2a709a83ac018bf6c1d083e2fde02659 /block/blk-mq-sched.c | |
parent | 358a3a6bccb74da9d63a26b2dd5f09f1e9970e0b (diff) |
blk-mq: don't restart queue when .get_budget returns BLK_STS_RESOURCE
SCSI restarts its queue in scsi_end_request() automatically, so we don't
need to handle this case in blk-mq.
Especially, since no request will be dequeued in this case, we needn't
worry about IO hang caused by restart vs. dispatch.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r-- | block/blk-mq-sched.c | 45 |
1 file changed, 20 insertions, 25 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index daab27feb653..7775f6b12fa9 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c | |||
@@ -81,8 +81,12 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) | |||
81 | } | 81 | } |
82 | } | 82 | } |
83 | 83 | ||
84 | /* return true if hctx need to run again */ | 84 | /* |
85 | static bool blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) | 85 | * Only SCSI implements .get_budget and .put_budget, and SCSI restarts |
86 | * its queue by itself in its completion handler, so we don't need to | ||
87 | * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE. | ||
88 | */ | ||
89 | static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) | ||
86 | { | 90 | { |
87 | struct request_queue *q = hctx->queue; | 91 | struct request_queue *q = hctx->queue; |
88 | struct elevator_queue *e = q->elevator; | 92 | struct elevator_queue *e = q->elevator; |
@@ -98,7 +102,7 @@ static bool blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) | |||
98 | 102 | ||
99 | ret = blk_mq_get_dispatch_budget(hctx); | 103 | ret = blk_mq_get_dispatch_budget(hctx); |
100 | if (ret == BLK_STS_RESOURCE) | 104 | if (ret == BLK_STS_RESOURCE) |
101 | return true; | 105 | break; |
102 | 106 | ||
103 | rq = e->type->ops.mq.dispatch_request(hctx); | 107 | rq = e->type->ops.mq.dispatch_request(hctx); |
104 | if (!rq) { | 108 | if (!rq) { |
@@ -116,8 +120,6 @@ static bool blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) | |||
116 | */ | 120 | */ |
117 | list_add(&rq->queuelist, &rq_list); | 121 | list_add(&rq->queuelist, &rq_list); |
118 | } while (blk_mq_dispatch_rq_list(q, &rq_list, true)); | 122 | } while (blk_mq_dispatch_rq_list(q, &rq_list, true)); |
119 | |||
120 | return false; | ||
121 | } | 123 | } |
122 | 124 | ||
123 | static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, | 125 | static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, |
@@ -131,8 +133,12 @@ static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, | |||
131 | return hctx->ctxs[idx]; | 133 | return hctx->ctxs[idx]; |
132 | } | 134 | } |
133 | 135 | ||
134 | /* return true if hctx need to run again */ | 136 | /* |
135 | static bool blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) | 137 | * Only SCSI implements .get_budget and .put_budget, and SCSI restarts |
138 | * its queue by itself in its completion handler, so we don't need to | ||
139 | * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE. | ||
140 | */ | ||
141 | static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) | ||
136 | { | 142 | { |
137 | struct request_queue *q = hctx->queue; | 143 | struct request_queue *q = hctx->queue; |
138 | LIST_HEAD(rq_list); | 144 | LIST_HEAD(rq_list); |
@@ -147,7 +153,7 @@ static bool blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) | |||
147 | 153 | ||
148 | ret = blk_mq_get_dispatch_budget(hctx); | 154 | ret = blk_mq_get_dispatch_budget(hctx); |
149 | if (ret == BLK_STS_RESOURCE) | 155 | if (ret == BLK_STS_RESOURCE) |
150 | return true; | 156 | break; |
151 | 157 | ||
152 | rq = blk_mq_dequeue_from_ctx(hctx, ctx); | 158 | rq = blk_mq_dequeue_from_ctx(hctx, ctx); |
153 | if (!rq) { | 159 | if (!rq) { |
@@ -171,22 +177,19 @@ static bool blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) | |||
171 | } while (blk_mq_dispatch_rq_list(q, &rq_list, true)); | 177 | } while (blk_mq_dispatch_rq_list(q, &rq_list, true)); |
172 | 178 | ||
173 | WRITE_ONCE(hctx->dispatch_from, ctx); | 179 | WRITE_ONCE(hctx->dispatch_from, ctx); |
174 | |||
175 | return false; | ||
176 | } | 180 | } |
177 | 181 | ||
178 | /* return true if hw queue need to be run again */ | 182 | /* return true if hw queue need to be run again */ |
179 | bool blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) | 183 | void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) |
180 | { | 184 | { |
181 | struct request_queue *q = hctx->queue; | 185 | struct request_queue *q = hctx->queue; |
182 | struct elevator_queue *e = q->elevator; | 186 | struct elevator_queue *e = q->elevator; |
183 | const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request; | 187 | const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request; |
184 | LIST_HEAD(rq_list); | 188 | LIST_HEAD(rq_list); |
185 | bool run_queue = false; | ||
186 | 189 | ||
187 | /* RCU or SRCU read lock is needed before checking quiesced flag */ | 190 | /* RCU or SRCU read lock is needed before checking quiesced flag */ |
188 | if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) | 191 | if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) |
189 | return false; | 192 | return; |
190 | 193 | ||
191 | hctx->run++; | 194 | hctx->run++; |
192 | 195 | ||
@@ -218,12 +221,12 @@ bool blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) | |||
218 | blk_mq_sched_mark_restart_hctx(hctx); | 221 | blk_mq_sched_mark_restart_hctx(hctx); |
219 | if (blk_mq_dispatch_rq_list(q, &rq_list, false)) { | 222 | if (blk_mq_dispatch_rq_list(q, &rq_list, false)) { |
220 | if (has_sched_dispatch) | 223 | if (has_sched_dispatch) |
221 | run_queue = blk_mq_do_dispatch_sched(hctx); | 224 | blk_mq_do_dispatch_sched(hctx); |
222 | else | 225 | else |
223 | run_queue = blk_mq_do_dispatch_ctx(hctx); | 226 | blk_mq_do_dispatch_ctx(hctx); |
224 | } | 227 | } |
225 | } else if (has_sched_dispatch) { | 228 | } else if (has_sched_dispatch) { |
226 | run_queue = blk_mq_do_dispatch_sched(hctx); | 229 | blk_mq_do_dispatch_sched(hctx); |
227 | } else if (q->mq_ops->get_budget) { | 230 | } else if (q->mq_ops->get_budget) { |
228 | /* | 231 | /* |
229 | * If we need to get budget before queuing request, we | 232 | * If we need to get budget before queuing request, we |
@@ -233,19 +236,11 @@ bool blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) | |||
233 | * TODO: get more budgets, and dequeue more requests in | 236 | * TODO: get more budgets, and dequeue more requests in |
234 | * one time. | 237 | * one time. |
235 | */ | 238 | */ |
236 | run_queue = blk_mq_do_dispatch_ctx(hctx); | 239 | blk_mq_do_dispatch_ctx(hctx); |
237 | } else { | 240 | } else { |
238 | blk_mq_flush_busy_ctxs(hctx, &rq_list); | 241 | blk_mq_flush_busy_ctxs(hctx, &rq_list); |
239 | blk_mq_dispatch_rq_list(q, &rq_list, false); | 242 | blk_mq_dispatch_rq_list(q, &rq_list, false); |
240 | } | 243 | } |
241 | |||
242 | if (run_queue && !blk_mq_sched_needs_restart(hctx) && | ||
243 | !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state)) { | ||
244 | blk_mq_sched_mark_restart_hctx(hctx); | ||
245 | return true; | ||
246 | } | ||
247 | |||
248 | return false; | ||
249 | } | 244 | } |
250 | 245 | ||
251 | bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, | 246 | bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, |