author		Ming Lei <ming.lei@canonical.com>	2014-09-25 11:23:45 -0400
committer	Jens Axboe <axboe@fb.com>		2014-09-25 17:22:42 -0400
commit		0bae352da54a95435f721705d3670a6eaefdcf87 (patch)
tree		312c52c12ad21bcead3be8b8def5228003f77d68
parent		ba483388e3058b3e412632a84e6bf1f134beaf3d (diff)
block: flush: avoid to figure out flush queue unnecessarily
Figure out the flush queue only once, at the entry points of the flush
machinery and of the request completion handlers, and pass it through
to the helpers from there.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--	block/blk-flush.c	30
1 file changed, 16 insertions(+), 14 deletions(-)
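
The change is an instance of a common "resolve once, pass through" refactoring: each entry point (blk_insert_flush(), flush_end_io() and the data completion handlers) looks the flush queue up a single time and hands it to the helpers as a parameter, instead of every callee re-deriving it via blk_get_flush_queue(). Below is a minimal standalone C sketch of the pattern; the struct layouts and helper names are simplified stand-ins for illustration, not the kernel's actual definitions.

#include <stdbool.h>

/* Simplified stand-ins for the kernel structures (hypothetical). */
struct blk_flush_queue {
	int flush_pending_idx;
};

struct request_queue {
	struct blk_flush_queue fq;
};

/* Stand-in for blk_get_flush_queue(): derive the context from @q. */
static struct blk_flush_queue *get_flush_queue(struct request_queue *q)
{
	return &q->fq;
}

/*
 * After the change, helpers receive the already-resolved @fq as a
 * parameter instead of each calling get_flush_queue(q) themselves.
 */
static bool kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
	return fq->flush_pending_idx != 0;	/* placeholder for real work */
}

static bool complete_seq(struct request_queue *q, struct blk_flush_queue *fq)
{
	/* ...per-sequence bookkeeping would go here... */
	return kick_flush(q, fq);		/* pass @fq straight through */
}

/* Entry point: look the flush queue up once, then thread it down. */
bool insert_flush(struct request_queue *q)
{
	struct blk_flush_queue *fq = get_flush_queue(q);

	return complete_seq(q, fq);
}

Besides saving the repeated lookups, taking @fq explicitly keeps the helpers correct even if a flush queue should ever stop being uniquely derivable from the request_queue alone (for example, one flush queue per hardware queue); that reading is inferred from the change itself, not stated in the commit message.
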
diff --git a/block/blk-flush.c b/block/blk-flush.c
index d66cbf2b2bc8..9bc5b4f35c23 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -91,7 +91,8 @@ enum {
 	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
 };
 
-static bool blk_kick_flush(struct request_queue *q);
+static bool blk_kick_flush(struct request_queue *q,
+			   struct blk_flush_queue *fq);
 
 static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
 {
@@ -148,6 +149,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
 /**
  * blk_flush_complete_seq - complete flush sequence
  * @rq: FLUSH/FUA request being sequenced
+ * @fq: flush queue
  * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
  * @error: whether an error occurred
  *
@@ -160,11 +162,11 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
  * RETURNS:
  * %true if requests were added to the dispatch queue, %false otherwise.
  */
-static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
-				   int error)
+static bool blk_flush_complete_seq(struct request *rq,
+				   struct blk_flush_queue *fq,
+				   unsigned int seq, int error)
 {
 	struct request_queue *q = rq->q;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q);
 	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
 	bool queued = false, kicked;
 
@@ -210,7 +212,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 		BUG();
 	}
 
-	kicked = blk_kick_flush(q);
+	kicked = blk_kick_flush(q, fq);
 	return kicked | queued;
 }
 
@@ -242,7 +244,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 		unsigned int seq = blk_flush_cur_seq(rq);
 
 		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
-		queued |= blk_flush_complete_seq(rq, seq, error);
+		queued |= blk_flush_complete_seq(rq, fq, seq, error);
 	}
 
 	/*
@@ -268,6 +270,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
+ * @fq: flush queue
  *
  * Flush related states of @q have changed, consider issuing flush request.
  * Please read the comment at the top of this file for more info.
@@ -278,9 +281,8 @@ static void flush_end_io(struct request *flush_rq, int error)
  * RETURNS:
  * %true if flush was issued, %false otherwise.
  */
-static bool blk_kick_flush(struct request_queue *q)
+static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 {
-	struct blk_flush_queue *fq = blk_get_flush_queue(q);
 	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
 	struct request *first_rq =
 		list_first_entry(pending, struct request, flush.list);
@@ -317,12 +319,13 @@ static bool blk_kick_flush(struct request_queue *q)
 static void flush_data_end_io(struct request *rq, int error)
 {
 	struct request_queue *q = rq->q;
+	struct blk_flush_queue *fq = blk_get_flush_queue(q);
 
 	/*
 	 * After populating an empty queue, kick it to avoid stall. Read
 	 * the comment in flush_end_io().
 	 */
-	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
 		blk_run_queue_async(q);
 }
 
@@ -342,7 +345,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 	 * the comment in flush_end_io().
 	 */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
-	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
+	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
 		blk_mq_run_hw_queue(hctx, true);
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
@@ -364,6 +367,7 @@ void blk_insert_flush(struct request *rq)
 	struct request_queue *q = rq->q;
 	unsigned int fflags = q->flush_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
+	struct blk_flush_queue *fq = blk_get_flush_queue(q);
 
 	/*
 	 * @policy now records what operations need to be done. Adjust
@@ -412,18 +416,16 @@ void blk_insert_flush(struct request *rq)
 	rq->cmd_flags |= REQ_FLUSH_SEQ;
 	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
 	if (q->mq_ops) {
-		struct blk_flush_queue *fq = blk_get_flush_queue(q);
-
 		rq->end_io = mq_flush_data_end_io;
 
 		spin_lock_irq(&fq->mq_flush_lock);
-		blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
+		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 		spin_unlock_irq(&fq->mq_flush_lock);
 		return;
 	}
 	rq->end_io = flush_data_end_io;
 
-	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
+	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 }
 
 /**