Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  |  2
-rw-r--r--  block/blk-flush.c | 11
-rw-r--r--  block/blk-mq.c    |  3
-rw-r--r--  block/blk.h       |  4
4 files changed, 10 insertions, 10 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index dba0a8350807..b1dd4e086740 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -390,7 +390,7 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	 * be drained. Check all the queues and counters.
	 */
	if (drain_all) {
-		struct blk_flush_queue *fq = blk_get_flush_queue(q);
+		struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
		drain |= !list_empty(&q->queue_head);
		for (i = 0; i < 2; i++) {
			drain |= q->nr_rqs[i];
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9bc5b4f35c23..004d95e4098e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -223,7 +223,7 @@ static void flush_end_io(struct request *flush_rq, int error)
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q);
+	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
@@ -319,7 +319,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 static void flush_data_end_io(struct request *rq, int error)
 {
	struct request_queue *q = rq->q;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q);
+	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/*
	 * After populating an empty queue, kick it to avoid stall. Read
@@ -333,11 +333,10 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 {
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q);
+	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

-	ctx = rq->mq_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
@@ -367,7 +366,7 @@ void blk_insert_flush(struct request *rq)
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
-	struct blk_flush_queue *fq = blk_get_flush_queue(q);
+	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done. Adjust
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 59ca79634cb9..53b6def12fc4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -518,7 +518,8 @@ static inline bool is_flush_request(struct request *rq,
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
	struct request *rq = tags->rqs[tag];
-	struct blk_flush_queue *fq = blk_get_flush_queue(rq->q);
+	/* mq_ctx of flush rq is always cloned from the corresponding req */
+	struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);

	if (!is_flush_request(rq, fq, tag))
		return rq;
diff --git a/block/blk.h b/block/blk.h
index 9eaa6e91b13f..7ecdd8517e69 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -29,7 +29,7 @@ extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;

 static inline struct blk_flush_queue *blk_get_flush_queue(
-	struct request_queue *q)
+	struct request_queue *q, struct blk_mq_ctx *ctx)
 {
	return q->fq;
 }
@@ -106,7 +106,7 @@ void blk_insert_flush(struct request *rq);
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
	struct request *rq;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q);
+	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	while (1) {
		if (!list_empty(&q->queue_head)) {
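Note that blk_get_flush_queue() still ignores the new ctx argument and keeps returning the single per-queue q->fq; this patch only threads the blk_mq_ctx through every caller. As a rough, hedged sketch of where such a parameter could lead (not part of this patch), a per-hardware-queue lookup might reuse the same map_queue step that mq_flush_data_end_io() already performs above; the hctx->fq field in this sketch is hypothetical and does not exist in this diff:

/*
 * Illustrative sketch only -- NOT part of this patch.  Assumes a
 * hypothetical per-hw-queue flush queue (hctx->fq); the patch above
 * keeps the single q->fq and merely passes ctx through the callers.
 */
static inline struct blk_flush_queue *blk_get_flush_queue(
	struct request_queue *q, struct blk_mq_ctx *ctx)
{
	if (q->mq_ops && ctx) {
		/* map the software context to its hardware queue,
		 * the same way mq_flush_data_end_io() does */
		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);

		return hctx->fq;	/* hypothetical per-hctx flush queue */
	}
	return q->fq;			/* what the patch actually does today */
}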