Diffstat (limited to 'block')
 block/blk-core.c  |  2 +-
 block/blk-flush.c | 21 ++++++++++++++-------
 block/blk-mq.c    | 50 ++++++++++++++++++++++--------------------------
 block/blk-sysfs.c |  4 ++--
 block/blk.h       | 16 +++++++++++++---
 5 files changed, 54 insertions(+), 39 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b1dd4e086740..e1c2775c7597 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -704,7 +704,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	if (!q)
 		return NULL;
 
-	q->fq = blk_alloc_flush_queue(q);
+	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
 	if (!q->fq)
 		return NULL;
 
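Both callers of blk_alloc_flush_queue() now pass a NUMA node and a per-driver command size; the legacy path above has neither, so it passes NUMA_NO_NODE and 0, while blk-mq (further down) passes hctx->numa_node and set->cmd_size. A minimal userspace sketch of that call convention follows; every name in it is an illustrative stand-in, not kernel code:

#include <stdio.h>
#include <stdlib.h>

#define NUMA_NO_NODE (-1)

struct flush_queue {
	int node;       /* preferred allocation node, or NUMA_NO_NODE */
	size_t rq_size; /* base request size plus optional driver pdu */
};

static struct flush_queue *alloc_flush_queue(int node, size_t cmd_size)
{
	struct flush_queue *fq = calloc(1, sizeof(*fq));

	if (!fq)
		return NULL;
	fq->node = node;
	fq->rq_size = 64 + cmd_size; /* 64 stands in for sizeof(struct request) */
	return fq;
}

int main(void)
{
	/* Legacy path: no NUMA hint, no per-driver payload. */
	struct flush_queue *fq = alloc_flush_queue(NUMA_NO_NODE, 0);

	if (!fq)
		return 1;
	printf("node=%d rq_size=%zu\n", fq->node, fq->rq_size);
	free(fq);
	return 0;
}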
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 004d95e4098e..20badd7b9d1b 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -305,8 +305,15 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	fq->flush_pending_idx ^= 1;
 
 	blk_rq_init(q, flush_rq);
-	if (q->mq_ops)
-		blk_mq_clone_flush_request(flush_rq, first_rq);
+
+	/*
+	 * Borrow tag from the first request since they can't
+	 * be in flight at the same time.
+	 */
+	if (q->mq_ops) {
+		flush_rq->mq_ctx = first_rq->mq_ctx;
+		flush_rq->tag = first_rq->tag;
+	}
 
 	flush_rq->cmd_type = REQ_TYPE_FS;
 	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
@@ -480,22 +487,22 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q)
+struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
+		int node, int cmd_size)
 {
 	struct blk_flush_queue *fq;
 	int rq_sz = sizeof(struct request);
 
-	fq = kzalloc(sizeof(*fq), GFP_KERNEL);
+	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
 	if (!fq)
 		goto fail;
 
 	if (q->mq_ops) {
 		spin_lock_init(&fq->mq_flush_lock);
-		rq_sz = round_up(rq_sz + q->tag_set->cmd_size,
-				cache_line_size());
+		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
 	}
 
-	fq->flush_rq = kzalloc(rq_sz, GFP_KERNEL);
+	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
 	if (!fq->flush_rq)
 		goto fail_rq;
 
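The blk_kick_flush() hunk above replaces blk_mq_clone_flush_request() with a plain borrow of the first pending request's tag and mq_ctx: the original request is parked in the flush state machine while the flush request runs, so the two can never hold the tag at the same time, and no memcpy() of the driver pdu is needed. A small userspace sketch of the idea; the types and fields are illustrative, not the kernel's:

#include <stdio.h>

struct request { int tag; int ctx; };

static void kick_flush(struct request *flush_rq,
		       const struct request *first_rq)
{
	/* Borrow rather than clone: the original request is parked
	 * in the flush state machine while the flush request runs,
	 * so the tag cannot be used twice concurrently. */
	flush_rq->ctx = first_rq->ctx;
	flush_rq->tag = first_rq->tag;
}

int main(void)
{
	struct request first = { .tag = 42, .ctx = 1 };
	struct request flush = { .tag = -1, .ctx = -1 };

	kick_flush(&flush, &first);
	printf("flush uses tag %d on ctx %d\n", flush.tag, flush.ctx);
	return 0;
}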
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 53b6def12fc4..4e7a31466139 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -281,26 +281,6 @@ void blk_mq_free_request(struct request *rq)
 	__blk_mq_free_request(hctx, ctx, rq);
 }
 
-/*
- * Clone all relevant state from a request that has been put on hold in
- * the flush state machine into the preallocated flush request that hangs
- * off the request queue.
- *
- * For a driver the flush request should be invisible, that's why we are
- * impersonating the original request here.
- */
-void blk_mq_clone_flush_request(struct request *flush_rq,
-		struct request *orig_rq)
-{
-	struct blk_mq_hw_ctx *hctx =
-		orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
-
-	flush_rq->mq_ctx = orig_rq->mq_ctx;
-	flush_rq->tag = orig_rq->tag;
-	memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
-			hctx->cmd_size);
-}
-
 inline void __blk_mq_end_request(struct request *rq, int error)
 {
 	blk_account_io_done(rq);
@@ -1516,12 +1496,20 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
+	unsigned flush_start_tag = set->queue_depth;
+
 	blk_mq_tag_idle(hctx);
 
+	if (set->ops->exit_request)
+		set->ops->exit_request(set->driver_data,
+				       hctx->fq->flush_rq, hctx_idx,
+				       flush_start_tag + hctx_idx);
+
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
 	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+	blk_free_flush_queue(hctx->fq);
 	kfree(hctx->ctxs);
 	blk_mq_free_bitmap(&hctx->ctx_map);
 }
@@ -1556,6 +1544,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
 	int node;
+	unsigned flush_start_tag = set->queue_depth;
 
 	node = hctx->numa_node;
 	if (node == NUMA_NO_NODE)
@@ -1594,8 +1583,23 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
 		goto free_bitmap;
 
+	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+	if (!hctx->fq)
+		goto exit_hctx;
+
+	if (set->ops->init_request &&
+	    set->ops->init_request(set->driver_data,
+				   hctx->fq->flush_rq, hctx_idx,
+				   flush_start_tag + hctx_idx, node))
+		goto free_fq;
+
 	return 0;
 
+ free_fq:
+	kfree(hctx->fq);
+ exit_hctx:
+	if (set->ops->exit_hctx)
+		set->ops->exit_hctx(hctx, hctx_idx);
  free_bitmap:
 	blk_mq_free_bitmap(&hctx->ctx_map);
  free_ctxs:
@@ -1862,16 +1866,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	blk_mq_add_queue_tag_set(set, q);
 
-	q->fq = blk_alloc_flush_queue(q);
-	if (!q->fq)
-		goto err_hw_queues;
-
 	blk_mq_map_swqueue(q);
 
 	return q;
 
-err_hw_queues:
-	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
 	blk_cleanup_queue(q);
 err_hctxs:
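blk_mq_init_hctx() now allocates the per-hctx flush queue and runs ->init_request() on its flush request, so its error path gains two more labels. As elsewhere in the kernel, each failure label undoes the newest completed step and falls through into the labels for earlier steps, unwinding in reverse order of initialization. A compact, runnable userspace sketch of that pattern with injected failures; all names below are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct hctx { void *driver_data; void *fq; };

/* fail_at: 1 = flush-queue allocation fails, 2 = init_request fails. */
static int init_hctx_sketch(struct hctx *h, int fail_at)
{
	h->driver_data = malloc(16);                 /* ->init_hctx()            */
	if (!h->driver_data)
		goto fail;

	h->fq = (fail_at == 1) ? NULL : malloc(16);  /* blk_alloc_flush_queue()  */
	if (!h->fq)
		goto exit_hctx;

	if (fail_at == 2)                            /* ->init_request() failed  */
		goto free_fq;

	return 0;

free_fq:
	free(h->fq);            /* undo the newest step first */
exit_hctx:
	free(h->driver_data);   /* then fall through to the older one */
fail:
	return -1;
}

int main(void)
{
	struct hctx h;

	printf("inject init_request failure: %d\n", init_hctx_sketch(&h, 2));
	printf("no failure injected:         %d\n", init_hctx_sketch(&h, 0));
	free(h.fq);
	free(h.driver_data);
	return 0;
}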
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 718cffc4c678..e8f38a36c625 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -517,10 +517,10 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
-	blk_free_flush_queue(q->fq);
-
 	if (q->mq_ops)
 		blk_mq_free_queue(q);
+	else
+		blk_free_flush_queue(q->fq);
 
 	blk_trace_shutdown(q);
 
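With per-hctx flush queues, blk_release_queue() may only free q->fq on the legacy path; under blk-mq the flush queues belong to the hardware contexts and are already torn down in blk_mq_exit_hctx() above. A toy sketch of that ownership split, with illustrative names rather than kernel code:

#include <stdlib.h>

struct flush_queue { int dummy; };
struct queue { int mq_ops; struct flush_queue *fq; };

static void mq_free_queue_sketch(struct queue *q)
{
	(void)q; /* per-hctx flush queues were freed during hctx exit */
}

static void release_queue(struct queue *q)
{
	if (q->mq_ops)
		mq_free_queue_sketch(q);
	else
		free(q->fq); /* legacy: the queue owns its single fq */
}

int main(void)
{
	struct queue legacy = { 0, malloc(sizeof(struct flush_queue)) };

	release_queue(&legacy);
	return 0;
}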
diff --git a/block/blk.h b/block/blk.h
index 7ecdd8517e69..43b036185712 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -2,6 +2,8 @@
 #define BLK_INTERNAL_H
 
 #include <linux/idr.h>
+#include <linux/blk-mq.h>
+#include "blk-mq.h"
 
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME	(HZ/50UL)
@@ -31,7 +33,14 @@ extern struct ida blk_queue_ida;
 static inline struct blk_flush_queue *blk_get_flush_queue(
 	struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-	return q->fq;
+	struct blk_mq_hw_ctx *hctx;
+
+	if (!q->mq_ops)
+		return q->fq;
+
+	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+	return hctx->fq;
 }
 
 static inline void __blk_get_queue(struct request_queue *q)
@@ -39,8 +48,9 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
-struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q);
-void blk_free_flush_queue(struct blk_flush_queue *fq);
+struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
+		int node, int cmd_size);
+void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask);
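blk_get_flush_queue() is now the single dispatch point: a legacy queue returns its one q->fq, while blk-mq maps the submitting ctx's CPU to a hardware context and returns that hctx's flush queue. A runnable userspace sketch of the lookup, where the modulo mapping stands in for q->mq_ops->map_queue() and all names are illustrative:

#include <stdio.h>

#define NR_CPUS 4
#define NR_HCTX 2

struct flush_queue { int id; };

struct queue {
	int mq;                              /* 0 = legacy, 1 = blk-mq */
	struct flush_queue fq;               /* legacy: one per queue  */
	struct flush_queue hctx_fq[NR_HCTX]; /* blk-mq: one per hctx   */
};

static struct flush_queue *get_flush_queue(struct queue *q, int cpu)
{
	if (!q->mq)
		return &q->fq;
	return &q->hctx_fq[cpu % NR_HCTX];   /* stand-in for map_queue() */
}

int main(void)
{
	struct queue q = { .mq = 1, .hctx_fq = { { 0 }, { 1 } } };
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> fq %d\n", cpu, get_flush_queue(&q, cpu)->id);
	return 0;
}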