author     Ming Lei <ming.lei@canonical.com>    2014-09-25 11:23:38 -0400
committer  Jens Axboe <axboe@fb.com>            2014-09-25 17:22:32 -0400
commit     08e98fc6016c890c2f4ffba6decc0ca9d2d5d7f8 (patch)
tree       1f852a126a0f5282cd9c39f4540cfb1821f6d18d /block
parent     fe052529e465daff25225aac769828baa88b7252 (diff)
blk-mq: handle failure path for initializing hctx
Failure to initialize one hctx isn't handled, so this patch introduces blk_mq_init_hctx() and its pair, blk_mq_exit_hctx(), to handle it explicitly. It also makes the code cleaner.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
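The shape of the fix is the common kernel error-handling idiom: a per-hctx init function that unwinds its own partial setup through goto labels, paired with an exit function the caller can invoke for every hctx that did initialize fully. A minimal sketch of that idiom follows, with made-up resource names and plain malloc/free rather than the real blk-mq structures and calls:

#include <stdlib.h>

/* Illustrative stand-in for a hardware queue context; not the real struct. */
struct hw_ctx {
        void *ctxs;
        void *map;
};

/* Set up one context; on failure, free only what this call allocated. */
static int init_one(struct hw_ctx *h)
{
        h->ctxs = malloc(64);
        if (!h->ctxs)
                goto fail;

        h->map = malloc(32);
        if (!h->map)
                goto free_ctxs;

        return 0;

free_ctxs:
        free(h->ctxs);
fail:
        return -1;
}

/* Tear down one fully initialized context. */
static void exit_one(struct hw_ctx *h)
{
        free(h->map);
        free(h->ctxs);
}

/* Initialize all contexts; on failure, unwind the ones that succeeded. */
static int init_all(struct hw_ctx *h, int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (init_one(&h[i]))
                        break;          /* h[i] already cleaned itself up */

        if (i == n)
                return 0;

        while (i--)
                exit_one(&h[i]);
        return -1;
}

In the patch itself the resources are hctx->ctxs, the ctx_map bitmap, and the CPU notifier, blk_mq_init_hctx()/blk_mq_exit_hctx() play the roles of init_one()/exit_one(), and the loop over hardware queues in blk_mq_init_hw_queues() corresponds to init_all().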
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq.c   114
1 file changed, 69 insertions(+), 45 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a3a80884ed95..66ef1fb79326 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1509,6 +1509,20 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
         return NOTIFY_OK;
 }
 
+static void blk_mq_exit_hctx(struct request_queue *q,
+                struct blk_mq_tag_set *set,
+                struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+        blk_mq_tag_idle(hctx);
+
+        if (set->ops->exit_hctx)
+                set->ops->exit_hctx(hctx, hctx_idx);
+
+        blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+        kfree(hctx->ctxs);
+        blk_mq_free_bitmap(&hctx->ctx_map);
+}
+
 static void blk_mq_exit_hw_queues(struct request_queue *q,
                 struct blk_mq_tag_set *set, int nr_queue)
 {
@@ -1518,17 +1532,8 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
         queue_for_each_hw_ctx(q, hctx, i) {
                 if (i == nr_queue)
                         break;
-
-                blk_mq_tag_idle(hctx);
-
-                if (set->ops->exit_hctx)
-                        set->ops->exit_hctx(hctx, i);
-
-                blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
-                kfree(hctx->ctxs);
-                blk_mq_free_bitmap(&hctx->ctx_map);
+                blk_mq_exit_hctx(q, set, hctx, i);
         }
-
 }
 
 static void blk_mq_free_hw_queues(struct request_queue *q,
@@ -1543,53 +1548,72 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
         }
 }
 
-static int blk_mq_init_hw_queues(struct request_queue *q,
-                struct blk_mq_tag_set *set)
+static int blk_mq_init_hctx(struct request_queue *q,
+                struct blk_mq_tag_set *set,
+                struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
-        struct blk_mq_hw_ctx *hctx;
-        unsigned int i;
+        int node;
+
+        node = hctx->numa_node;
+        if (node == NUMA_NO_NODE)
+                node = hctx->numa_node = set->numa_node;
+
+        INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+        INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
+        spin_lock_init(&hctx->lock);
+        INIT_LIST_HEAD(&hctx->dispatch);
+        hctx->queue = q;
+        hctx->queue_num = hctx_idx;
+        hctx->flags = set->flags;
+        hctx->cmd_size = set->cmd_size;
+
+        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
+                                blk_mq_hctx_notify, hctx);
+        blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+
+        hctx->tags = set->tags[hctx_idx];
 
         /*
-         * Initialize hardware queues
+         * Allocate space for all possible cpus to avoid allocation at
+         * runtime
         */
-        queue_for_each_hw_ctx(q, hctx, i) {
-                int node;
+        hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+                                GFP_KERNEL, node);
+        if (!hctx->ctxs)
+                goto unregister_cpu_notifier;
 
-                node = hctx->numa_node;
-                if (node == NUMA_NO_NODE)
-                        node = hctx->numa_node = set->numa_node;
+        if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
+                goto free_ctxs;
 
-                INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
-                INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
-                spin_lock_init(&hctx->lock);
-                INIT_LIST_HEAD(&hctx->dispatch);
-                hctx->queue = q;
-                hctx->queue_num = i;
-                hctx->flags = set->flags;
-                hctx->cmd_size = set->cmd_size;
+        hctx->nr_ctx = 0;
 
-                blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
-                                        blk_mq_hctx_notify, hctx);
-                blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+        if (set->ops->init_hctx &&
+            set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+                goto free_bitmap;
 
-                hctx->tags = set->tags[i];
+        return 0;
 
-                /*
-                 * Allocate space for all possible cpus to avoid allocation at
-                 * runtime
-                 */
-                hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
-                                        GFP_KERNEL, node);
-                if (!hctx->ctxs)
-                        break;
+ free_bitmap:
+        blk_mq_free_bitmap(&hctx->ctx_map);
+ free_ctxs:
+        kfree(hctx->ctxs);
+ unregister_cpu_notifier:
+        blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
 
-                if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
-                        break;
+        return -1;
+}
 
-                hctx->nr_ctx = 0;
+static int blk_mq_init_hw_queues(struct request_queue *q,
+                struct blk_mq_tag_set *set)
+{
+        struct blk_mq_hw_ctx *hctx;
+        unsigned int i;
 
-                if (set->ops->init_hctx &&
-                    set->ops->init_hctx(hctx, set->driver_data, i))
+        /*
+         * Initialize hardware queues
+         */
+        queue_for_each_hw_ctx(q, hctx, i) {
+                if (blk_mq_init_hctx(q, set, hctx, i))
                         break;
         }
 