author | Ming Lei <ming.lei@canonical.com> | 2015-04-20 22:00:20 -0400
---|---|---
committer | Jens Axboe <axboe@fb.com> | 2015-04-23 12:27:38 -0400
commit | 2a34c0872adf252f23a6fef2d051a169ac796cef | (patch)
tree | d820359eae469febd262ebfb87811fec8db215c2 | /block
parent | f054b56c951bf1731ba7314a4c7f1cc0b2977cc9 | (diff)
blk-mq: fix CPU hotplug handling
In blk_mq_map_swqueue(), hctx->tags has to be set to NULL whenever the hw queue ends up unmapped, regardless of whether set->tags[hctx->queue_num] is NULL, because the shared tags may already have been freed from another request queue.
The same situation has to be handled during CPU online as well.
An unmapped hw queue can be remapped after the CPU topology changes, so tags need to be allocated for the hw queue in blk_mq_map_swqueue(). The tags allocation can then be removed from the hctx CPU online notifier; it is more reasonable to allocate after the mapping has been updated anyway.
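To make the intended flow concrete, here is a condensed sketch of the per-hw-queue handling in blk_mq_map_swqueue() after this patch, reconstructed from the hunks below (unrelated steps inside the loop are elided):

	struct blk_mq_tag_set *set = q->tag_set;

	queue_for_each_hw_ctx(q, hctx, i) {
		...
		if (!hctx->nr_ctx) {
			/* hw queue is unmapped: free this queue's tags if still present */
			if (set->tags[i]) {
				blk_mq_free_rq_map(set, set->tags[i], i);
				set->tags[i] = NULL;
			}
			/* clear even if the shared tags were already freed elsewhere */
			hctx->tags = NULL;
			continue;
		}

		/* unmapped hw queue can be remapped after CPU topo changed */
		if (!set->tags[i])
			set->tags[i] = blk_mq_init_rq_map(set, i);
		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);
		...
	}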
Cc: <stable@vger.kernel.org>
Reported-by: Dongsu Park <dongsu.park@profitbricks.com>
Tested-by: Dongsu Park <dongsu.park@profitbricks.com>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq.c | 34 |
1 file changed, 13 insertions, 21 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fccb98aa28f..76f460e36f1d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1574,22 +1574,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 	return NOTIFY_OK;
 }
 
-static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
-{
-	struct request_queue *q = hctx->queue;
-	struct blk_mq_tag_set *set = q->tag_set;
-
-	if (set->tags[hctx->queue_num])
-		return NOTIFY_OK;
-
-	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
-	if (!set->tags[hctx->queue_num])
-		return NOTIFY_STOP;
-
-	hctx->tags = set->tags[hctx->queue_num];
-	return NOTIFY_OK;
-}
-
 static int blk_mq_hctx_notify(void *data, unsigned long action,
 			      unsigned int cpu)
 {
@@ -1597,8 +1581,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
 
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
 		return blk_mq_hctx_cpu_offline(hctx, cpu);
-	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-		return blk_mq_hctx_cpu_online(hctx, cpu);
+
+	/*
+	 * In case of CPU online, tags may be reallocated
+	 * in blk_mq_map_swqueue() after mapping is updated.
+	 */
 
 	return NOTIFY_OK;
 }
@@ -1778,6 +1765,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	unsigned int i;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
+	struct blk_mq_tag_set *set = q->tag_set;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		cpumask_clear(hctx->cpumask);
@@ -1806,16 +1794,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		 * disable it and free the request entries.
 		 */
 		if (!hctx->nr_ctx) {
-			struct blk_mq_tag_set *set = q->tag_set;
-
 			if (set->tags[i]) {
 				blk_mq_free_rq_map(set, set->tags[i], i);
 				set->tags[i] = NULL;
-				hctx->tags = NULL;
 			}
+			hctx->tags = NULL;
 			continue;
 		}
 
+		/* unmapped hw queue can be remapped after CPU topo changed */
+		if (!set->tags[i])
+			set->tags[i] = blk_mq_init_rq_map(set, i);
+		hctx->tags = set->tags[i];
+		WARN_ON(!hctx->tags);
+
 		/*
 		 * Set the map size to the number of mapped software queues.
 		 * This is more accurate and more efficient than looping
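For reference, after this change the CPU notifier only acts on offline; tag allocation on CPU online is left to blk_mq_map_swqueue(). The function below is assembled from the second hunk above (the hctx initialization from data is assumed surrounding context that the hunk does not show):

static int blk_mq_hctx_notify(void *data, unsigned long action,
			      unsigned int cpu)
{
	struct blk_mq_hw_ctx *hctx = data;	/* assumed context, not shown in the hunk */

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		return blk_mq_hctx_cpu_offline(hctx, cpu);

	/*
	 * In case of CPU online, tags may be reallocated
	 * in blk_mq_map_swqueue() after mapping is updated.
	 */

	return NOTIFY_OK;
}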