diff options
| -rw-r--r-- | block/blk-mq.c | 26 |
1 file changed, 21 insertions, 5 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c index 7ad7c11fe01d..4bf850e8d6b5 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1870,7 +1870,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, | |||
| 1870 | static void blk_mq_map_swqueue(struct request_queue *q, | 1870 | static void blk_mq_map_swqueue(struct request_queue *q, |
| 1871 | const struct cpumask *online_mask) | 1871 | const struct cpumask *online_mask) |
| 1872 | { | 1872 | { |
| 1873 | unsigned int i; | 1873 | unsigned int i, hctx_idx; |
| 1874 | struct blk_mq_hw_ctx *hctx; | 1874 | struct blk_mq_hw_ctx *hctx; |
| 1875 | struct blk_mq_ctx *ctx; | 1875 | struct blk_mq_ctx *ctx; |
| 1876 | struct blk_mq_tag_set *set = q->tag_set; | 1876 | struct blk_mq_tag_set *set = q->tag_set; |
| @@ -1893,6 +1893,21 @@ static void blk_mq_map_swqueue(struct request_queue *q, | |||
| 1893 | if (!cpumask_test_cpu(i, online_mask)) | 1893 | if (!cpumask_test_cpu(i, online_mask)) |
| 1894 | continue; | 1894 | continue; |
| 1895 | 1895 | ||
| 1896 | hctx_idx = q->mq_map[i]; | ||
| 1897 | /* unmapped hw queue can be remapped after CPU topo changed */ | ||
| 1898 | if (!set->tags[hctx_idx]) { | ||
| 1899 | set->tags[hctx_idx] = blk_mq_init_rq_map(set, hctx_idx); | ||
| 1900 | |||
| 1901 | /* | ||
| 1902 | * If tags initialization fails for some hctx, | ||
| 1903 | * that hctx won't be brought online. In this | ||
| 1904 | * case, remap the current ctx to hctx[0] which | ||
| 1905 | * is guaranteed to always have tags allocated | ||
| 1906 | */ | ||
| 1907 | if (!set->tags[hctx_idx]) | ||
| 1908 | q->mq_map[i] = 0; | ||
| 1909 | } | ||
| 1910 | |||
| 1896 | ctx = per_cpu_ptr(q->queue_ctx, i); | 1911 | ctx = per_cpu_ptr(q->queue_ctx, i); |
| 1897 | hctx = blk_mq_map_queue(q, i); | 1912 | hctx = blk_mq_map_queue(q, i); |
| 1898 | 1913 | ||
| @@ -1909,7 +1924,11 @@ static void blk_mq_map_swqueue(struct request_queue *q, | |||
| 1909 | * disable it and free the request entries. | 1924 | * disable it and free the request entries. |
| 1910 | */ | 1925 | */ |
| 1911 | if (!hctx->nr_ctx) { | 1926 | if (!hctx->nr_ctx) { |
| 1912 | if (set->tags[i]) { | 1927 | /* Never unmap queue 0. We need it as a |
| 1928 | * fallback in case allocation fails | ||
| 1929 | * during a later remap | ||
| 1930 | */ | ||
| 1931 | if (i && set->tags[i]) { | ||
| 1913 | blk_mq_free_rq_map(set, set->tags[i], i); | 1932 | blk_mq_free_rq_map(set, set->tags[i], i); |
| 1914 | set->tags[i] = NULL; | 1933 | set->tags[i] = NULL; |
| 1915 | } | 1934 | } |
| @@ -1917,9 +1936,6 @@ static void blk_mq_map_swqueue(struct request_queue *q, | |||
| 1917 | continue; | 1936 | continue; |
| 1918 | } | 1937 | } |
| 1919 | 1938 | ||
| 1920 | /* unmapped hw queue can be remapped after CPU topo changed */ | ||
| 1921 | if (!set->tags[i]) | ||
| 1922 | set->tags[i] = blk_mq_init_rq_map(set, i); | ||
| 1923 | hctx->tags = set->tags[i]; | 1939 | hctx->tags = set->tags[i]; |
| 1924 | WARN_ON(!hctx->tags); | 1940 | WARN_ON(!hctx->tags); |
| 1925 | 1941 | ||
