| author | Bart Van Assche <bart.vanassche@sandisk.com> | 2017-06-20 14:15:38 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2017-06-20 21:27:14 -0400 |
| commit | 073196787727e454e17a96d222ea55eba2000978 (patch) | |
| tree | 3b0c7733b6cfe467367abbd59fe2e4d59dac730f /block/blk-mq.c | |
| parent | efbeccdb59d666b9c77d505af01097cc0a9d102b (diff) | |
blk-mq: Reduce blk_mq_hw_ctx size
Since the srcu structure is rather large (184 bytes on an x86-64
system with kernel debugging disabled), only allocate it if needed.
Reported-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 30
1 file changed, 22 insertions, 8 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ca03cd4b263f..3e0cc11b1a90 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -172,7 +172,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
-			synchronize_srcu(&hctx->queue_rq_srcu);
+			synchronize_srcu(hctx->queue_rq_srcu);
 		else
 			rcu = true;
 	}
@@ -1094,9 +1094,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	} else {
 		might_sleep();
 
-		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
 		blk_mq_sched_dispatch_requests(hctx);
-		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
 	}
 }
 
@@ -1505,9 +1505,9 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
 		might_sleep();
 
-		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
 		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
-		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
 	}
 }
 
@@ -1853,7 +1853,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 		set->ops->exit_hctx(hctx, hctx_idx);
 
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
-		cleanup_srcu_struct(&hctx->queue_rq_srcu);
+		cleanup_srcu_struct(hctx->queue_rq_srcu);
 
 	blk_mq_remove_cpuhp(hctx);
 	blk_free_flush_queue(hctx->fq);
@@ -1926,7 +1926,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		goto free_fq;
 
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
-		init_srcu_struct(&hctx->queue_rq_srcu);
+		init_srcu_struct(hctx->queue_rq_srcu);
 
 	blk_mq_debugfs_register_hctx(q, hctx);
 
@@ -2201,6 +2201,20 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_init_queue);
 
+static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
+{
+	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
+
+	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
+			   __alignof__(struct blk_mq_hw_ctx)) !=
+		     sizeof(struct blk_mq_hw_ctx));
+
+	if (tag_set->flags & BLK_MQ_F_BLOCKING)
+		hw_ctx_size += sizeof(struct srcu_struct);
+
+	return hw_ctx_size;
+}
+
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		struct request_queue *q)
 {
@@ -2215,7 +2229,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 			continue;
 
 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
-		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
 				GFP_KERNEL, node);
 		if (!hctxs[i])
 			break;
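The BUILD_BUG_ON in the new blk_mq_hw_ctx_size() only holds if queue_rq_srcu sits at the very end of struct blk_mq_hw_ctx, so the optional srcu_struct can be tail-allocated by the same kzalloc_node() call; the matching header change is outside this diffstat, which is limited to block/blk-mq.c. Below is a minimal, self-contained userspace sketch of that conditional tail-allocation pattern. The names (hw_ctx, opt_state, OPT_BLOCKING) are hypothetical stand-ins rather than the kernel's, and the simplified static_assert skips the ALIGN() rounding the kernel needs because sizeof() may include trailing padding.

```c
/*
 * Minimal userspace sketch of the conditional tail-allocation pattern
 * used by blk_mq_hw_ctx_size()/blk_mq_realloc_hw_ctxs() above.
 * All names here are hypothetical stand-ins, not kernel code.
 */
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct opt_state {			/* stand-in for struct srcu_struct */
	char big[184];
};

#define OPT_BLOCKING	(1UL << 0)	/* stand-in for BLK_MQ_F_BLOCKING */

struct hw_ctx {				/* stand-in for struct blk_mq_hw_ctx */
	unsigned long flags;
	/* ... other members ... */
	struct opt_state state[];	/* optional, must stay the last member */
};

static size_t hw_ctx_size(unsigned long flags)
{
	size_t size = sizeof(struct hw_ctx);

	/*
	 * The trailing member must start exactly where the base struct ends;
	 * the kernel version additionally rounds the offset up with ALIGN()
	 * because sizeof() may include trailing padding.
	 */
	static_assert(offsetof(struct hw_ctx, state) == sizeof(struct hw_ctx),
		      "state[] must be the trailing member");

	if (flags & OPT_BLOCKING)
		size += sizeof(struct opt_state);	/* pay for it only if needed */
	return size;
}

static struct hw_ctx *hw_ctx_alloc(unsigned long flags)
{
	/* One zeroed allocation covers the base struct plus the optional tail. */
	struct hw_ctx *ctx = calloc(1, hw_ctx_size(flags));

	if (ctx)
		ctx->flags = flags;
	return ctx;
}

int main(void)
{
	struct hw_ctx *plain = hw_ctx_alloc(0);
	struct hw_ctx *blocking = hw_ctx_alloc(OPT_BLOCKING);

	/* Only the blocking variant owns the tail and may touch it. */
	if (blocking)
		memset(blocking->state, 0, sizeof(struct opt_state));

	free(plain);
	free(blocking);
	return 0;
}
```

Allocating the large optional member inline, and only for queues that actually need it, is what yields the per-hctx saving the commit message cites: non-blocking queues never pay the roughly 184 bytes, and blocking queues still avoid a separate allocation and pointer chase.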