author    Jens Axboe <axboe@fb.com>  2016-08-24 17:54:25 -0400
committer Jens Axboe <axboe@fb.com>  2016-08-29 10:13:21 -0400
commit    27489a3c827b7eebba26eda0320bb0f100bef167 (patch)
tree      899f2101b78e5a5bab8121686d6f8576a5dcf940 /block
parent    ee63cfa7fc197b63669623721b8009cce5b0659b (diff)
blk-mq: turn hctx->run_work into a regular work struct
We don't need the larger delayed work struct, since we always run it
immediately.

Signed-off-by: Jens Axboe <axboe@fb.com>
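
The same conversion, distilled into a minimal sketch using the generic workqueue API (my_ctx and my_work_fn are illustrative names, not part of this patch): since the work is always queued with zero delay, the timer embedded in struct delayed_work is dead weight, and a plain work_struct suffices.

#include <linux/workqueue.h>

struct my_ctx {
	struct work_struct run_work;	/* was: struct delayed_work run_work */
};

static void my_work_fn(struct work_struct *work)
{
	/* container_of() now resolves against the work_struct itself,
	 * not the ->work member buried inside a delayed_work. */
	struct my_ctx *ctx = container_of(work, struct my_ctx, run_work);

	(void)ctx;	/* do the actual work here */
}

static void my_ctx_init(struct my_ctx *ctx)
{
	INIT_WORK(&ctx->run_work, my_work_fn);	/* was: INIT_DELAYED_WORK() */
}

static void my_ctx_kick(struct my_ctx *ctx)
{
	/* schedule_work() replaces schedule_delayed_work(..., 0) */
	schedule_work(&ctx->run_work);
}

static void my_ctx_stop(struct my_ctx *ctx)
{
	/* cancel_work_sync() replaces cancel_delayed_work_sync() */
	cancel_work_sync(&ctx->run_work);
}
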
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  2
-rw-r--r--  block/blk-mq.c    9
2 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2d08597533a4..34ff8088eebe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -288,7 +288,7 @@ void blk_sync_queue(struct request_queue *q)
 		int i;
 
 		queue_for_each_hw_ctx(q, hctx, i) {
-			cancel_delayed_work_sync(&hctx->run_work);
+			cancel_work_sync(&hctx->run_work);
 			cancel_delayed_work_sync(&hctx->delay_work);
 		}
 	} else {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 13f5a6c1de76..b68fdcbe58f6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -936,8 +936,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 		put_cpu();
 	}
 
-	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-			&hctx->run_work, 0);
+	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
 }
 
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
@@ -958,7 +957,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-	cancel_delayed_work(&hctx->run_work);
+	cancel_work(&hctx->run_work);
 	cancel_delayed_work(&hctx->delay_work);
 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
@@ -1011,7 +1010,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 {
 	struct blk_mq_hw_ctx *hctx;
 
-	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
+	hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
 
 	__blk_mq_run_hw_queue(hctx);
 }
@@ -1722,7 +1721,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	if (node == NUMA_NO_NODE)
 		node = hctx->numa_node = set->numa_node;
 
-	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);