| author    | Ming Lei <tom.leiming@gmail.com>                 | 2013-12-26 08:31:35 -0500 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Jens Axboe <axboe@kernel.dk>                     | 2013-12-31 11:53:05 -0500 |
| commit    | 43a5e4e21964a6efb4d14a34644ec7109d0ae891 (patch) |                           |
| tree      | 9ff635ec990583c0877d4056841d35e6018825a1 /block  |                           |
| parent    | b28bc9b38c52f63f43e3fd875af982f2240a2859 (diff)  |                           |
block: blk-mq: support draining mq queue
blk_mq_drain_queue() is introduced so that we can drain the
mq queue inside blk_cleanup_queue().
Also, don't accept new requests anymore once the queue is marked
as dying.
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
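
For readers skimming the patch, below is a minimal, self-contained userspace sketch of the two ideas in the commit message (hypothetical names, not kernel code): a usage counter that a drain loop polls until it reaches zero, as __blk_mq_drain_queue() does with mq_usage_counter, and an enter path that rejects new work once a dying flag is set, mirroring the blk_mq_queue_enter() change.

```c
/*
 * Userspace analogy of the drain pattern in this patch (hypothetical names).
 * Like __blk_mq_drain_queue(), drain_queue() polls a usage counter until all
 * in-flight "requests" have exited, sleeping briefly between checks; like the
 * blk_mq_queue_enter() change, queue_enter() fails once "dying" is set.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_long usage_counter;   /* stands in for q->mq_usage_counter */
static atomic_bool dying;           /* stands in for blk_queue_dying(q)  */

static int queue_enter(void)
{
        if (atomic_load(&dying))
                return -1;          /* the patch returns -ENODEV here */
        atomic_fetch_add(&usage_counter, 1);
        return 0;
}

static void queue_exit(void)
{
        atomic_fetch_sub(&usage_counter, 1);
}

static void drain_queue(void)
{
        while (atomic_load(&usage_counter) != 0)
                usleep(10000);      /* the patch uses msleep(10) */
}

int main(void)
{
        queue_enter();                  /* one request in flight */
        atomic_store(&dying, true);     /* mark the queue dying */
        queue_exit();                   /* the request completes */
        drain_queue();                  /* returns once the counter hits zero */
        printf("drained; new enter %s\n",
               queue_enter() ? "rejected" : "accepted");
        return 0;
}
```

The kernel code polls with msleep(10) rather than waiting on a completion, which is acceptable here because draining only happens on queue freeze and teardown, not in the I/O fast path.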
Diffstat (limited to 'block')
-rw-r--r--   block/blk-core.c | 10
-rw-r--r--   block/blk-exec.c |  4
-rw-r--r--   block/blk-mq.c   | 43
-rw-r--r--   block/blk-mq.h   |  1
4 files changed, 40 insertions, 18 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 5da8e900d3b1..accb7fc6ec94 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -38,6 +38,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -497,8 +498,13 @@ void blk_cleanup_queue(struct request_queue *q)
          * Drain all requests queued before DYING marking. Set DEAD flag to
          * prevent that q->request_fn() gets invoked after draining finished.
          */
-        spin_lock_irq(lock);
-        __blk_drain_queue(q, true);
+        if (q->mq_ops) {
+                blk_mq_drain_queue(q);
+                spin_lock_irq(lock);
+        } else {
+                spin_lock_irq(lock);
+                __blk_drain_queue(q, true);
+        }
         queue_flag_set(QUEUE_FLAG_DEAD, q);
         spin_unlock_irq(lock);
 
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c3edf9dff566..bbfc072a79c2 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -60,6 +60,10 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
         rq->rq_disk = bd_disk;
         rq->end_io = done;
 
+        /*
+         * don't check dying flag for MQ because the request won't
+         * be reused after dying flag is set
+         */
         if (q->mq_ops) {
                 blk_mq_insert_request(q, rq, true);
                 return;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3929f43d0b03..e2f811cba417 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -106,10 +106,13 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
         spin_lock_irq(q->queue_lock);
         ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-                !blk_queue_bypass(q), *q->queue_lock);
+                !blk_queue_bypass(q) || blk_queue_dying(q),
+                *q->queue_lock);
         /* inc usage with lock hold to avoid freeze_queue runs here */
-        if (!ret)
+        if (!ret && !blk_queue_dying(q))
                 __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+        else if (blk_queue_dying(q))
+                ret = -ENODEV;
         spin_unlock_irq(q->queue_lock);
 
         return ret;
@@ -120,6 +123,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
         __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+        while (true) {
+                s64 count;
+
+                spin_lock_irq(q->queue_lock);
+                count = percpu_counter_sum(&q->mq_usage_counter);
+                spin_unlock_irq(q->queue_lock);
+
+                if (count == 0)
+                        break;
+                blk_mq_run_queues(q, false);
+                msleep(10);
+        }
+}
+
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
@@ -133,21 +152,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
         queue_flag_set(QUEUE_FLAG_BYPASS, q);
         spin_unlock_irq(q->queue_lock);
 
-        if (!drain)
-                return;
-
-        while (true) {
-                s64 count;
-
-                spin_lock_irq(q->queue_lock);
-                count = percpu_counter_sum(&q->mq_usage_counter);
-                spin_unlock_irq(q->queue_lock);
+        if (drain)
+                __blk_mq_drain_queue(q);
+}
 
-                if (count == 0)
-                        break;
-                blk_mq_run_queues(q, false);
-                msleep(10);
-        }
+void blk_mq_drain_queue(struct request_queue *q)
+{
+        __blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 52bf1f96a2c2..caa614f24409 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -27,6 +27,7 @@ void blk_mq_complete_request(struct request *rq, int error);
 void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
+void blk_mq_drain_queue(struct request_queue *q);
 
 /*
  * CPU hotplug helpers