author     Ming Lei <tom.leiming@gmail.com>        2013-12-26 08:31:35 -0500
committer  Jens Axboe <axboe@kernel.dk>            2013-12-31 11:53:05 -0500
commit     43a5e4e21964a6efb4d14a34644ec7109d0ae891
tree       9ff635ec990583c0877d4056841d35e6018825a1  /block/blk-mq.c
parent     b28bc9b38c52f63f43e3fd875af982f2240a2859
block: blk-mq: support draining mq queue
blk_mq_drain_queue() is introduced so that the mq queue can be
drained inside blk_cleanup_queue().
Also, stop accepting new requests once the queue is marked as dying.
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
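For context on the first sentence of the commit message: the new blk_mq_drain_queue() hook is meant to be called from blk_cleanup_queue() after the queue has been marked dying. The blk-core.c side is not part of this diff (the diffstat below is limited to block/blk-mq.c), so the call site below is only a hedged sketch; the helper names other than blk_mq_drain_queue() and q->mq_ops are assumptions for illustration.

/* Illustrative sketch of the intended blk_cleanup_queue() call site.
 * Not part of this diff; legacy_drain_queue() is a hypothetical
 * stand-in for the existing single-queue drain path.
 */
static void cleanup_drain_sketch(struct request_queue *q)
{
	/* QUEUE_FLAG_DYING is already set at this point, so
	 * blk_mq_queue_enter() now rejects new requests with -ENODEV
	 * while the drain below waits for in-flight ones. */
	if (q->mq_ops)
		blk_mq_drain_queue(q);	/* wait for mq_usage_counter to reach zero */
	else
		legacy_drain_queue(q);	/* old request_fn-based drain */
}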
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--   block/blk-mq.c   43
1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3929f43d0b03..e2f811cba417 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -106,10 +106,13 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
 	spin_lock_irq(q->queue_lock);
 	ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-		!blk_queue_bypass(q), *q->queue_lock);
+		!blk_queue_bypass(q) || blk_queue_dying(q),
+		*q->queue_lock);
 	/* inc usage with lock hold to avoid freeze_queue runs here */
-	if (!ret)
+	if (!ret && !blk_queue_dying(q))
 		__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+	else if (blk_queue_dying(q))
+		ret = -ENODEV;
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
@@ -120,6 +123,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
 	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+	while (true) {
+		s64 count;
+
+		spin_lock_irq(q->queue_lock);
+		count = percpu_counter_sum(&q->mq_usage_counter);
+		spin_unlock_irq(q->queue_lock);
+
+		if (count == 0)
+			break;
+		blk_mq_run_queues(q, false);
+		msleep(10);
+	}
+}
+
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
@@ -133,21 +152,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (!drain)
-		return;
-
-	while (true) {
-		s64 count;
-
-		spin_lock_irq(q->queue_lock);
-		count = percpu_counter_sum(&q->mq_usage_counter);
-		spin_unlock_irq(q->queue_lock);
-
-		if (count == 0)
-			break;
-		blk_mq_run_queues(q, false);
-		msleep(10);
-	}
-}
+	if (drain)
+		__blk_mq_drain_queue(q);
+}
+
+void blk_mq_drain_queue(struct request_queue *q)
+{
+	__blk_mq_drain_queue(q);
+}
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
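One consequence of the blk_mq_queue_enter() change worth spelling out: callers that used to sleep on a bypassed queue now get -ENODEV once the queue is dying, so submission paths can fail fast instead of waiting forever. The helper below is a hypothetical in-file usage pattern, not a function added by this patch; it only shows how the enter/exit pair brackets work that must finish before __blk_mq_drain_queue() can observe a zero mq_usage_counter.

/* Hypothetical usage pattern inside blk-mq.c (not part of this patch):
 * how the enter/exit pair interacts with the new dying check.
 */
static int example_submit(struct request_queue *q)
{
	int ret;

	ret = blk_mq_queue_enter(q);	/* -ENODEV once QUEUE_FLAG_DYING is set */
	if (ret)
		return ret;		/* fail fast; the queue is going away */

	/*
	 * ... work that must complete before the queue can be torn down;
	 * the reference taken above keeps mq_usage_counter non-zero, so
	 * __blk_mq_drain_queue() keeps polling until we are done ...
	 */

	blk_mq_queue_exit(q);		/* drop the usage reference */
	return 0;
}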