diff options
author | Tejun Heo <tj@kernel.org> | 2014-07-01 12:33:02 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2014-07-01 12:33:02 -0400 |
commit | 72d6f02a8d4e0dda74de3a541b1c4ae82f5f7b45 (patch) | |
tree | 63e27bc8021a69afa0ee10798aa340c7b0e35a60 /block/blk-mq.c | |
parent | 780db2071ac4d167ee4154ad9c96088f1bba044b (diff) |
blk-mq: collapse __blk_mq_drain_queue() into blk_mq_freeze_queue()
Keeping __blk_mq_drain_queue() as a separate function doesn't buy us
anything and it's gonna be further simplified. Let's flatten it into
its caller.
This patch doesn't make any functional change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 23 |
1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index 1e324a123d40..22682fb4be65 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -108,8 +108,16 @@ static void blk_mq_queue_exit(struct request_queue *q) | |||
108 | __percpu_counter_add(&q->mq_usage_counter, -1, 1000000); | 108 | __percpu_counter_add(&q->mq_usage_counter, -1, 1000000); |
109 | } | 109 | } |
110 | 110 | ||
111 | void blk_mq_drain_queue(struct request_queue *q) | 111 | /* |
112 | * Guarantee no request is in use, so we can change any data structure of | ||
113 | * the queue afterward. | ||
114 | */ | ||
115 | void blk_mq_freeze_queue(struct request_queue *q) | ||
112 | { | 116 | { |
117 | spin_lock_irq(q->queue_lock); | ||
118 | q->mq_freeze_depth++; | ||
119 | spin_unlock_irq(q->queue_lock); | ||
120 | |||
113 | while (true) { | 121 | while (true) { |
114 | s64 count; | 122 | s64 count; |
115 | 123 | ||
@@ -124,19 +132,6 @@ void blk_mq_drain_queue(struct request_queue *q) | |||
124 | } | 132 | } |
125 | } | 133 | } |
126 | 134 | ||
127 | /* | ||
128 | * Guarantee no request is in use, so we can change any data structure of | ||
129 | * the queue afterward. | ||
130 | */ | ||
131 | void blk_mq_freeze_queue(struct request_queue *q) | ||
132 | { | ||
133 | spin_lock_irq(q->queue_lock); | ||
134 | q->mq_freeze_depth++; | ||
135 | spin_unlock_irq(q->queue_lock); | ||
136 | |||
137 | blk_mq_drain_queue(q); | ||
138 | } | ||
139 | |||
140 | static void blk_mq_unfreeze_queue(struct request_queue *q) | 135 | static void blk_mq_unfreeze_queue(struct request_queue *q) |
141 | { | 136 | { |
142 | bool wake = false; | 137 | bool wake = false; |