path: root/block/blk-throttle.c
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--  block/blk-throttle.c | 8
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f3f495ea4ee..ecba5fcef20 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -324,12 +324,8 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
-	 * we need to drop rcu lock and queue_lock before we call alloc
-	 *
-	 * Take the request queue reference to make sure queue does not
-	 * go away once we return from allocation.
+	 * we need to drop rcu lock and queue_lock before we call alloc.
 	 */
-	blk_get_queue(q);
 	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
 
@@ -339,13 +335,11 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	 * dead
 	 */
 	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		blk_put_queue(q);
 		if (tg)
 			kfree(tg);
 
 		return ERR_PTR(-ENODEV);
 	}
-	blk_put_queue(q);
 
 	/* Group allocated and queue is still alive. take the lock */
 	spin_lock_irq(q->queue_lock);
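
The hunks above leave throtl_get_tg() with the familiar unlock, allocate, relock, recheck sequence: the locks are dropped before the potentially blocking per-cpu allocation, and once they are reacquired the code checks whether the queue went dead in the meantime. The following is a minimal user-space sketch of that pattern, assuming hypothetical queue/group types and pthread mutexes in place of the kernel's RCU and queue_lock; it is an illustration of the pattern, not the kernel code.

/*
 * Minimal sketch of the unlock -> allocate -> recheck pattern.  All names
 * (struct queue, struct group, alloc_group, get_group) are hypothetical
 * stand-ins, and a pthread mutex replaces the kernel's queue_lock/RCU.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	pthread_mutex_t lock;
	bool dead;			/* set when the queue is torn down */
};

struct group {
	int weight;
};

/* May block; must not be called with q->lock held. */
static struct group *alloc_group(void)
{
	return calloc(1, sizeof(struct group));
}

static struct group *get_group(struct queue *q)
{
	struct group *g;

	pthread_mutex_lock(&q->lock);
	/* ... fast path: look the group up under the lock ... */

	/* Drop the lock before the potentially blocking allocation. */
	pthread_mutex_unlock(&q->lock);
	g = alloc_group();
	pthread_mutex_lock(&q->lock);

	/*
	 * The queue may have died while the lock was dropped; recheck and
	 * give the allocation back instead of using a dead queue.
	 */
	if (q->dead) {
		pthread_mutex_unlock(&q->lock);
		free(g);
		return NULL;
	}

	pthread_mutex_unlock(&q->lock);
	return g;
}

int main(void)
{
	struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .dead = false };
	struct group *g = get_group(&q);

	printf("group %s\n", g ? "allocated" : "unavailable (queue dead)");
	free(g);
	return 0;
}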