Diffstat (limited to 'block/blk-throttle.c')
 block/blk-throttle.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 9beaac7fb39..c252df9169d 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -303,21 +303,23 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	return tg;
 }
 
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_get_tg(struct throtl_data *td,
+					struct blkio_cgroup *blkcg)
 {
 	struct throtl_grp *tg = NULL, *__tg = NULL;
-	struct blkio_cgroup *blkcg;
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
 	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
-	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
 	if (tg)
 		return tg;
 
+	if (!css_tryget(&blkcg->css))
+		return NULL;
+
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
@@ -331,6 +333,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	/* Group allocated and queue is still alive. take the lock */
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
+	css_put(&blkcg->css);
 
 	/* Make sure @q is still alive */
 	if (unlikely(blk_queue_bypass(q))) {
@@ -339,11 +342,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	}
 
 	/*
-	 * Initialize the new group. After sleeping, read the blkcg again.
-	 */
-	blkcg = task_blkio_cgroup(current);
-
-	/*
 	 * If some other thread already allocated the group while we were
 	 * not holding queue lock, free up the group
 	 */
@@ -1163,7 +1161,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	 * IO group
 	 */
 	spin_lock_irq(q->queue_lock);
-	tg = throtl_get_tg(td);
+	tg = throtl_get_tg(td, blkcg);
 	if (unlikely(!tg))
 		goto out_unlock;
 
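
Note: the change above makes throtl_get_tg() take the blkio_cgroup from its caller instead of looking it up with task_blkio_cgroup(current), and pins it with css_tryget() before the queue lock is dropped for the blocking group allocation, releasing the pin with css_put() once the lock is reacquired. The following is a minimal user-space sketch of that pinning pattern, under simplified reference-count semantics; obj_tryget(), obj_put() and alloc_group_for() are hypothetical names standing in for css_tryget(), css_put() and the group allocation path, not kernel API.

/*
 * Sketch (not kernel code): take a reference on a shared object before
 * dropping a lock for a blocking allocation, drop the reference once the
 * lock is held again.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;		/* models the css reference count */
};

/* Grab a reference only if the object is still live (refcount > 0). */
static int obj_tryget(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old > 0)
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return 1;
	return 0;
}

/* Drop a reference; free the object when the last one goes away. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with queue_lock held, mirroring throtl_get_tg(). */
static void *alloc_group_for(struct obj *blkcg_like)
{
	void *group;

	/* Pin the object so it cannot vanish while the lock is dropped. */
	if (!obj_tryget(blkcg_like))
		return NULL;

	pthread_mutex_unlock(&queue_lock);
	group = malloc(128);		/* the blocking allocation */
	pthread_mutex_lock(&queue_lock);

	/* Lock reacquired: the pin has done its job. */
	obj_put(blkcg_like);
	return group;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	void *group;

	atomic_init(&o->refcount, 1);

	pthread_mutex_lock(&queue_lock);
	group = alloc_group_for(o);
	pthread_mutex_unlock(&queue_lock);

	printf("group allocated: %s\n", group ? "yes" : "no");
	free(group);
	obj_put(o);
	return 0;
}

The point of the pattern, as in the patch, is that the reference only needs to outlive the window in which the lock is not held; once the lock is retaken, the caller's own reference (here, main()'s) is enough again.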