author		Tejun Heo <tj@kernel.org>	2012-03-05 16:15:02 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 15:27:22 -0500
commit		0a5a7d0e32be6643b881f0e7cd9d0d06fadde27a (patch)
tree		271f62b5f75c239831c7def1c445a6e990366730 /block/blk-throttle.c
parent		2a7f124414b35645049e9c1b125a6f0b470aa5ae (diff)
blkcg: update blkg get functions to take blkio_cgroup as parameter
In both blkg get functions - throtl_get_tg() and cfq_get_cfqg() -
instead of obtaining the blkcg of %current explicitly, let the caller
specify the blkcg to use as a parameter and make both functions hold a
reference on that blkcg while they may block. This is part of the block
cgroup interface cleanup and will help make the blkcg API more modular.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	16
1 file changed, 7 insertions(+), 9 deletions(-)
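Below is a hedged, illustrative sketch of the caller-side pattern this change assumes, loosely modeled on the blk_throtl_bio() hunk at the end of the diff. The wrapper name blk_throtl_bio_sketch and the reduced flow are hypothetical, not upstream code: the caller resolves the blkio_cgroup of %current under rcu_read_lock() and passes it to throtl_find_tg()/throtl_get_tg(), and the getter pins the blkcg with css_tryget() before dropping the locks to allocate, releasing it with css_put() afterwards.

/*
 * Illustrative sketch only: a simplified caller, loosely modeled on
 * blk_throtl_bio().  The name blk_throtl_bio_sketch and the reduced
 * flow are hypothetical; see the real hunk at the end of this diff.
 */
static bool blk_throtl_bio_sketch(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct blkio_cgroup *blkcg;
	struct throtl_grp *tg;
	bool throttled = false;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);	/* caller, not getter, resolves the blkcg */

	tg = throtl_find_tg(td, blkcg);		/* lockless fast path */
	if (!tg) {
		/*
		 * Slow path: throtl_get_tg() takes a css reference so the
		 * blkcg stays valid while it drops the rcu/queue locks to
		 * allocate a new group, then puts it after relocking.
		 */
		spin_lock_irq(q->queue_lock);
		tg = throtl_get_tg(td, blkcg);
		spin_unlock_irq(q->queue_lock);
	}
	rcu_read_unlock();

	if (tg) {
		/* ... charge @bio against @tg's limits and decide throttling ... */
	}
	return throttled;
}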
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 9beaac7fb397..c252df9169db 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -303,21 +303,23 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	return tg;
 }
 
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_get_tg(struct throtl_data *td,
+					struct blkio_cgroup *blkcg)
 {
 	struct throtl_grp *tg = NULL, *__tg = NULL;
-	struct blkio_cgroup *blkcg;
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
 	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
-	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
 	if (tg)
 		return tg;
 
+	if (!css_tryget(&blkcg->css))
+		return NULL;
+
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
@@ -331,6 +333,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	/* Group allocated and queue is still alive. take the lock */
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
+	css_put(&blkcg->css);
 
 	/* Make sure @q is still alive */
 	if (unlikely(blk_queue_bypass(q))) {
@@ -339,11 +342,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	}
 
 	/*
-	 * Initialize the new group. After sleeping, read the blkcg again.
-	 */
-	blkcg = task_blkio_cgroup(current);
-
-	/*
 	 * If some other thread already allocated the group while we were
 	 * not holding queue lock, free up the group
 	 */
@@ -1163,7 +1161,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	 * IO group
 	 */
 	spin_lock_irq(q->queue_lock);
-	tg = throtl_get_tg(td);
+	tg = throtl_get_tg(td, blkcg);
 	if (unlikely(!tg))
 		goto out_unlock;
 