author		Tejun Heo <tj@kernel.org>	2012-03-05 16:15:02 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 15:27:22 -0500
commit		0a5a7d0e32be6643b881f0e7cd9d0d06fadde27a
tree		271f62b5f75c239831c7def1c445a6e990366730 /block
parent		2a7f124414b35645049e9c1b125a6f0b470aa5ae
blkcg: update blkg get functions to take blkio_cgroup as parameter
In both blkg get functions - throtl_get_tg() and cfq_get_cfqg() - instead of obtaining the blkcg of %current explicitly, let the caller specify the blkcg to use as a parameter, and make both functions hold on to that blkcg.

This is part of the block cgroup interface cleanup and will help make the blkcg API more modular.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
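To make the new calling convention concrete, the fragment below sketches how a caller is expected to use throtl_get_tg() after this change: the caller resolves the blkio_cgroup of %current under rcu_read_lock() and passes it down, and the get function pins the blkcg with css_tryget()/css_put() while it may block allocating a new group. This is an illustration only, not code from the patch; example_submit_path() is a hypothetical wrapper, and the placement of the RCU and queue-lock calls mirrors the cfq_find_alloc_queue() hunk shown below rather than quoting blk_throtl_bio() verbatim.

/*
 * Minimal sketch of the post-patch convention (not part of this commit;
 * example_submit_path() is hypothetical).
 */
static void example_submit_path(struct throtl_data *td, struct request_queue *q)
{
	struct blkio_cgroup *blkcg;
	struct throtl_grp *tg;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);	/* caller looks up the blkcg */

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td, blkcg);		/* blkcg passed in; may block and
						 * drop/retake the locks internally */
	if (tg) {
		/* ... use tg while q->queue_lock is still held ... */
	}
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
}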
Diffstat (limited to 'block')
-rw-r--r--	block/blk-throttle.c	16
-rw-r--r--	block/cfq-iosched.c	20
2 files changed, 19 insertions(+), 17 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 9beaac7fb397..c252df9169db 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -303,21 +303,23 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	return tg;
 }
 
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_get_tg(struct throtl_data *td,
+					struct blkio_cgroup *blkcg)
 {
 	struct throtl_grp *tg = NULL, *__tg = NULL;
-	struct blkio_cgroup *blkcg;
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
 	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
-	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
 	if (tg)
 		return tg;
 
+	if (!css_tryget(&blkcg->css))
+		return NULL;
+
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
@@ -331,6 +333,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	/* Group allocated and queue is still alive. take the lock */
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
+	css_put(&blkcg->css);
 
 	/* Make sure @q is still alive */
 	if (unlikely(blk_queue_bypass(q))) {
@@ -339,11 +342,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	}
 
 	/*
-	 * Initialize the new group. After sleeping, read the blkcg again.
-	 */
-	blkcg = task_blkio_cgroup(current);
-
-	/*
 	 * If some other thread already allocated the group while we were
 	 * not holding queue lock, free up the group
 	 */
@@ -1163,7 +1161,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	 * IO group
 	 */
 	spin_lock_irq(q->queue_lock);
-	tg = throtl_get_tg(td);
+	tg = throtl_get_tg(td, blkcg);
 	if (unlikely(!tg))
 		goto out_unlock;
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6063c4482b86..0f7a81fc7c73 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1122,17 +1122,19 @@ cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
  * Search for the cfq group current task belongs to. request_queue lock must
  * be held.
  */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
+				      struct blkio_cgroup *blkcg)
 {
-	struct blkio_cgroup *blkcg;
 	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
 	struct request_queue *q = cfqd->queue;
 
-	blkcg = task_blkio_cgroup(current);
 	cfqg = cfq_find_cfqg(cfqd, blkcg);
 	if (cfqg)
 		return cfqg;
 
+	if (!css_tryget(&blkcg->css))
+		return NULL;
+
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
@@ -1142,16 +1144,14 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 	 * around by the time we return. CFQ queue allocation code does
 	 * the same. It might be racy though.
 	 */
-
 	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
 
 	cfqg = cfq_alloc_cfqg(cfqd);
 
 	spin_lock_irq(q->queue_lock);
-
 	rcu_read_lock();
-	blkcg = task_blkio_cgroup(current);
+	css_put(&blkcg->css);
 
 	/*
 	 * If some other thread already allocated the group while we were
@@ -1278,7 +1278,8 @@ static bool cfq_clear_queue(struct request_queue *q)
 }
 
 #else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
+				      struct blkio_cgroup *blkcg)
 {
 	return &cfqd->root_group;
 }
@@ -2860,6 +2861,7 @@ static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 		     struct io_context *ioc, gfp_t gfp_mask)
 {
+	struct blkio_cgroup *blkcg;
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 	struct cfq_io_cq *cic;
 	struct cfq_group *cfqg;
@@ -2867,7 +2869,9 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 retry:
 	rcu_read_lock();
 
-	cfqg = cfq_get_cfqg(cfqd);
+	blkcg = task_blkio_cgroup(current);
+
+	cfqg = cfq_get_cfqg(cfqd, blkcg);
 	cic = cfq_cic_lookup(cfqd, ioc);
 	/* cic always exists here */
 	cfqq = cic_to_cfqq(cic, is_sync);