author    Tejun Heo <tj@kernel.org>      2012-03-05 16:15:01 -0500
committer Jens Axboe <axboe@kernel.dk>   2012-03-06 15:27:22 -0500
commit    2a7f124414b35645049e9c1b125a6f0b470aa5ae (patch)
tree      298d9bc310dc46baed69baf88f083c6db35f0964
parent    72e06c255181537d0b3e1f657a9ed81655d745b1 (diff)
blkcg: move rcu_read_lock() outside of blkio_group get functions

rcu_read_lock() in throtl_get_tg() and cfq_get_cfqg() holds onto @blkcg
while looking up blkg.  For API cleanup, the next patch will make the
caller responsible for determining @blkcg to look up blkg from and let
them specify it as a parameter.  Move rcu read locking out to the
callers to prepare for the change.

-v2: Originally this patch was described as a fix for an RCU read
     locking bug around @blkg, which Vivek pointed out to be incorrect.
     That came from misunderstanding the role of rcu locking as
     protecting @blkg rather than @blkcg.  Patch description updated.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 block/blk-throttle.c | 18 ++++++------------
 block/cfq-iosched.c  | 11 +++++------
 2 files changed, 11 insertions(+), 18 deletions(-)
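
[Editor's illustration, not part of the patch] The sketch below is a
minimal, self-contained userspace model of the lock ordering that the
patched throtl_get_tg() follows: the caller now enters with the RCU
read lock (and queue_lock) held, and when the group has to be
allocated the function drops queue_lock, then the RCU read lock,
performs the sleeping allocation, and re-acquires the two in reverse
order.  Every name here (model_get_group, fake_rcu_read_lock,
alloc_group, ...) is invented for the illustration, and the pthread
mutex merely stands in for q->queue_lock.

/*
 * Hypothetical userspace model of the post-patch throtl_get_tg()
 * locking; none of these names are kernel APIs.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int rcu_depth;			/* models rcu_read_lock() nesting */

static void fake_rcu_read_lock(void)	{ rcu_depth++; }
static void fake_rcu_read_unlock(void)	{ rcu_depth--; }

struct group { int id; };
static struct group *cached_group;	/* group found by the lookup, if any */

/* Sleeping allocation: must not run under the RCU read lock. */
static struct group *alloc_group(void)
{
	assert(rcu_depth == 0);
	return calloc(1, sizeof(struct group));
}

/* Called with queue_lock and the RCU read lock held, as after this patch. */
static struct group *model_get_group(void)
{
	struct group *grp = cached_group;

	if (grp)			/* fast path: found under RCU */
		return grp;

	/* Drop queue_lock, then the RCU read lock, before sleeping. */
	pthread_mutex_unlock(&queue_lock);
	fake_rcu_read_unlock();

	grp = alloc_group();

	/* Re-acquire in reverse order for the caller. */
	fake_rcu_read_lock();
	pthread_mutex_lock(&queue_lock);
	cached_group = grp;
	return grp;
}

int main(void)
{
	/* Caller-side locking, as blk_throtl_bio() does after this patch. */
	fake_rcu_read_lock();
	pthread_mutex_lock(&queue_lock);

	struct group *grp = model_get_group();
	printf("got group %p, rcu depth %d\n", (void *)grp, rcu_depth);

	pthread_mutex_unlock(&queue_lock);
	fake_rcu_read_unlock();
	free(grp);
	return 0;
}
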
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3699ab40d494..9beaac7fb397 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -313,25 +313,23 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
-	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
-	if (tg) {
-		rcu_read_unlock();
+	if (tg)
 		return tg;
-	}
 
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
 	 * we need to drop rcu lock and queue_lock before we call alloc.
 	 */
-	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
+	rcu_read_unlock();
 
 	tg = throtl_alloc_tg(td);
 
 	/* Group allocated and queue is still alive. take the lock */
+	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
 
 	/* Make sure @q is still alive */
@@ -343,7 +341,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	/*
 	 * Initialize the new group. After sleeping, read the blkcg again.
 	 */
-	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 
 	/*
@@ -354,7 +351,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 
 	if (__tg) {
 		kfree(tg);
-		rcu_read_unlock();
 		return __tg;
 	}
 
@@ -365,7 +361,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	}
 
 	throtl_init_add_tg_lists(td, tg, blkcg);
-	rcu_read_unlock();
 	return tg;
 }
 
@@ -1150,7 +1145,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	 * basic fields like stats and io rates. If a group has no rules,
 	 * just update the dispatch stats in lockless manner and return.
 	 */
-
 	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
@@ -1160,11 +1154,9 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 		if (tg_no_rule_group(tg, rw)) {
 			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
 					rw, rw_is_sync(bio->bi_rw));
-			rcu_read_unlock();
-			goto out;
+			goto out_unlock_rcu;
 		}
 	}
-	rcu_read_unlock();
 
 	/*
 	 * Either group has not been allocated yet or it is not an unlimited
@@ -1222,6 +1214,8 @@ queue_bio:
 
 out_unlock:
 	spin_unlock_irq(q->queue_lock);
+out_unlock_rcu:
+	rcu_read_unlock();
 out:
 	return throttled;
 }
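
[Editor's illustration, not part of the patch] blk_throtl_bio() above
gains an out_unlock_rcu label so that the lockless no-rules path can
skip the queue_lock release yet still drop the RCU read lock on the
way out.  A hypothetical, self-contained sketch of that exit-label
ladder follows; the names are placeholders, not kernel code, and the
pthread mutex stands in for the queue lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int rcu_depth;			/* models rcu_read_lock() nesting */

static void fake_rcu_read_lock(void)	{ rcu_depth++; }
static void fake_rcu_read_unlock(void)	{ rcu_depth--; }

/* Each exit label undoes one more acquisition than the label below it. */
static bool submit_bio_model(bool no_rules, bool must_queue)
{
	bool throttled = false;

	fake_rcu_read_lock();
	if (no_rules)			/* lockless fast path */
		goto out_unlock_rcu;

	pthread_mutex_lock(&queue_lock);
	if (must_queue)
		throttled = true;	/* bio queued for later dispatch */

	/* "out_unlock:" in the real function sits here. */
	pthread_mutex_unlock(&queue_lock);
out_unlock_rcu:
	fake_rcu_read_unlock();
	return throttled;
}

int main(void)
{
	printf("no rules:   throttled=%d\n", submit_bio_model(true, false));
	printf("must queue: throttled=%d\n", submit_bio_model(false, true));
	printf("rcu depth afterwards: %d\n", rcu_depth);
	return 0;
}
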
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 61693d3404d0..6063c4482b86 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1128,13 +1128,10 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
 	struct request_queue *q = cfqd->queue;
 
-	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	cfqg = cfq_find_cfqg(cfqd, blkcg);
-	if (cfqg) {
-		rcu_read_unlock();
+	if (cfqg)
 		return cfqg;
-	}
 
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
@@ -1164,7 +1161,6 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 
 	if (__cfqg) {
 		kfree(cfqg);
-		rcu_read_unlock();
 		return __cfqg;
 	}
 
@@ -1172,7 +1168,6 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 		cfqg = &cfqd->root_group;
 
 	cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
-	rcu_read_unlock();
 	return cfqg;
 }
 
@@ -2870,6 +2865,8 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 	struct cfq_group *cfqg;
 
 retry:
+	rcu_read_lock();
+
 	cfqg = cfq_get_cfqg(cfqd);
 	cic = cfq_cic_lookup(cfqd, ioc);
 	/* cic always exists here */
@@ -2885,6 +2882,7 @@ retry:
 			cfqq = new_cfqq;
 			new_cfqq = NULL;
 		} else if (gfp_mask & __GFP_WAIT) {
+			rcu_read_unlock();
 			spin_unlock_irq(cfqd->queue->queue_lock);
 			new_cfqq = kmem_cache_alloc_node(cfq_pool,
 					gfp_mask | __GFP_ZERO,
@@ -2910,6 +2908,7 @@ retry:
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 
+	rcu_read_unlock();
 	return cfqq;
 }
 
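
[Editor's illustration, not part of the patch] On the cfq side, the RCU
read locking moves into cfq_find_alloc_queue(): the read lock is taken
at the top of the retry loop, dropped together with the queue lock
before the sleeping cfq_pool allocation, re-taken when the code jumps
back to retry:, and released once before returning.  The sketch below
is a hypothetical, self-contained model of that shape; the names and
the pthread mutex are placeholders, not the kernel API.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int rcu_depth;			/* models rcu_read_lock() nesting */

static void fake_rcu_read_lock(void)	{ rcu_depth++; }
static void fake_rcu_read_unlock(void)	{ rcu_depth--; }

struct queue_obj { int id; };

/* Called with queue_lock held; returns with it held and RCU released. */
static struct queue_obj *find_alloc_queue(void)
{
	struct queue_obj *q = NULL, *new_q = NULL;

retry:
	fake_rcu_read_lock();

	if (new_q) {			/* second pass: use what we allocated */
		q = new_q;
		new_q = NULL;
	} else {			/* first pass: drop locks, then allocate */
		fake_rcu_read_unlock();
		pthread_mutex_unlock(&queue_lock);

		new_q = calloc(1, sizeof(*new_q));	/* stands in for the sleeping alloc */

		pthread_mutex_lock(&queue_lock);
		goto retry;
	}

	fake_rcu_read_unlock();
	return q;
}

int main(void)
{
	pthread_mutex_lock(&queue_lock);
	struct queue_obj *q = find_alloc_queue();
	pthread_mutex_unlock(&queue_lock);

	printf("queue %p, rcu depth %d\n", (void *)q, rcu_depth);
	free(q);
	return 0;
}
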