path: root/block/blk-throttle.c
author    Tejun Heo <tj@kernel.org>    2012-05-23 06:16:21 -0400
committer Jens Axboe <axboe@kernel.dk>    2012-05-23 06:16:21 -0400
commit    ff26eaadf4d914e397872b99885d45756104e9ae (patch)
tree      5020f3e4a14ab68f6d027366e719b7fb5193123c /block/blk-throttle.c
parent    0b7877d4eea3f93e3dd941999522bbd8c538cb53 (diff)
blkcg: tg_stats_alloc_lock is an irq lock
tg_stats_alloc_lock nests inside the queue lock and should always be held with irqs disabled. throtl_pd_{init|exit}() were using non-irqsafe spinlock ops, which triggered an inverse-lock-ordering-via-irq warning when RCU freeing of a blkg invoked throtl_pd_exit() without irqs disabled.

Update both functions to use the irq-safe operations.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
LKML-Reference: <1335339396.16988.80.camel@lappy>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
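For context, the following is a minimal, out-of-tree sketch of the locking pattern the patch adopts, not code from this commit; the names example_lock, example_list and example_add are hypothetical stand-ins for tg_stats_alloc_lock and its users. The point is that a lock which nests inside an irq-disabled lock must itself be taken with the irqsave/irqrestore variants, since some callers arrive with interrupts enabled:

#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical stand-ins for tg_stats_alloc_lock / tg_stats_alloc_list. */
static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

static void example_add(struct list_head *node)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() disables local interrupts and records their
	 * previous state in 'flags', so it is correct whether the caller
	 * already runs with irqs off (e.g. under the queue lock) or with
	 * irqs on (e.g. from an RCU callback).
	 */
	spin_lock_irqsave(&example_lock, flags);
	list_add(node, &example_list);
	spin_unlock_irqrestore(&example_lock, flags);
}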
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--    block/blk-throttle.c    10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 14dedecfc7e8..5b0659512047 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -219,6 +219,7 @@ alloc_stats:
 static void throtl_pd_init(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
 	RB_CLEAR_NODE(&tg->rb_node);
 	bio_list_init(&tg->bio_lists[0]);
@@ -235,19 +236,20 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 * but percpu allocator can't be called from IO path.  Queue tg on
 	 * tg_stats_alloc_list and allocate from work item.
 	 */
-	spin_lock(&tg_stats_alloc_lock);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
 	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
-	spin_unlock(&tg_stats_alloc_lock);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
 static void throtl_pd_exit(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
-	spin_lock(&tg_stats_alloc_lock);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_del_init(&tg->stats_alloc_node);
-	spin_unlock(&tg_stats_alloc_lock);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 
 	free_percpu(tg->stats_cpu);
 }