author     Tejun Heo <tj@kernel.org>    2013-05-14 16:52:31 -0400
committer  Tejun Heo <tj@kernel.org>    2013-05-14 16:52:31 -0400
commit     2a4fd070ee8561d918a3776388331bb7e92ea59e (patch)
tree       53eb9c6df1ea0e02cb3f3442fb85f9e25a4691ed /block/blk-cgroup.h
parent     db61367038dcd222476881cb09fd54661b3cd508 (diff)
blkcg: move bulk of blkcg_gq release operations to the RCU callback
Currently, when the last reference of a blkcg_gq is put, all the release operations sans the actual freeing happen directly in blkg_put(). As blkg_put() may be called under queue_lock, so may all pd_exit_fn()s. This makes it impossible for pd_exit_fn()s to use del_timer_sync() on timers which grab queue_lock, an irq-safe lock, due to the deadlock possibility described in the comment on top of del_timer_sync(). This can easily be avoided by performing the release operations in the RCU callback instead of directly from blkg_put().

This patch moves the blkcg_gq release operations to the RCU callback. As this leaves __blkg_release() with only the call_rcu() invocation, blkg_rcu_free() is renamed to __blkg_release_rcu() and exported, and the call_rcu() invocation is now done directly from blkg_put() instead of going through __blkg_release(), which is removed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
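The header diff below only covers the declaration and the call site in blkg_put(). As context, here is a rough sketch of what the blk-cgroup.c side of the change looks like per the description above: the policy exit callbacks, reference drops and freeing now run from the RCU callback rather than synchronously under queue_lock. Symbol names (rcu_head, pd[], blkcg_policy[], BLKCG_MAX_POLS, blkg_free()) follow the surrounding blk-cgroup code; treat this as an approximation of the callback, not the verbatim patch.

/*
 * Sketch (not the verbatim patch): release work that used to run
 * synchronously in __blkg_release() now runs from the RCU callback,
 * so it no longer executes under queue_lock and pd_exit_fn()s are
 * free to use del_timer_sync() on timers that grab queue_lock.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
	int i;

	/* tell the policies that this blkg is going away */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);
	}

	/* drop the blkcg ref this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * blkg_put() expects queue_lock to be held (see blkg_put() in
	 * the header diff), so take it around the parent put.
	 */
	if (blkg->parent) {
		spin_lock_irq(blkg->q->queue_lock);
		blkg_put(blkg->parent);
		spin_unlock_irq(blkg->q->queue_lock);
	}

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);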
Diffstat (limited to 'block/blk-cgroup.h')
-rw-r--r--	block/blk-cgroup.h	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index e15f731d2cdd..8056c03a3382 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -266,7 +266,7 @@ static inline void blkg_get(struct blkcg_gq *blkg)
 	blkg->refcnt++;
 }
 
-void __blkg_release(struct blkcg_gq *blkg);
+void __blkg_release_rcu(struct rcu_head *rcu);
 
 /**
  * blkg_put - put a blkg reference
@@ -279,7 +279,7 @@ static inline void blkg_put(struct blkcg_gq *blkg)
 	lockdep_assert_held(blkg->q->queue_lock);
 	WARN_ON_ONCE(blkg->refcnt <= 0);
 	if (!--blkg->refcnt)
-		__blkg_release(blkg);
+		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
 }
 
 struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,