aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-cgroup.h
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2012-03-05 16:15:21 -0500
committerJens Axboe <axboe@kernel.dk>2012-03-06 15:27:24 -0500
commit9f13ef678efd977487fc0c2e489f17c9a8c67a3e (patch)
treee58a2dd153ad24b2ea173d5dfb575c507e1f7589 /block/blk-cgroup.h
parente8989fae38d9831c72b20375a206a919ca468c52 (diff)
blkcg: use double locking instead of RCU for blkg synchronization
blkgs are chained from both blkcgs and request_queues and thus subjected to two locks - blkcg->lock and q->queue_lock. As both blkcg and q can go away anytime, locking during removal is tricky. It's currently solved by wrapping removal inside RCU, which makes the synchronization complex. There are three locks to worry about - the outer RCU, q lock and blkcg lock, and it leads to nasty subtle complications like conditional synchronize_rcu() on queue exit paths.

For all other paths, blkcg lock is naturally nested inside q lock and the only exception is blkcg removal path, which is a very cold path and can be implemented as clumsy but conceptually-simple reverse double lock dancing.

This patch updates blkg removal path such that blkgs are removed while holding both q and blkcg locks, which is trivial for request queue exit path - blkg_destroy_all(). The blkcg removal path, blkiocg_pre_destroy(), implements reverse double lock dancing essentially identical to ioc_release_fn().

This simplifies blkg locking - no half-dead blkgs to worry about. Now unnecessary RCU annotations will be removed by the next patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.h')
-rw-r--r--block/blk-cgroup.h4
1 file changed, 0 insertions(+), 4 deletions(-)
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 6e8ee86a2870..df73040a6a5f 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -380,7 +380,6 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
 extern struct blkio_cgroup blkio_root_cgroup;
 extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
 extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
-extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
 extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 				       struct request_queue *q);
 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
@@ -416,9 +415,6 @@ cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
 static inline struct blkio_cgroup *
 task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
 
-static inline int
-blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
-
 static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 		void *key) { return NULL; }
 static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,