author:    Vivek Goyal <vgoyal@redhat.com>        2009-12-03 12:59:47 -0500
committer: Jens Axboe <jens.axboe@oracle.com>     2009-12-03 13:28:52 -0500
commit:    b1c3576961847da26c91b1e97f226bb66be5fa3f (patch)
tree:      e228525220031232463f9cbbe017bad67807e6d4 /block/blk-cgroup.c
parent:    25fb5169d4c9d4255107abbb7c08ab712434efc8 (diff)
blkio: Take care of cgroup deletion and cfq group reference counting
o One can choose to change the elevator or delete a cgroup. Implement group
  reference counting so that both elevator exit and cgroup deletion can
  take place gracefully.
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Nauman Rafique <nauman@google.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
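
The race this fixes: a cfq group is reachable from two independent teardown
paths, elevator exit and cgroup deletion, and either may run first. Reference
counting lets whichever path drops the last reference free the group. A
minimal sketch of that lifetime rule, using a hypothetical my_group type as a
stand-in (the actual counting lives in cfq-iosched.c, alongside this
blk-cgroup.c change):

```c
#include <linux/atomic.h>
#include <linux/slab.h>

/* Hypothetical stand-in for cfq's per-cgroup group structure. */
struct my_group {
	atomic_t ref;		/* held by: the cgroup, the elevator */
	/* ... per-group scheduling state ... */
};

static struct my_group *my_group_alloc(gfp_t gfp)
{
	struct my_group *g = kzalloc(sizeof(*g), gfp);

	if (g)
		atomic_set(&g->ref, 2);	/* one ref each for cgroup and elevator */
	return g;
}

static void my_group_put(struct my_group *g)
{
	/*
	 * Whichever teardown path drops the last reference frees the
	 * group, so cgroup deletion and elevator exit can interleave
	 * in any order without a use-after-free.
	 */
	if (atomic_dec_and_test(&g->ref))
		kfree(g);
}
```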
Diffstat (limited to 'block/blk-cgroup.c')

 -rw-r--r--  block/blk-cgroup.c | 66
 1 files changed, 64 insertions(+), 2 deletions(-)
```diff
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4f6afd76ec59..0426ab692fd5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -13,6 +13,8 @@
 #include <linux/ioprio.h>
 #include "blk-cgroup.h"
 
+extern void cfq_unlink_blkio_group(void *, struct blkio_group *);
+
 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
 
 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
@@ -28,14 +30,43 @@ void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 
 	spin_lock_irqsave(&blkcg->lock, flags);
 	rcu_assign_pointer(blkg->key, key);
+	blkg->blkcg_id = css_id(&blkcg->css);
 	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 	spin_unlock_irqrestore(&blkcg->lock, flags);
 }
 
+static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+	hlist_del_init_rcu(&blkg->blkcg_node);
+	blkg->blkcg_id = 0;
+}
+
+/*
+ * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
+ * indicating that blk_group was unhashed by the time we got to it.
+ */
 int blkiocg_del_blkio_group(struct blkio_group *blkg)
 {
-	/* Implemented later */
-	return 0;
+	struct blkio_cgroup *blkcg;
+	unsigned long flags;
+	struct cgroup_subsys_state *css;
+	int ret = 1;
+
+	rcu_read_lock();
+	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
+	if (!css)
+		goto out;
+
+	blkcg = container_of(css, struct blkio_cgroup, css);
+	spin_lock_irqsave(&blkcg->lock, flags);
+	if (!hlist_unhashed(&blkg->blkcg_node)) {
+		__blkiocg_del_blkio_group(blkg);
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+out:
+	rcu_read_unlock();
+	return ret;
 }
 
 /* called under rcu_read_lock(). */
```
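
The 0/1 return contract on blkiocg_del_blkio_group() is what lets the two
teardown paths race safely: the caller that actually unhashed the group
(return 0) knows the cgroup's reference is now its own to drop. A caller-side
sketch, using hypothetical cfq_group, cfq_put_cfqg() and cfq_release_group()
names rather than the real cfq-iosched.c code:

```c
/*
 * Hypothetical elevator-exit helper. A return of 0 from
 * blkiocg_del_blkio_group() means the group was still hashed on the
 * cgroup's list and we unhashed it here, so the reference held on
 * behalf of the cgroup is ours to drop; a return of 1 means
 * blkiocg_destroy() already unhashed it and drops that reference
 * itself.
 */
static void cfq_release_group(struct cfq_group *cfqg)
{
	if (!blkiocg_del_blkio_group(&cfqg->blkg))
		cfq_put_cfqg(cfqg);	/* drop the cgroup's reference */
}
```

The other half of that race is the destroy path, which drains the cgroup's
group list under blkcg->lock and notifies the IO policy for each group it
unhashes: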
```diff
@@ -97,8 +128,39 @@ static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
+	unsigned long flags;
+	struct blkio_group *blkg;
+	void *key;
 
+	rcu_read_lock();
+remove_entry:
+	spin_lock_irqsave(&blkcg->lock, flags);
+
+	if (hlist_empty(&blkcg->blkg_list)) {
+		spin_unlock_irqrestore(&blkcg->lock, flags);
+		goto done;
+	}
+
+	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
+				blkcg_node);
+	key = rcu_dereference(blkg->key);
+	__blkiocg_del_blkio_group(blkg);
+
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	/*
+	 * This blkio_group is being unlinked as associated cgroup is going
+	 * away. Let all the IO controlling policies know about this event.
+	 *
+	 * Currently this is static call to one io controlling policy. Once
+	 * we have more policies in place, we need some dynamic registration
+	 * of callback function.
+	 */
+	cfq_unlink_blkio_group(key, blkg);
+	goto remove_entry;
+done:
 	free_css_id(&blkio_subsys, &blkcg->css);
+	rcu_read_unlock();
 	kfree(blkcg);
 }
 
```
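The comment in blkiocg_destroy() flags the hard-coded
cfq_unlink_blkio_group() call as temporary. One plausible shape for the
dynamic registration it asks for, sketched with hypothetical names
(blkio_policy_ops, blkio_policy_register(), blkio_unlink_group_all()) rather
than any interface the kernel actually merged:

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical per-policy callback table. */
struct blkio_policy_ops {
	struct list_head list;
	void (*unlink_group)(void *key, struct blkio_group *blkg);
};

static LIST_HEAD(blkio_policy_list);
static DEFINE_SPINLOCK(blkio_policy_lock);

/* Each IO controlling policy (cfq today, others later) registers once. */
void blkio_policy_register(struct blkio_policy_ops *ops)
{
	spin_lock(&blkio_policy_lock);
	list_add_tail(&ops->list, &blkio_policy_list);
	spin_unlock(&blkio_policy_lock);
}

/* What blkiocg_destroy() would call instead of calling cfq directly. */
static void blkio_unlink_group_all(void *key, struct blkio_group *blkg)
{
	struct blkio_policy_ops *ops;

	spin_lock(&blkio_policy_lock);
	list_for_each_entry(ops, &blkio_policy_list, list)
		ops->unlink_group(key, blkg);
	spin_unlock(&blkio_policy_lock);
}
```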