author     Ingo Molnar <mingo@kernel.org>   2012-08-21 05:27:00 -0400
committer  Ingo Molnar <mingo@kernel.org>   2012-08-21 05:27:00 -0400
commit     bcada3d4b8c96b8792c2306f363992ca5ab9da42 (patch)
tree       e420679a5db6ea4e1694eef57f9abb6acac8d4d3  /block/blk-cgroup.c
parent     26198c21d1b286a084fe5d514a30bc7e6c712a34 (diff)
parent     000078bc3ee69efb1124b8478c7527389a826074 (diff)
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

* Fix include order for bison/flex-generated C files, from Ben Hutchings.

* Build fixes and documentation corrections, from David Ahern.

* Group parsing support, from Jiri Olsa.

* UI/gtk refactorings and improvements, from Namhyung Kim.

* NULL deref fix for perf script, from Namhyung Kim.

* Assorted cleanups, from Robert Richter.

* Let O= make invocations handle relative paths, from Steven Rostedt.

* perf script python fixes, from Feng Tang.

* Improve 'perf lock' error message when the needed tracepoints are not present, from David Ahern.

* Initial bash completion support, from Frederic Weisbecker.

* Allow building without libelf, from Namhyung Kim.

* Support DWARF CFI based unwinding to get callchains when %bp based unwinding is not possible, from Jiri Olsa.

* Symbol resolution fixes: while fixing support for PPC64 files with an .opd ELF section was the end goal, several fixes and cleanups for code that handles all architectures are included, from Cody Schafer.

* Add a description for the JIT interface, from Andi Kleen.

* Assorted fixes for the documentation and the 32-bit build, from Robert Richter.

* Add support for non-tracepoint events in perf script python, from Feng Tang.

* Cache the libtraceevent event_format associated with each evsel early, so that we avoid relookups, i.e. calling pevent_find_event() repeatedly when processing tracepoint events. [ This is to reduce the surface contact with libtraceevent and make clear what it is that the perf tools need from that lib: so far, parsing the common and per-event fields. ]

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
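The last bullet describes straightforward memoization: resolve the tracepoint's event_format once and keep it on the evsel, instead of calling pevent_find_event() for every processed event. A minimal sketch of that caching pattern follows; the struct and helper names are hypothetical stand-ins, not the actual perf tools code.

struct event_format { int id; };		/* stand-in for libtraceevent's type */

/* Hypothetical lookup; in perf this role is played by pevent_find_event(). */
static struct event_format *find_format_by_id(int id)
{
	static struct event_format fmt;

	fmt.id = id;
	return &fmt;
}

/* Hypothetical evsel-like struct; the format is cached on the evsel itself. */
struct sample_evsel {
	int			 tracepoint_id;
	struct event_format	*tp_format;	/* NULL until the first lookup */
};

static struct event_format *sample_evsel__format(struct sample_evsel *evsel)
{
	if (!evsel->tp_format)			/* look up once, reuse for later samples */
		evsel->tp_format = find_format_by_id(evsel->tracepoint_id);
	return evsel->tp_format;
}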
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  139
1 file changed, 90 insertions, 49 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e7dee617358e..f3b44a65fc7a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -31,27 +31,6 @@ EXPORT_SYMBOL_GPL(blkcg_root);
 
 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 
-struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
-{
-	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
-			    struct blkcg, css);
-}
-EXPORT_SYMBOL_GPL(cgroup_to_blkcg);
-
-static struct blkcg *task_blkcg(struct task_struct *tsk)
-{
-	return container_of(task_subsys_state(tsk, blkio_subsys_id),
-			    struct blkcg, css);
-}
-
-struct blkcg *bio_blkcg(struct bio *bio)
-{
-	if (bio && bio->bi_css)
-		return container_of(bio->bi_css, struct blkcg, css);
-	return task_blkcg(current);
-}
-EXPORT_SYMBOL_GPL(bio_blkcg);
-
 static bool blkcg_policy_enabled(struct request_queue *q,
 				 const struct blkcg_policy *pol)
 {
@@ -84,6 +63,7 @@ static void blkg_free(struct blkcg_gq *blkg)
 		kfree(pd);
 	}
 
+	blk_exit_rl(&blkg->rl);
 	kfree(blkg);
 }
 
@@ -91,16 +71,18 @@ static void blkg_free(struct blkcg_gq *blkg)
  * blkg_alloc - allocate a blkg
  * @blkcg: block cgroup the new blkg is associated with
  * @q: request_queue the new blkg is associated with
+ * @gfp_mask: allocation mask to use
  *
  * Allocate a new blkg assocating @blkcg and @q.
  */
-static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+				   gfp_t gfp_mask)
 {
 	struct blkcg_gq *blkg;
 	int i;
 
 	/* alloc and init base part */
-	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
 	if (!blkg)
 		return NULL;
 
@@ -109,6 +91,13 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 	blkg->blkcg = blkcg;
 	blkg->refcnt = 1;
 
+	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
+	if (blkcg != &blkcg_root) {
+		if (blk_init_rl(&blkg->rl, q, gfp_mask))
+			goto err_free;
+		blkg->rl.blkg = blkg;
+	}
+
 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 		struct blkcg_policy *pol = blkcg_policy[i];
 		struct blkg_policy_data *pd;
@@ -117,11 +106,9 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
-		pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
-		if (!pd) {
-			blkg_free(blkg);
-			return NULL;
-		}
+		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
+		if (!pd)
+			goto err_free;
 
 		blkg->pd[i] = pd;
 		pd->blkg = blkg;
@@ -132,6 +119,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 	}
 
 	return blkg;
+
+err_free:
+	blkg_free(blkg);
+	return NULL;
 }
 
 static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
@@ -175,9 +166,13 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
+/*
+ * If @new_blkg is %NULL, this function tries to allocate a new one as
+ * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
+ */
 static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-					     struct request_queue *q)
-	__releases(q->queue_lock) __acquires(q->queue_lock)
+					     struct request_queue *q,
+					     struct blkcg_gq *new_blkg)
 {
 	struct blkcg_gq *blkg;
 	int ret;
@@ -189,24 +184,26 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 	blkg = __blkg_lookup(blkcg, q);
 	if (blkg) {
 		rcu_assign_pointer(blkcg->blkg_hint, blkg);
-		return blkg;
+		goto out_free;
 	}
 
 	/* blkg holds a reference to blkcg */
-	if (!css_tryget(&blkcg->css))
-		return ERR_PTR(-EINVAL);
+	if (!css_tryget(&blkcg->css)) {
+		blkg = ERR_PTR(-EINVAL);
+		goto out_free;
+	}
 
 	/* allocate */
-	ret = -ENOMEM;
-	blkg = blkg_alloc(blkcg, q);
-	if (unlikely(!blkg))
-		goto err_put;
+	if (!new_blkg) {
+		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
+		if (unlikely(!new_blkg)) {
+			blkg = ERR_PTR(-ENOMEM);
+			goto out_put;
+		}
+	}
+	blkg = new_blkg;
 
 	/* insert */
-	ret = radix_tree_preload(GFP_ATOMIC);
-	if (ret)
-		goto err_free;
-
 	spin_lock(&blkcg->lock);
 	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
 	if (likely(!ret)) {
@@ -215,15 +212,15 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 	}
 	spin_unlock(&blkcg->lock);
 
-	radix_tree_preload_end();
-
 	if (!ret)
 		return blkg;
-err_free:
-	blkg_free(blkg);
-err_put:
+
+	blkg = ERR_PTR(ret);
+out_put:
 	css_put(&blkcg->css);
-	return ERR_PTR(ret);
+out_free:
+	blkg_free(new_blkg);
+	return blkg;
 }
 
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
@@ -235,7 +232,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	 */
 	if (unlikely(blk_queue_bypass(q)))
 		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-	return __blkg_lookup_create(blkcg, q);
+	return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
@@ -313,6 +310,38 @@ void __blkg_release(struct blkcg_gq *blkg)
 }
 EXPORT_SYMBOL_GPL(__blkg_release);
 
+/*
+ * The next function used by blk_queue_for_each_rl().  It's a bit tricky
+ * because the root blkg uses @q->root_rl instead of its own rl.
+ */
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+					 struct request_queue *q)
+{
+	struct list_head *ent;
+	struct blkcg_gq *blkg;
+
+	/*
+	 * Determine the current blkg list_head.  The first entry is
+	 * root_rl which is off @q->blkg_list and mapped to the head.
+	 */
+	if (rl == &q->root_rl) {
+		ent = &q->blkg_list;
+	} else {
+		blkg = container_of(rl, struct blkcg_gq, rl);
+		ent = &blkg->q_node;
+	}
+
+	/* walk to the next list_head, skip root blkcg */
+	ent = ent->next;
+	if (ent == &q->root_blkg->q_node)
+		ent = ent->next;
+	if (ent == &q->blkg_list)
+		return NULL;
+
+	blkg = container_of(ent, struct blkcg_gq, q_node);
+	return &blkg->rl;
+}
+
 static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
 			     u64 val)
 {
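The comment introducing __blk_queue_next_rl() above names blk_queue_for_each_rl() as its consumer. Below is a plausible sketch of such an iterator and one example caller, assuming only the helper's semantics shown in the hunk (start from @q->root_rl, stop when NULL is returned); the macro body is illustrative, and the real declaration presumably lives in block/blk.h rather than here.

#include <linux/blkdev.h>

/* Defined in the hunk above; presumably declared in block/blk.h. */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);

/* Illustrative iterator: begin at @q->root_rl, advance via the helper. */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

/* Example caller: visit every request_list hanging off @q. */
static void walk_all_request_lists(struct request_queue *q)
{
	struct request_list *rl;

	blk_queue_for_each_rl(rl, q) {
		/* e.g. wake up waiters or update per-rl accounting here */
	}
}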
@@ -734,24 +763,36 @@ int blkcg_activate_policy(struct request_queue *q,
 	struct blkcg_gq *blkg;
 	struct blkg_policy_data *pd, *n;
 	int cnt = 0, ret;
+	bool preloaded;
 
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
+	/* preallocations for root blkg */
+	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+	if (!blkg)
+		return -ENOMEM;
+
+	preloaded = !radix_tree_preload(GFP_KERNEL);
+
 	blk_queue_bypass_start(q);
 
 	/* make sure the root blkg exists and count the existing blkgs */
 	spin_lock_irq(q->queue_lock);
 
 	rcu_read_lock();
-	blkg = __blkg_lookup_create(&blkcg_root, q);
+	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
 	rcu_read_unlock();
 
+	if (preloaded)
+		radix_tree_preload_end();
+
 	if (IS_ERR(blkg)) {
 		ret = PTR_ERR(blkg);
 		goto out_unlock;
 	}
 	q->root_blkg = blkg;
+	q->root_rl.blkg = blkg;
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node)
 		cnt++;
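The hunk above follows a common preallocate-then-lock pattern: blkg_alloc(..., GFP_KERNEL) and radix_tree_preload(GFP_KERNEL) run while sleeping is still allowed, and the atomic section then either consumes the preallocated blkg or frees it. Here is a reduced sketch of the same pattern using a hypothetical object type and tree (not code from this file).

#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item { int payload; };			/* hypothetical object */

static RADIX_TREE(item_tree, GFP_ATOMIC);	/* inserts happen under a spinlock */
static DEFINE_SPINLOCK(item_lock);

/* Preallocate with GFP_KERNEL while sleeping is allowed, then insert atomically. */
static int item_install(unsigned long index)
{
	struct item *new = kzalloc(sizeof(*new), GFP_KERNEL);
	bool preloaded;
	int ret;

	if (!new)
		return -ENOMEM;

	/* Preload per-CPU radix-tree nodes so the insert below need not allocate. */
	preloaded = !radix_tree_preload(GFP_KERNEL);

	spin_lock(&item_lock);
	ret = radix_tree_insert(&item_tree, index, new);
	spin_unlock(&item_lock);

	if (preloaded)
		radix_tree_preload_end();
	if (ret)
		kfree(new);			/* unused preallocation is consumed (freed) here */
	return ret;
}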