author     Tejun Heo <tj@kernel.org>       2012-04-16 16:57:25 -0400
committer  Jens Axboe <axboe@kernel.dk>    2012-04-20 04:06:17 -0400
commit     3c798398e393e5f9502dbab2b51e6c25e2e8f2ac
tree       d6f638e6a25dec4887e64bcc35b98bc394cb974f
parent     36558c8a30e121f97b5852ae33e28081af21bdbf
blkcg: mass rename of blkcg API
During the recent blkcg cleanup, most of blkcg API has changed to such
extent that mass renaming wouldn't cause any noticeable pain. Take the
chance and cleanup the naming.

* Rename blkio_cgroup to blkcg.

* Drop blkio / blkiocg prefixes and consistently use blkcg.

* Rename blkio_group to blkcg_gq, which is consistent with io_cq but
  keep the blkg prefix / variable name.

* Rename policy method type and field names to signify they're dealing
  with policy data.

* Rename blkio_policy_type to blkcg_policy.

This patch doesn't cause any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
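For orientation, here is a minimal before/after sketch of a caller under
the rename. The type names and lookup helpers are taken from this patch;
the wrapper functions themselves are hypothetical, and they assume the
caller already holds rcu_read_lock() as blkg_lookup() requires:

	/* hypothetical caller, before this patch (old names) */
	static struct blkio_group *old_style_lookup(struct bio *bio,
						    struct request_queue *q)
	{
		struct blkio_cgroup *blkcg = bio_blkio_cgroup(bio);

		return blkg_lookup(blkcg, q);
	}

	/* the same caller after this patch (renamed types and accessors) */
	static struct blkcg_gq *new_style_lookup(struct bio *bio,
						 struct request_queue *q)
	{
		struct blkcg *blkcg = bio_blkcg(bio);

		return blkg_lookup(blkcg, q);	/* blkg prefix is kept */
	}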
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--	block/blk-cgroup.c	202
1 file changed, 99 insertions, 103 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 63337024e4d7..997570329517 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -26,39 +26,39 @@
 
 static DEFINE_MUTEX(blkcg_pol_mutex);
 
-struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
-EXPORT_SYMBOL_GPL(blkio_root_cgroup);
+struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
+EXPORT_SYMBOL_GPL(blkcg_root);
 
-static struct blkio_policy_type *blkio_policy[BLKCG_MAX_POLS];
+static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 
-struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
 {
 	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
-			    struct blkio_cgroup, css);
+			    struct blkcg, css);
 }
-EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
+EXPORT_SYMBOL_GPL(cgroup_to_blkcg);
 
-static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
+static struct blkcg *task_blkcg(struct task_struct *tsk)
 {
 	return container_of(task_subsys_state(tsk, blkio_subsys_id),
-			    struct blkio_cgroup, css);
+			    struct blkcg, css);
 }
 
-struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
+struct blkcg *bio_blkcg(struct bio *bio)
 {
 	if (bio && bio->bi_css)
-		return container_of(bio->bi_css, struct blkio_cgroup, css);
-	return task_blkio_cgroup(current);
+		return container_of(bio->bi_css, struct blkcg, css);
+	return task_blkcg(current);
 }
-EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
+EXPORT_SYMBOL_GPL(bio_blkcg);
 
 static bool blkcg_policy_enabled(struct request_queue *q,
-				 const struct blkio_policy_type *pol)
+				 const struct blkcg_policy *pol)
 {
 	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
-static size_t blkg_pd_size(const struct blkio_policy_type *pol)
+static size_t blkg_pd_size(const struct blkcg_policy *pol)
 {
 	return sizeof(struct blkg_policy_data) + pol->pdata_size;
 }
@@ -69,7 +69,7 @@ static size_t blkg_pd_size(const struct blkio_policy_type *pol)
  *
  * Free @blkg which may be partially allocated.
  */
-static void blkg_free(struct blkio_group *blkg)
+static void blkg_free(struct blkcg_gq *blkg)
 {
 	int i;
 
@@ -77,14 +77,14 @@ static void blkg_free(struct blkio_group *blkg)
 		return;
 
 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
-		struct blkio_policy_type *pol = blkio_policy[i];
+		struct blkcg_policy *pol = blkcg_policy[i];
 		struct blkg_policy_data *pd = blkg->pd[i];
 
 		if (!pd)
 			continue;
 
-		if (pol && pol->ops.blkio_exit_group_fn)
-			pol->ops.blkio_exit_group_fn(blkg);
+		if (pol && pol->ops.pd_exit_fn)
+			pol->ops.pd_exit_fn(blkg);
 
 		kfree(pd);
 	}
@@ -99,10 +99,9 @@ static void blkg_free(struct blkio_group *blkg)
  *
  * Allocate a new blkg assocating @blkcg and @q.
  */
-static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
-				      struct request_queue *q)
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 {
-	struct blkio_group *blkg;
+	struct blkcg_gq *blkg;
 	int i;
 
 	/* alloc and init base part */
@@ -116,7 +115,7 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
 	blkg->refcnt = 1;
 
 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
-		struct blkio_policy_type *pol = blkio_policy[i];
+		struct blkcg_policy *pol = blkcg_policy[i];
 		struct blkg_policy_data *pd;
 
 		if (!blkcg_policy_enabled(q, pol))
@@ -135,19 +134,19 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
 
 	/* invoke per-policy init */
 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
-		struct blkio_policy_type *pol = blkio_policy[i];
+		struct blkcg_policy *pol = blkcg_policy[i];
 
 		if (blkcg_policy_enabled(blkg->q, pol))
-			pol->ops.blkio_init_group_fn(blkg);
+			pol->ops.pd_init_fn(blkg);
 	}
 
 	return blkg;
 }
 
-static struct blkio_group *__blkg_lookup(struct blkio_cgroup *blkcg,
-					 struct request_queue *q)
+static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+				      struct request_queue *q)
 {
-	struct blkio_group *blkg;
+	struct blkcg_gq *blkg;
 	struct hlist_node *n;
 
 	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
@@ -165,8 +164,7 @@ static struct blkio_group *__blkg_lookup(struct blkio_cgroup *blkcg,
  * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
  * - see blk_queue_bypass_start() for details.
  */
-struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
-				struct request_queue *q)
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
@@ -176,11 +174,11 @@ struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
-static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg,
-						struct request_queue *q)
+static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+					     struct request_queue *q)
 	__releases(q->queue_lock) __acquires(q->queue_lock)
 {
-	struct blkio_group *blkg;
+	struct blkcg_gq *blkg;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 	lockdep_assert_held(q->queue_lock);
@@ -213,8 +211,8 @@ out:
 	return blkg;
 }
 
-struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
-				       struct request_queue *q)
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+				    struct request_queue *q)
 {
 	/*
 	 * This could be the first entry point of blkcg implementation and
@@ -226,10 +224,10 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
-static void blkg_destroy(struct blkio_group *blkg)
+static void blkg_destroy(struct blkcg_gq *blkg)
 {
 	struct request_queue *q = blkg->q;
-	struct blkio_cgroup *blkcg = blkg->blkcg;
+	struct blkcg *blkcg = blkg->blkcg;
 
 	lockdep_assert_held(q->queue_lock);
 	lockdep_assert_held(&blkcg->lock);
@@ -255,12 +253,12 @@ static void blkg_destroy(struct blkio_group *blkg)
  */
 static void blkg_destroy_all(struct request_queue *q)
 {
-	struct blkio_group *blkg, *n;
+	struct blkcg_gq *blkg, *n;
 
 	lockdep_assert_held(q->queue_lock);
 
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
-		struct blkio_cgroup *blkcg = blkg->blkcg;
+		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
 		blkg_destroy(blkg);
@@ -270,10 +268,10 @@ static void blkg_destroy_all(struct request_queue *q)
 
 static void blkg_rcu_free(struct rcu_head *rcu_head)
 {
-	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
+	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
 }
 
-void __blkg_release(struct blkio_group *blkg)
+void __blkg_release(struct blkcg_gq *blkg)
 {
 	/* release the extra blkcg reference this blkg has been holding */
 	css_put(&blkg->blkcg->css);
@@ -291,11 +289,11 @@ void __blkg_release(struct blkio_group *blkg)
 }
 EXPORT_SYMBOL_GPL(__blkg_release);
 
-static int
-blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
+static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
+			     u64 val)
 {
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
-	struct blkio_group *blkg;
+	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg_gq *blkg;
 	struct hlist_node *n;
 	int i;
 
@@ -309,11 +307,11 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 	 */
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
-			struct blkio_policy_type *pol = blkio_policy[i];
+			struct blkcg_policy *pol = blkcg_policy[i];
 
 			if (blkcg_policy_enabled(blkg->q, pol) &&
-			    pol->ops.blkio_reset_group_stats_fn)
-				pol->ops.blkio_reset_group_stats_fn(blkg);
+			    pol->ops.pd_reset_stats_fn)
+				pol->ops.pd_reset_stats_fn(blkg);
 		}
 	}
 
@@ -322,7 +320,7 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 	return 0;
 }
 
-static const char *blkg_dev_name(struct blkio_group *blkg)
+static const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
 	/* some drivers (floppy) instantiate a queue w/o disk registered */
 	if (blkg->q->backing_dev_info.dev)
@@ -347,12 +345,12 @@ static const char *blkg_dev_name(struct blkio_group *blkg)
  * This is to be used to construct print functions for
  * cftype->read_seq_string method.
  */
-void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 		       u64 (*prfill)(struct seq_file *, void *, int),
-		       const struct blkio_policy_type *pol, int data,
+		       const struct blkcg_policy *pol, int data,
 		       bool show_total)
 {
-	struct blkio_group *blkg;
+	struct blkcg_gq *blkg;
 	struct hlist_node *n;
 	u64 total = 0;
 
@@ -462,13 +460,12 @@ EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
  * value. This function returns with RCU read lock and queue lock held and
  * must be paired with blkg_conf_finish().
  */
-int blkg_conf_prep(struct blkio_cgroup *blkcg,
-		   const struct blkio_policy_type *pol, const char *input,
-		   struct blkg_conf_ctx *ctx)
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+		   const char *input, struct blkg_conf_ctx *ctx)
 	__acquires(rcu) __acquires(disk->queue->queue_lock)
 {
 	struct gendisk *disk;
-	struct blkio_group *blkg;
+	struct blkcg_gq *blkg;
 	unsigned int major, minor;
 	unsigned long long v;
 	int part, ret;
@@ -529,16 +526,16 @@ void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);
 
-struct cftype blkio_files[] = {
+struct cftype blkcg_files[] = {
 	{
 		.name = "reset_stats",
-		.write_u64 = blkiocg_reset_stats,
+		.write_u64 = blkcg_reset_stats,
 	},
 	{ }	/* terminate */
 };
 
 /**
- * blkiocg_pre_destroy - cgroup pre_destroy callback
+ * blkcg_pre_destroy - cgroup pre_destroy callback
  * @cgroup: cgroup of interest
  *
  * This function is called when @cgroup is about to go away and responsible
@@ -548,15 +545,15 @@ struct cftype blkio_files[] = {
  *
  * This is the blkcg counterpart of ioc_release_fn().
  */
-static int blkiocg_pre_destroy(struct cgroup *cgroup)
+static int blkcg_pre_destroy(struct cgroup *cgroup)
 {
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
+	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 
 	spin_lock_irq(&blkcg->lock);
 
 	while (!hlist_empty(&blkcg->blkg_list)) {
-		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
-						struct blkio_group, blkcg_node);
+		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
+						struct blkcg_gq, blkcg_node);
 		struct request_queue *q = blkg->q;
 
 		if (spin_trylock(q->queue_lock)) {
@@ -573,22 +570,22 @@ static int blkiocg_pre_destroy(struct cgroup *cgroup)
 	return 0;
 }
 
-static void blkiocg_destroy(struct cgroup *cgroup)
+static void blkcg_destroy(struct cgroup *cgroup)
 {
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
+	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 
-	if (blkcg != &blkio_root_cgroup)
+	if (blkcg != &blkcg_root)
 		kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
+static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
 {
 	static atomic64_t id_seq = ATOMIC64_INIT(0);
-	struct blkio_cgroup *blkcg;
+	struct blkcg *blkcg;
 	struct cgroup *parent = cgroup->parent;
 
 	if (!parent) {
-		blkcg = &blkio_root_cgroup;
+		blkcg = &blkcg_root;
 		goto done;
 	}
 
@@ -656,7 +653,7 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct io_context *ioc;
@@ -677,12 +674,12 @@ static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 
 struct cgroup_subsys blkio_subsys = {
 	.name = "blkio",
-	.create = blkiocg_create,
-	.can_attach = blkiocg_can_attach,
-	.pre_destroy = blkiocg_pre_destroy,
-	.destroy = blkiocg_destroy,
+	.create = blkcg_create,
+	.can_attach = blkcg_can_attach,
+	.pre_destroy = blkcg_pre_destroy,
+	.destroy = blkcg_destroy,
 	.subsys_id = blkio_subsys_id,
-	.base_cftypes = blkio_files,
+	.base_cftypes = blkcg_files,
 	.module = THIS_MODULE,
 };
 EXPORT_SYMBOL_GPL(blkio_subsys);
@@ -704,10 +701,10 @@ EXPORT_SYMBOL_GPL(blkio_subsys);
  * [un]registerations. Returns 0 on success, -errno on failure.
  */
 int blkcg_activate_policy(struct request_queue *q,
-			  const struct blkio_policy_type *pol)
+			  const struct blkcg_policy *pol)
 {
 	LIST_HEAD(pds);
-	struct blkio_group *blkg;
+	struct blkcg_gq *blkg;
 	struct blkg_policy_data *pd, *n;
 	int cnt = 0, ret;
 
@@ -720,7 +717,7 @@ int blkcg_activate_policy(struct request_queue *q,
 	spin_lock_irq(q->queue_lock);
 
 	rcu_read_lock();
-	blkg = __blkg_lookup_create(&blkio_root_cgroup, q);
+	blkg = __blkg_lookup_create(&blkcg_root, q);
 	rcu_read_unlock();
 
 	if (IS_ERR(blkg)) {
@@ -764,7 +761,7 @@ int blkcg_activate_policy(struct request_queue *q,
 
 		blkg->pd[pol->plid] = pd;
 		pd->blkg = blkg;
-		pol->ops.blkio_init_group_fn(blkg);
+		pol->ops.pd_init_fn(blkg);
 
 		spin_unlock(&blkg->blkcg->lock);
 	}
@@ -790,9 +787,9 @@ EXPORT_SYMBOL_GPL(blkcg_activate_policy);
  * blkcg_activate_policy().
  */
 void blkcg_deactivate_policy(struct request_queue *q,
-			     const struct blkio_policy_type *pol)
+			     const struct blkcg_policy *pol)
 {
-	struct blkio_group *blkg;
+	struct blkcg_gq *blkg;
 
 	if (!blkcg_policy_enabled(q, pol))
 		return;
@@ -810,8 +807,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
 		/* grab blkcg lock too while removing @pd from @blkg */
 		spin_lock(&blkg->blkcg->lock);
 
-		if (pol->ops.blkio_exit_group_fn)
-			pol->ops.blkio_exit_group_fn(blkg);
+		if (pol->ops.pd_exit_fn)
+			pol->ops.pd_exit_fn(blkg);
 
 		kfree(blkg->pd[pol->plid]);
 		blkg->pd[pol->plid] = NULL;
@@ -825,14 +822,13 @@ void blkcg_deactivate_policy(struct request_queue *q,
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
 
 /**
- * blkio_policy_register - register a blkcg policy
- * @blkiop: blkcg policy to register
+ * blkcg_policy_register - register a blkcg policy
+ * @pol: blkcg policy to register
  *
- * Register @blkiop with blkcg core. Might sleep and @blkiop may be
- * modified on successful registration. Returns 0 on success and -errno on
- * failure.
+ * Register @pol with blkcg core. Might sleep and @pol may be modified on
+ * successful registration. Returns 0 on success and -errno on failure.
  */
-int blkio_policy_register(struct blkio_policy_type *blkiop)
+int blkcg_policy_register(struct blkcg_policy *pol)
 {
 	int i, ret;
 
@@ -841,45 +837,45 @@ int blkio_policy_register(struct blkio_policy_type *blkiop)
 	/* find an empty slot */
 	ret = -ENOSPC;
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
-		if (!blkio_policy[i])
+		if (!blkcg_policy[i])
 			break;
 	if (i >= BLKCG_MAX_POLS)
 		goto out_unlock;
 
 	/* register and update blkgs */
-	blkiop->plid = i;
-	blkio_policy[i] = blkiop;
+	pol->plid = i;
+	blkcg_policy[i] = pol;
 
 	/* everything is in place, add intf files for the new policy */
-	if (blkiop->cftypes)
-		WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
+	if (pol->cftypes)
+		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
 	ret = 0;
 out_unlock:
 	mutex_unlock(&blkcg_pol_mutex);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(blkio_policy_register);
+EXPORT_SYMBOL_GPL(blkcg_policy_register);
 
 /**
- * blkiop_policy_unregister - unregister a blkcg policy
- * @blkiop: blkcg policy to unregister
+ * blkcg_policy_unregister - unregister a blkcg policy
+ * @pol: blkcg policy to unregister
  *
- * Undo blkio_policy_register(@blkiop). Might sleep.
+ * Undo blkcg_policy_register(@pol). Might sleep.
  */
-void blkio_policy_unregister(struct blkio_policy_type *blkiop)
+void blkcg_policy_unregister(struct blkcg_policy *pol)
 {
 	mutex_lock(&blkcg_pol_mutex);
 
-	if (WARN_ON(blkio_policy[blkiop->plid] != blkiop))
+	if (WARN_ON(blkcg_policy[pol->plid] != pol))
 		goto out_unlock;
 
 	/* kill the intf files first */
-	if (blkiop->cftypes)
-		cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);
+	if (pol->cftypes)
+		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
 
 	/* unregister and update blkgs */
-	blkio_policy[blkiop->plid] = NULL;
+	blkcg_policy[pol->plid] = NULL;
 out_unlock:
 	mutex_unlock(&blkcg_pol_mutex);
 }
-EXPORT_SYMBOL_GPL(blkio_policy_unregister);
+EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
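As a closing usage sketch, a policy module under the renamed registration
API might look like the following. The example_* names are hypothetical,
and the struct blkcg_policy layout is assumed from how this file
dereferences pol->ops.pd_init_fn, pol->ops.pd_exit_fn, pol->pdata_size and
pol->cftypes; ->plid is assigned by blkcg_policy_register() itself:

	#include <linux/module.h>
	#include "blk-cgroup.h"

	/* hypothetical per-blkg callbacks for an example policy */
	static void example_pd_init(struct blkcg_gq *blkg) { }
	static void example_pd_exit(struct blkcg_gq *blkg) { }

	static struct blkcg_policy example_policy = {
		.ops = {
			.pd_init_fn	= example_pd_init,	/* was blkio_init_group_fn */
			.pd_exit_fn	= example_pd_exit,	/* was blkio_exit_group_fn */
		},
		.pdata_size	= 0,	/* no extra per-blkg payload in this sketch */
	};

	static int __init example_init(void)
	{
		return blkcg_policy_register(&example_policy);	/* was blkio_policy_register() */
	}

	static void __exit example_exit(void)
	{
		blkcg_policy_unregister(&example_policy);	/* was blkio_policy_unregister() */
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");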