aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2012-04-16 16:57:25 -0400
committerJens Axboe <axboe@kernel.dk>2012-04-20 04:06:17 -0400
commit3c798398e393e5f9502dbab2b51e6c25e2e8f2ac (patch)
treed6f638e6a25dec4887e64bcc35b98bc394cb974f /block
parent36558c8a30e121f97b5852ae33e28081af21bdbf (diff)
blkcg: mass rename of blkcg API
During the recent blkcg cleanup, most of the blkcg API has changed to such an extent that a mass renaming wouldn't cause any noticeable pain. Take the chance and clean up the naming. * Rename blkio_cgroup to blkcg. * Drop blkio / blkiocg prefixes and consistently use blkcg. * Rename blkio_group to blkcg_gq, which is consistent with io_cq but keep the blkg prefix / variable name. * Rename policy method type and field names to signify they're dealing with policy data. * Rename blkio_policy_type to blkcg_policy. This patch doesn't cause any functional change. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--block/blk-cgroup.c202
-rw-r--r--block/blk-cgroup.h109
-rw-r--r--block/blk-throttle.c72
-rw-r--r--block/cfq-iosched.c78
4 files changed, 228 insertions, 233 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 63337024e4d7..997570329517 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -26,39 +26,39 @@
26 26
27static DEFINE_MUTEX(blkcg_pol_mutex); 27static DEFINE_MUTEX(blkcg_pol_mutex);
28 28
29struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT }; 29struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
30EXPORT_SYMBOL_GPL(blkio_root_cgroup); 30EXPORT_SYMBOL_GPL(blkcg_root);
31 31
32static struct blkio_policy_type *blkio_policy[BLKCG_MAX_POLS]; 32static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
33 33
34struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup) 34struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
35{ 35{
36 return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), 36 return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
37 struct blkio_cgroup, css); 37 struct blkcg, css);
38} 38}
39EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); 39EXPORT_SYMBOL_GPL(cgroup_to_blkcg);
40 40
41static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk) 41static struct blkcg *task_blkcg(struct task_struct *tsk)
42{ 42{
43 return container_of(task_subsys_state(tsk, blkio_subsys_id), 43 return container_of(task_subsys_state(tsk, blkio_subsys_id),
44 struct blkio_cgroup, css); 44 struct blkcg, css);
45} 45}
46 46
47struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio) 47struct blkcg *bio_blkcg(struct bio *bio)
48{ 48{
49 if (bio && bio->bi_css) 49 if (bio && bio->bi_css)
50 return container_of(bio->bi_css, struct blkio_cgroup, css); 50 return container_of(bio->bi_css, struct blkcg, css);
51 return task_blkio_cgroup(current); 51 return task_blkcg(current);
52} 52}
53EXPORT_SYMBOL_GPL(bio_blkio_cgroup); 53EXPORT_SYMBOL_GPL(bio_blkcg);
54 54
55static bool blkcg_policy_enabled(struct request_queue *q, 55static bool blkcg_policy_enabled(struct request_queue *q,
56 const struct blkio_policy_type *pol) 56 const struct blkcg_policy *pol)
57{ 57{
58 return pol && test_bit(pol->plid, q->blkcg_pols); 58 return pol && test_bit(pol->plid, q->blkcg_pols);
59} 59}
60 60
61static size_t blkg_pd_size(const struct blkio_policy_type *pol) 61static size_t blkg_pd_size(const struct blkcg_policy *pol)
62{ 62{
63 return sizeof(struct blkg_policy_data) + pol->pdata_size; 63 return sizeof(struct blkg_policy_data) + pol->pdata_size;
64} 64}
@@ -69,7 +69,7 @@ static size_t blkg_pd_size(const struct blkio_policy_type *pol)
69 * 69 *
70 * Free @blkg which may be partially allocated. 70 * Free @blkg which may be partially allocated.
71 */ 71 */
72static void blkg_free(struct blkio_group *blkg) 72static void blkg_free(struct blkcg_gq *blkg)
73{ 73{
74 int i; 74 int i;
75 75
@@ -77,14 +77,14 @@ static void blkg_free(struct blkio_group *blkg)
77 return; 77 return;
78 78
79 for (i = 0; i < BLKCG_MAX_POLS; i++) { 79 for (i = 0; i < BLKCG_MAX_POLS; i++) {
80 struct blkio_policy_type *pol = blkio_policy[i]; 80 struct blkcg_policy *pol = blkcg_policy[i];
81 struct blkg_policy_data *pd = blkg->pd[i]; 81 struct blkg_policy_data *pd = blkg->pd[i];
82 82
83 if (!pd) 83 if (!pd)
84 continue; 84 continue;
85 85
86 if (pol && pol->ops.blkio_exit_group_fn) 86 if (pol && pol->ops.pd_exit_fn)
87 pol->ops.blkio_exit_group_fn(blkg); 87 pol->ops.pd_exit_fn(blkg);
88 88
89 kfree(pd); 89 kfree(pd);
90 } 90 }
@@ -99,10 +99,9 @@ static void blkg_free(struct blkio_group *blkg)
99 * 99 *
 100 * Allocate a new blkg associating @blkcg and @q. 100
101 */ 101 */
102static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg, 102static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
103 struct request_queue *q)
104{ 103{
105 struct blkio_group *blkg; 104 struct blkcg_gq *blkg;
106 int i; 105 int i;
107 106
108 /* alloc and init base part */ 107 /* alloc and init base part */
@@ -116,7 +115,7 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
116 blkg->refcnt = 1; 115 blkg->refcnt = 1;
117 116
118 for (i = 0; i < BLKCG_MAX_POLS; i++) { 117 for (i = 0; i < BLKCG_MAX_POLS; i++) {
119 struct blkio_policy_type *pol = blkio_policy[i]; 118 struct blkcg_policy *pol = blkcg_policy[i];
120 struct blkg_policy_data *pd; 119 struct blkg_policy_data *pd;
121 120
122 if (!blkcg_policy_enabled(q, pol)) 121 if (!blkcg_policy_enabled(q, pol))
@@ -135,19 +134,19 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
135 134
136 /* invoke per-policy init */ 135 /* invoke per-policy init */
137 for (i = 0; i < BLKCG_MAX_POLS; i++) { 136 for (i = 0; i < BLKCG_MAX_POLS; i++) {
138 struct blkio_policy_type *pol = blkio_policy[i]; 137 struct blkcg_policy *pol = blkcg_policy[i];
139 138
140 if (blkcg_policy_enabled(blkg->q, pol)) 139 if (blkcg_policy_enabled(blkg->q, pol))
141 pol->ops.blkio_init_group_fn(blkg); 140 pol->ops.pd_init_fn(blkg);
142 } 141 }
143 142
144 return blkg; 143 return blkg;
145} 144}
146 145
147static struct blkio_group *__blkg_lookup(struct blkio_cgroup *blkcg, 146static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
148 struct request_queue *q) 147 struct request_queue *q)
149{ 148{
150 struct blkio_group *blkg; 149 struct blkcg_gq *blkg;
151 struct hlist_node *n; 150 struct hlist_node *n;
152 151
153 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) 152 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
@@ -165,8 +164,7 @@ static struct blkio_group *__blkg_lookup(struct blkio_cgroup *blkcg,
165 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing 164 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
166 * - see blk_queue_bypass_start() for details. 165 * - see blk_queue_bypass_start() for details.
167 */ 166 */
168struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg, 167struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
169 struct request_queue *q)
170{ 168{
171 WARN_ON_ONCE(!rcu_read_lock_held()); 169 WARN_ON_ONCE(!rcu_read_lock_held());
172 170
@@ -176,11 +174,11 @@ struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
176} 174}
177EXPORT_SYMBOL_GPL(blkg_lookup); 175EXPORT_SYMBOL_GPL(blkg_lookup);
178 176
179static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg, 177static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
180 struct request_queue *q) 178 struct request_queue *q)
181 __releases(q->queue_lock) __acquires(q->queue_lock) 179 __releases(q->queue_lock) __acquires(q->queue_lock)
182{ 180{
183 struct blkio_group *blkg; 181 struct blkcg_gq *blkg;
184 182
185 WARN_ON_ONCE(!rcu_read_lock_held()); 183 WARN_ON_ONCE(!rcu_read_lock_held());
186 lockdep_assert_held(q->queue_lock); 184 lockdep_assert_held(q->queue_lock);
@@ -213,8 +211,8 @@ out:
213 return blkg; 211 return blkg;
214} 212}
215 213
216struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg, 214struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
217 struct request_queue *q) 215 struct request_queue *q)
218{ 216{
219 /* 217 /*
220 * This could be the first entry point of blkcg implementation and 218 * This could be the first entry point of blkcg implementation and
@@ -226,10 +224,10 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
226} 224}
227EXPORT_SYMBOL_GPL(blkg_lookup_create); 225EXPORT_SYMBOL_GPL(blkg_lookup_create);
228 226
229static void blkg_destroy(struct blkio_group *blkg) 227static void blkg_destroy(struct blkcg_gq *blkg)
230{ 228{
231 struct request_queue *q = blkg->q; 229 struct request_queue *q = blkg->q;
232 struct blkio_cgroup *blkcg = blkg->blkcg; 230 struct blkcg *blkcg = blkg->blkcg;
233 231
234 lockdep_assert_held(q->queue_lock); 232 lockdep_assert_held(q->queue_lock);
235 lockdep_assert_held(&blkcg->lock); 233 lockdep_assert_held(&blkcg->lock);
@@ -255,12 +253,12 @@ static void blkg_destroy(struct blkio_group *blkg)
255 */ 253 */
256static void blkg_destroy_all(struct request_queue *q) 254static void blkg_destroy_all(struct request_queue *q)
257{ 255{
258 struct blkio_group *blkg, *n; 256 struct blkcg_gq *blkg, *n;
259 257
260 lockdep_assert_held(q->queue_lock); 258 lockdep_assert_held(q->queue_lock);
261 259
262 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { 260 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
263 struct blkio_cgroup *blkcg = blkg->blkcg; 261 struct blkcg *blkcg = blkg->blkcg;
264 262
265 spin_lock(&blkcg->lock); 263 spin_lock(&blkcg->lock);
266 blkg_destroy(blkg); 264 blkg_destroy(blkg);
@@ -270,10 +268,10 @@ static void blkg_destroy_all(struct request_queue *q)
270 268
271static void blkg_rcu_free(struct rcu_head *rcu_head) 269static void blkg_rcu_free(struct rcu_head *rcu_head)
272{ 270{
273 blkg_free(container_of(rcu_head, struct blkio_group, rcu_head)); 271 blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
274} 272}
275 273
276void __blkg_release(struct blkio_group *blkg) 274void __blkg_release(struct blkcg_gq *blkg)
277{ 275{
278 /* release the extra blkcg reference this blkg has been holding */ 276 /* release the extra blkcg reference this blkg has been holding */
279 css_put(&blkg->blkcg->css); 277 css_put(&blkg->blkcg->css);
@@ -291,11 +289,11 @@ void __blkg_release(struct blkio_group *blkg)
291} 289}
292EXPORT_SYMBOL_GPL(__blkg_release); 290EXPORT_SYMBOL_GPL(__blkg_release);
293 291
294static int 292static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
295blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val) 293 u64 val)
296{ 294{
297 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); 295 struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
298 struct blkio_group *blkg; 296 struct blkcg_gq *blkg;
299 struct hlist_node *n; 297 struct hlist_node *n;
300 int i; 298 int i;
301 299
@@ -309,11 +307,11 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
309 */ 307 */
310 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { 308 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
311 for (i = 0; i < BLKCG_MAX_POLS; i++) { 309 for (i = 0; i < BLKCG_MAX_POLS; i++) {
312 struct blkio_policy_type *pol = blkio_policy[i]; 310 struct blkcg_policy *pol = blkcg_policy[i];
313 311
314 if (blkcg_policy_enabled(blkg->q, pol) && 312 if (blkcg_policy_enabled(blkg->q, pol) &&
315 pol->ops.blkio_reset_group_stats_fn) 313 pol->ops.pd_reset_stats_fn)
316 pol->ops.blkio_reset_group_stats_fn(blkg); 314 pol->ops.pd_reset_stats_fn(blkg);
317 } 315 }
318 } 316 }
319 317
@@ -322,7 +320,7 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
322 return 0; 320 return 0;
323} 321}
324 322
325static const char *blkg_dev_name(struct blkio_group *blkg) 323static const char *blkg_dev_name(struct blkcg_gq *blkg)
326{ 324{
327 /* some drivers (floppy) instantiate a queue w/o disk registered */ 325 /* some drivers (floppy) instantiate a queue w/o disk registered */
328 if (blkg->q->backing_dev_info.dev) 326 if (blkg->q->backing_dev_info.dev)
@@ -347,12 +345,12 @@ static const char *blkg_dev_name(struct blkio_group *blkg)
347 * This is to be used to construct print functions for 345 * This is to be used to construct print functions for
348 * cftype->read_seq_string method. 346 * cftype->read_seq_string method.
349 */ 347 */
350void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg, 348void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
351 u64 (*prfill)(struct seq_file *, void *, int), 349 u64 (*prfill)(struct seq_file *, void *, int),
352 const struct blkio_policy_type *pol, int data, 350 const struct blkcg_policy *pol, int data,
353 bool show_total) 351 bool show_total)
354{ 352{
355 struct blkio_group *blkg; 353 struct blkcg_gq *blkg;
356 struct hlist_node *n; 354 struct hlist_node *n;
357 u64 total = 0; 355 u64 total = 0;
358 356
@@ -462,13 +460,12 @@ EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
462 * value. This function returns with RCU read lock and queue lock held and 460 * value. This function returns with RCU read lock and queue lock held and
463 * must be paired with blkg_conf_finish(). 461 * must be paired with blkg_conf_finish().
464 */ 462 */
465int blkg_conf_prep(struct blkio_cgroup *blkcg, 463int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
466 const struct blkio_policy_type *pol, const char *input, 464 const char *input, struct blkg_conf_ctx *ctx)
467 struct blkg_conf_ctx *ctx)
468 __acquires(rcu) __acquires(disk->queue->queue_lock) 465 __acquires(rcu) __acquires(disk->queue->queue_lock)
469{ 466{
470 struct gendisk *disk; 467 struct gendisk *disk;
471 struct blkio_group *blkg; 468 struct blkcg_gq *blkg;
472 unsigned int major, minor; 469 unsigned int major, minor;
473 unsigned long long v; 470 unsigned long long v;
474 int part, ret; 471 int part, ret;
@@ -529,16 +526,16 @@ void blkg_conf_finish(struct blkg_conf_ctx *ctx)
529} 526}
530EXPORT_SYMBOL_GPL(blkg_conf_finish); 527EXPORT_SYMBOL_GPL(blkg_conf_finish);
531 528
532struct cftype blkio_files[] = { 529struct cftype blkcg_files[] = {
533 { 530 {
534 .name = "reset_stats", 531 .name = "reset_stats",
535 .write_u64 = blkiocg_reset_stats, 532 .write_u64 = blkcg_reset_stats,
536 }, 533 },
537 { } /* terminate */ 534 { } /* terminate */
538}; 535};
539 536
540/** 537/**
541 * blkiocg_pre_destroy - cgroup pre_destroy callback 538 * blkcg_pre_destroy - cgroup pre_destroy callback
542 * @cgroup: cgroup of interest 539 * @cgroup: cgroup of interest
543 * 540 *
544 * This function is called when @cgroup is about to go away and responsible 541 * This function is called when @cgroup is about to go away and responsible
@@ -548,15 +545,15 @@ struct cftype blkio_files[] = {
548 * 545 *
549 * This is the blkcg counterpart of ioc_release_fn(). 546 * This is the blkcg counterpart of ioc_release_fn().
550 */ 547 */
551static int blkiocg_pre_destroy(struct cgroup *cgroup) 548static int blkcg_pre_destroy(struct cgroup *cgroup)
552{ 549{
553 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); 550 struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
554 551
555 spin_lock_irq(&blkcg->lock); 552 spin_lock_irq(&blkcg->lock);
556 553
557 while (!hlist_empty(&blkcg->blkg_list)) { 554 while (!hlist_empty(&blkcg->blkg_list)) {
558 struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first, 555 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
559 struct blkio_group, blkcg_node); 556 struct blkcg_gq, blkcg_node);
560 struct request_queue *q = blkg->q; 557 struct request_queue *q = blkg->q;
561 558
562 if (spin_trylock(q->queue_lock)) { 559 if (spin_trylock(q->queue_lock)) {
@@ -573,22 +570,22 @@ static int blkiocg_pre_destroy(struct cgroup *cgroup)
573 return 0; 570 return 0;
574} 571}
575 572
576static void blkiocg_destroy(struct cgroup *cgroup) 573static void blkcg_destroy(struct cgroup *cgroup)
577{ 574{
578 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); 575 struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
579 576
580 if (blkcg != &blkio_root_cgroup) 577 if (blkcg != &blkcg_root)
581 kfree(blkcg); 578 kfree(blkcg);
582} 579}
583 580
584static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup) 581static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
585{ 582{
586 static atomic64_t id_seq = ATOMIC64_INIT(0); 583 static atomic64_t id_seq = ATOMIC64_INIT(0);
587 struct blkio_cgroup *blkcg; 584 struct blkcg *blkcg;
588 struct cgroup *parent = cgroup->parent; 585 struct cgroup *parent = cgroup->parent;
589 586
590 if (!parent) { 587 if (!parent) {
591 blkcg = &blkio_root_cgroup; 588 blkcg = &blkcg_root;
592 goto done; 589 goto done;
593 } 590 }
594 591
@@ -656,7 +653,7 @@ void blkcg_exit_queue(struct request_queue *q)
656 * of the main cic data structures. For now we allow a task to change 653 * of the main cic data structures. For now we allow a task to change
657 * its cgroup only if it's the only owner of its ioc. 654 * its cgroup only if it's the only owner of its ioc.
658 */ 655 */
659static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 656static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
660{ 657{
661 struct task_struct *task; 658 struct task_struct *task;
662 struct io_context *ioc; 659 struct io_context *ioc;
@@ -677,12 +674,12 @@ static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
677 674
678struct cgroup_subsys blkio_subsys = { 675struct cgroup_subsys blkio_subsys = {
679 .name = "blkio", 676 .name = "blkio",
680 .create = blkiocg_create, 677 .create = blkcg_create,
681 .can_attach = blkiocg_can_attach, 678 .can_attach = blkcg_can_attach,
682 .pre_destroy = blkiocg_pre_destroy, 679 .pre_destroy = blkcg_pre_destroy,
683 .destroy = blkiocg_destroy, 680 .destroy = blkcg_destroy,
684 .subsys_id = blkio_subsys_id, 681 .subsys_id = blkio_subsys_id,
685 .base_cftypes = blkio_files, 682 .base_cftypes = blkcg_files,
686 .module = THIS_MODULE, 683 .module = THIS_MODULE,
687}; 684};
688EXPORT_SYMBOL_GPL(blkio_subsys); 685EXPORT_SYMBOL_GPL(blkio_subsys);
@@ -704,10 +701,10 @@ EXPORT_SYMBOL_GPL(blkio_subsys);
704 * [un]registerations. Returns 0 on success, -errno on failure. 701 * [un]registerations. Returns 0 on success, -errno on failure.
705 */ 702 */
706int blkcg_activate_policy(struct request_queue *q, 703int blkcg_activate_policy(struct request_queue *q,
707 const struct blkio_policy_type *pol) 704 const struct blkcg_policy *pol)
708{ 705{
709 LIST_HEAD(pds); 706 LIST_HEAD(pds);
710 struct blkio_group *blkg; 707 struct blkcg_gq *blkg;
711 struct blkg_policy_data *pd, *n; 708 struct blkg_policy_data *pd, *n;
712 int cnt = 0, ret; 709 int cnt = 0, ret;
713 710
@@ -720,7 +717,7 @@ int blkcg_activate_policy(struct request_queue *q,
720 spin_lock_irq(q->queue_lock); 717 spin_lock_irq(q->queue_lock);
721 718
722 rcu_read_lock(); 719 rcu_read_lock();
723 blkg = __blkg_lookup_create(&blkio_root_cgroup, q); 720 blkg = __blkg_lookup_create(&blkcg_root, q);
724 rcu_read_unlock(); 721 rcu_read_unlock();
725 722
726 if (IS_ERR(blkg)) { 723 if (IS_ERR(blkg)) {
@@ -764,7 +761,7 @@ int blkcg_activate_policy(struct request_queue *q,
764 761
765 blkg->pd[pol->plid] = pd; 762 blkg->pd[pol->plid] = pd;
766 pd->blkg = blkg; 763 pd->blkg = blkg;
767 pol->ops.blkio_init_group_fn(blkg); 764 pol->ops.pd_init_fn(blkg);
768 765
769 spin_unlock(&blkg->blkcg->lock); 766 spin_unlock(&blkg->blkcg->lock);
770 } 767 }
@@ -790,9 +787,9 @@ EXPORT_SYMBOL_GPL(blkcg_activate_policy);
790 * blkcg_activate_policy(). 787 * blkcg_activate_policy().
791 */ 788 */
792void blkcg_deactivate_policy(struct request_queue *q, 789void blkcg_deactivate_policy(struct request_queue *q,
793 const struct blkio_policy_type *pol) 790 const struct blkcg_policy *pol)
794{ 791{
795 struct blkio_group *blkg; 792 struct blkcg_gq *blkg;
796 793
797 if (!blkcg_policy_enabled(q, pol)) 794 if (!blkcg_policy_enabled(q, pol))
798 return; 795 return;
@@ -810,8 +807,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
810 /* grab blkcg lock too while removing @pd from @blkg */ 807 /* grab blkcg lock too while removing @pd from @blkg */
811 spin_lock(&blkg->blkcg->lock); 808 spin_lock(&blkg->blkcg->lock);
812 809
813 if (pol->ops.blkio_exit_group_fn) 810 if (pol->ops.pd_exit_fn)
814 pol->ops.blkio_exit_group_fn(blkg); 811 pol->ops.pd_exit_fn(blkg);
815 812
816 kfree(blkg->pd[pol->plid]); 813 kfree(blkg->pd[pol->plid]);
817 blkg->pd[pol->plid] = NULL; 814 blkg->pd[pol->plid] = NULL;
@@ -825,14 +822,13 @@ void blkcg_deactivate_policy(struct request_queue *q,
825EXPORT_SYMBOL_GPL(blkcg_deactivate_policy); 822EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
826 823
827/** 824/**
828 * blkio_policy_register - register a blkcg policy 825 * blkcg_policy_register - register a blkcg policy
829 * @blkiop: blkcg policy to register 826 * @pol: blkcg policy to register
830 * 827 *
831 * Register @blkiop with blkcg core. Might sleep and @blkiop may be 828 * Register @pol with blkcg core. Might sleep and @pol may be modified on
832 * modified on successful registration. Returns 0 on success and -errno on 829 * successful registration. Returns 0 on success and -errno on failure.
833 * failure.
834 */ 830 */
835int blkio_policy_register(struct blkio_policy_type *blkiop) 831int blkcg_policy_register(struct blkcg_policy *pol)
836{ 832{
837 int i, ret; 833 int i, ret;
838 834
@@ -841,45 +837,45 @@ int blkio_policy_register(struct blkio_policy_type *blkiop)
841 /* find an empty slot */ 837 /* find an empty slot */
842 ret = -ENOSPC; 838 ret = -ENOSPC;
843 for (i = 0; i < BLKCG_MAX_POLS; i++) 839 for (i = 0; i < BLKCG_MAX_POLS; i++)
844 if (!blkio_policy[i]) 840 if (!blkcg_policy[i])
845 break; 841 break;
846 if (i >= BLKCG_MAX_POLS) 842 if (i >= BLKCG_MAX_POLS)
847 goto out_unlock; 843 goto out_unlock;
848 844
849 /* register and update blkgs */ 845 /* register and update blkgs */
850 blkiop->plid = i; 846 pol->plid = i;
851 blkio_policy[i] = blkiop; 847 blkcg_policy[i] = pol;
852 848
853 /* everything is in place, add intf files for the new policy */ 849 /* everything is in place, add intf files for the new policy */
854 if (blkiop->cftypes) 850 if (pol->cftypes)
855 WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes)); 851 WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
856 ret = 0; 852 ret = 0;
857out_unlock: 853out_unlock:
858 mutex_unlock(&blkcg_pol_mutex); 854 mutex_unlock(&blkcg_pol_mutex);
859 return ret; 855 return ret;
860} 856}
861EXPORT_SYMBOL_GPL(blkio_policy_register); 857EXPORT_SYMBOL_GPL(blkcg_policy_register);
862 858
863/** 859/**
864 * blkiop_policy_unregister - unregister a blkcg policy 860 * blkcg_policy_unregister - unregister a blkcg policy
865 * @blkiop: blkcg policy to unregister 861 * @pol: blkcg policy to unregister
866 * 862 *
867 * Undo blkio_policy_register(@blkiop). Might sleep. 863 * Undo blkcg_policy_register(@pol). Might sleep.
868 */ 864 */
869void blkio_policy_unregister(struct blkio_policy_type *blkiop) 865void blkcg_policy_unregister(struct blkcg_policy *pol)
870{ 866{
871 mutex_lock(&blkcg_pol_mutex); 867 mutex_lock(&blkcg_pol_mutex);
872 868
873 if (WARN_ON(blkio_policy[blkiop->plid] != blkiop)) 869 if (WARN_ON(blkcg_policy[pol->plid] != pol))
874 goto out_unlock; 870 goto out_unlock;
875 871
876 /* kill the intf files first */ 872 /* kill the intf files first */
877 if (blkiop->cftypes) 873 if (pol->cftypes)
878 cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes); 874 cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
879 875
880 /* unregister and update blkgs */ 876 /* unregister and update blkgs */
881 blkio_policy[blkiop->plid] = NULL; 877 blkcg_policy[pol->plid] = NULL;
882out_unlock: 878out_unlock:
883 mutex_unlock(&blkcg_pol_mutex); 879 mutex_unlock(&blkcg_pol_mutex);
884} 880}
885EXPORT_SYMBOL_GPL(blkio_policy_unregister); 881EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index b347aa08d166..a443b84d2c16 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -37,7 +37,7 @@ enum blkg_rwstat_type {
37 BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR, 37 BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
38}; 38};
39 39
40struct blkio_cgroup { 40struct blkcg {
41 struct cgroup_subsys_state css; 41 struct cgroup_subsys_state css;
42 spinlock_t lock; 42 spinlock_t lock;
43 struct hlist_head blkg_list; 43 struct hlist_head blkg_list;
@@ -45,7 +45,7 @@ struct blkio_cgroup {
45 /* for policies to test whether associated blkcg has changed */ 45 /* for policies to test whether associated blkcg has changed */
46 uint64_t id; 46 uint64_t id;
47 47
48 /* TODO: per-policy storage in blkio_cgroup */ 48 /* TODO: per-policy storage in blkcg */
49 unsigned int cfq_weight; /* belongs to cfq */ 49 unsigned int cfq_weight; /* belongs to cfq */
50}; 50};
51 51
@@ -62,7 +62,7 @@ struct blkg_rwstat {
62/* per-blkg per-policy data */ 62/* per-blkg per-policy data */
63struct blkg_policy_data { 63struct blkg_policy_data {
64 /* the blkg this per-policy data belongs to */ 64 /* the blkg this per-policy data belongs to */
65 struct blkio_group *blkg; 65 struct blkcg_gq *blkg;
66 66
67 /* used during policy activation */ 67 /* used during policy activation */
68 struct list_head alloc_node; 68 struct list_head alloc_node;
@@ -71,12 +71,13 @@ struct blkg_policy_data {
71 char pdata[] __aligned(__alignof__(unsigned long long)); 71 char pdata[] __aligned(__alignof__(unsigned long long));
72}; 72};
73 73
74struct blkio_group { 74/* association between a blk cgroup and a request queue */
75struct blkcg_gq {
75 /* Pointer to the associated request_queue */ 76 /* Pointer to the associated request_queue */
76 struct request_queue *q; 77 struct request_queue *q;
77 struct list_head q_node; 78 struct list_head q_node;
78 struct hlist_node blkcg_node; 79 struct hlist_node blkcg_node;
79 struct blkio_cgroup *blkcg; 80 struct blkcg *blkcg;
80 /* reference count */ 81 /* reference count */
81 int refcnt; 82 int refcnt;
82 83
@@ -85,18 +86,18 @@ struct blkio_group {
85 struct rcu_head rcu_head; 86 struct rcu_head rcu_head;
86}; 87};
87 88
88typedef void (blkio_init_group_fn)(struct blkio_group *blkg); 89typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
89typedef void (blkio_exit_group_fn)(struct blkio_group *blkg); 90typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
90typedef void (blkio_reset_group_stats_fn)(struct blkio_group *blkg); 91typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
91 92
92struct blkio_policy_ops { 93struct blkcg_policy_ops {
93 blkio_init_group_fn *blkio_init_group_fn; 94 blkcg_pol_init_pd_fn *pd_init_fn;
94 blkio_exit_group_fn *blkio_exit_group_fn; 95 blkcg_pol_exit_pd_fn *pd_exit_fn;
95 blkio_reset_group_stats_fn *blkio_reset_group_stats_fn; 96 blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
96}; 97};
97 98
98struct blkio_policy_type { 99struct blkcg_policy {
99 struct blkio_policy_ops ops; 100 struct blkcg_policy_ops ops;
100 int plid; 101 int plid;
101 /* policy specific private data size */ 102 /* policy specific private data size */
102 size_t pdata_size; 103 size_t pdata_size;
@@ -104,29 +105,28 @@ struct blkio_policy_type {
104 struct cftype *cftypes; 105 struct cftype *cftypes;
105}; 106};
106 107
107extern struct blkio_cgroup blkio_root_cgroup; 108extern struct blkcg blkcg_root;
108 109
109struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); 110struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
110struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio); 111struct blkcg *bio_blkcg(struct bio *bio);
111struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg, 112struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
112 struct request_queue *q); 113struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
113struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg, 114 struct request_queue *q);
114 struct request_queue *q);
115int blkcg_init_queue(struct request_queue *q); 115int blkcg_init_queue(struct request_queue *q);
116void blkcg_drain_queue(struct request_queue *q); 116void blkcg_drain_queue(struct request_queue *q);
117void blkcg_exit_queue(struct request_queue *q); 117void blkcg_exit_queue(struct request_queue *q);
118 118
119/* Blkio controller policy registration */ 119/* Blkio controller policy registration */
120int blkio_policy_register(struct blkio_policy_type *); 120int blkcg_policy_register(struct blkcg_policy *pol);
121void blkio_policy_unregister(struct blkio_policy_type *); 121void blkcg_policy_unregister(struct blkcg_policy *pol);
122int blkcg_activate_policy(struct request_queue *q, 122int blkcg_activate_policy(struct request_queue *q,
123 const struct blkio_policy_type *pol); 123 const struct blkcg_policy *pol);
124void blkcg_deactivate_policy(struct request_queue *q, 124void blkcg_deactivate_policy(struct request_queue *q,
125 const struct blkio_policy_type *pol); 125 const struct blkcg_policy *pol);
126 126
127void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg, 127void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
128 u64 (*prfill)(struct seq_file *, void *, int), 128 u64 (*prfill)(struct seq_file *, void *, int),
129 const struct blkio_policy_type *pol, int data, 129 const struct blkcg_policy *pol, int data,
130 bool show_total); 130 bool show_total);
131u64 __blkg_prfill_u64(struct seq_file *sf, void *pdata, u64 v); 131u64 __blkg_prfill_u64(struct seq_file *sf, void *pdata, u64 v);
132u64 __blkg_prfill_rwstat(struct seq_file *sf, void *pdata, 132u64 __blkg_prfill_rwstat(struct seq_file *sf, void *pdata,
@@ -136,13 +136,12 @@ u64 blkg_prfill_rwstat(struct seq_file *sf, void *pdata, int off);
136 136
137struct blkg_conf_ctx { 137struct blkg_conf_ctx {
138 struct gendisk *disk; 138 struct gendisk *disk;
139 struct blkio_group *blkg; 139 struct blkcg_gq *blkg;
140 u64 v; 140 u64 v;
141}; 141};
142 142
143int blkg_conf_prep(struct blkio_cgroup *blkcg, 143int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
144 const struct blkio_policy_type *pol, const char *input, 144 const char *input, struct blkg_conf_ctx *ctx);
145 struct blkg_conf_ctx *ctx);
146void blkg_conf_finish(struct blkg_conf_ctx *ctx); 145void blkg_conf_finish(struct blkg_conf_ctx *ctx);
147 146
148 147
@@ -153,8 +152,8 @@ void blkg_conf_finish(struct blkg_conf_ctx *ctx);
153 * 152 *
154 * Return pointer to private data associated with the @blkg-@pol pair. 153 * Return pointer to private data associated with the @blkg-@pol pair.
155 */ 154 */
156static inline void *blkg_to_pdata(struct blkio_group *blkg, 155static inline void *blkg_to_pdata(struct blkcg_gq *blkg,
157 struct blkio_policy_type *pol) 156 struct blkcg_policy *pol)
158{ 157{
159 return blkg ? blkg->pd[pol->plid]->pdata : NULL; 158 return blkg ? blkg->pd[pol->plid]->pdata : NULL;
160} 159}
@@ -165,7 +164,7 @@ static inline void *blkg_to_pdata(struct blkio_group *blkg,
165 * 164 *
166 * @pdata is policy private data. Determine the blkg it's associated with. 165 * @pdata is policy private data. Determine the blkg it's associated with.
167 */ 166 */
168static inline struct blkio_group *pdata_to_blkg(void *pdata) 167static inline struct blkcg_gq *pdata_to_blkg(void *pdata)
169{ 168{
170 if (pdata) { 169 if (pdata) {
171 struct blkg_policy_data *pd = 170 struct blkg_policy_data *pd =
@@ -183,7 +182,7 @@ static inline struct blkio_group *pdata_to_blkg(void *pdata)
183 * 182 *
184 * Format the path of the cgroup of @blkg into @buf. 183 * Format the path of the cgroup of @blkg into @buf.
185 */ 184 */
186static inline int blkg_path(struct blkio_group *blkg, char *buf, int buflen) 185static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
187{ 186{
188 int ret; 187 int ret;
189 188
@@ -201,14 +200,14 @@ static inline int blkg_path(struct blkio_group *blkg, char *buf, int buflen)
201 * 200 *
202 * The caller should be holding queue_lock and an existing reference. 201 * The caller should be holding queue_lock and an existing reference.
203 */ 202 */
204static inline void blkg_get(struct blkio_group *blkg) 203static inline void blkg_get(struct blkcg_gq *blkg)
205{ 204{
206 lockdep_assert_held(blkg->q->queue_lock); 205 lockdep_assert_held(blkg->q->queue_lock);
207 WARN_ON_ONCE(!blkg->refcnt); 206 WARN_ON_ONCE(!blkg->refcnt);
208 blkg->refcnt++; 207 blkg->refcnt++;
209} 208}
210 209
211void __blkg_release(struct blkio_group *blkg); 210void __blkg_release(struct blkcg_gq *blkg);
212 211
213/** 212/**
214 * blkg_put - put a blkg reference 213 * blkg_put - put a blkg reference
@@ -216,7 +215,7 @@ void __blkg_release(struct blkio_group *blkg);
216 * 215 *
217 * The caller should be holding queue_lock. 216 * The caller should be holding queue_lock.
218 */ 217 */
219static inline void blkg_put(struct blkio_group *blkg) 218static inline void blkg_put(struct blkcg_gq *blkg)
220{ 219{
221 lockdep_assert_held(blkg->q->queue_lock); 220 lockdep_assert_held(blkg->q->queue_lock);
222 WARN_ON_ONCE(blkg->refcnt <= 0); 221 WARN_ON_ONCE(blkg->refcnt <= 0);
@@ -343,32 +342,32 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
343 342
344struct cgroup; 343struct cgroup;
345 344
346struct blkio_group { 345struct blkcg_gq {
347}; 346};
348 347
349struct blkio_policy_type { 348struct blkcg_policy {
350}; 349};
351 350
352static inline struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } 351static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
353static inline struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio) { return NULL; } 352static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
354static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg, void *key) { return NULL; } 353static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
355static inline int blkcg_init_queue(struct request_queue *q) { return 0; } 354static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
356static inline void blkcg_drain_queue(struct request_queue *q) { } 355static inline void blkcg_drain_queue(struct request_queue *q) { }
357static inline void blkcg_exit_queue(struct request_queue *q) { } 356static inline void blkcg_exit_queue(struct request_queue *q) { }
358static inline int blkio_policy_register(struct blkio_policy_type *blkiop) { return 0; } 357static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
359static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { } 358static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
360static inline int blkcg_activate_policy(struct request_queue *q, 359static inline int blkcg_activate_policy(struct request_queue *q,
361 const struct blkio_policy_type *pol) { return 0; } 360 const struct blkcg_policy *pol) { return 0; }
362static inline void blkcg_deactivate_policy(struct request_queue *q, 361static inline void blkcg_deactivate_policy(struct request_queue *q,
363 const struct blkio_policy_type *pol) { } 362 const struct blkcg_policy *pol) { }
364 363
365static inline void *blkg_to_pdata(struct blkio_group *blkg, 364static inline void *blkg_to_pdata(struct blkcg_gq *blkg,
366 struct blkio_policy_type *pol) { return NULL; } 365 struct blkcg_policy *pol) { return NULL; }
367static inline struct blkio_group *pdata_to_blkg(void *pdata, 366static inline struct blkcg_gq *pdata_to_blkg(void *pdata,
368 struct blkio_policy_type *pol) { return NULL; } 367 struct blkcg_policy *pol) { return NULL; }
369static inline char *blkg_path(struct blkio_group *blkg) { return NULL; } 368static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
370static inline void blkg_get(struct blkio_group *blkg) { } 369static inline void blkg_get(struct blkcg_gq *blkg) { }
371static inline void blkg_put(struct blkio_group *blkg) { } 370static inline void blkg_put(struct blkcg_gq *blkg) { }
372 371
373#endif /* CONFIG_BLK_CGROUP */ 372#endif /* CONFIG_BLK_CGROUP */
374#endif /* _BLK_CGROUP_H */ 373#endif /* _BLK_CGROUP_H */
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e9b7a47f6da0..00c7eff66ecf 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -21,7 +21,7 @@ static int throtl_quantum = 32;
21/* Throttling is performed over 100ms slice and after that slice is renewed */ 21/* Throttling is performed over 100ms slice and after that slice is renewed */
22static unsigned long throtl_slice = HZ/10; /* 100 ms */ 22static unsigned long throtl_slice = HZ/10; /* 100 ms */
23 23
24static struct blkio_policy_type blkio_policy_throtl; 24static struct blkcg_policy blkcg_policy_throtl;
25 25
26/* A workqueue to queue throttle related work */ 26/* A workqueue to queue throttle related work */
27static struct workqueue_struct *kthrotld_workqueue; 27static struct workqueue_struct *kthrotld_workqueue;
@@ -120,12 +120,12 @@ static LIST_HEAD(tg_stats_alloc_list);
120static void tg_stats_alloc_fn(struct work_struct *); 120static void tg_stats_alloc_fn(struct work_struct *);
121static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn); 121static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
122 122
123static inline struct throtl_grp *blkg_to_tg(struct blkio_group *blkg) 123static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
124{ 124{
125 return blkg_to_pdata(blkg, &blkio_policy_throtl); 125 return blkg_to_pdata(blkg, &blkcg_policy_throtl);
126} 126}
127 127
128static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg) 128static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
129{ 129{
130 return pdata_to_blkg(tg); 130 return pdata_to_blkg(tg);
131} 131}
@@ -208,7 +208,7 @@ alloc_stats:
208 goto alloc_stats; 208 goto alloc_stats;
209} 209}
210 210
211static void throtl_init_blkio_group(struct blkio_group *blkg) 211static void throtl_pd_init(struct blkcg_gq *blkg)
212{ 212{
213 struct throtl_grp *tg = blkg_to_tg(blkg); 213 struct throtl_grp *tg = blkg_to_tg(blkg);
214 214
@@ -233,7 +233,7 @@ static void throtl_init_blkio_group(struct blkio_group *blkg)
233 spin_unlock(&tg_stats_alloc_lock); 233 spin_unlock(&tg_stats_alloc_lock);
234} 234}
235 235
236static void throtl_exit_blkio_group(struct blkio_group *blkg) 236static void throtl_pd_exit(struct blkcg_gq *blkg)
237{ 237{
238 struct throtl_grp *tg = blkg_to_tg(blkg); 238 struct throtl_grp *tg = blkg_to_tg(blkg);
239 239
@@ -244,7 +244,7 @@ static void throtl_exit_blkio_group(struct blkio_group *blkg)
244 free_percpu(tg->stats_cpu); 244 free_percpu(tg->stats_cpu);
245} 245}
246 246
247static void throtl_reset_group_stats(struct blkio_group *blkg) 247static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
248{ 248{
249 struct throtl_grp *tg = blkg_to_tg(blkg); 249 struct throtl_grp *tg = blkg_to_tg(blkg);
250 int cpu; 250 int cpu;
@@ -260,33 +260,33 @@ static void throtl_reset_group_stats(struct blkio_group *blkg)
260 } 260 }
261} 261}
262 262
263static struct 263static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
264throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg) 264 struct blkcg *blkcg)
265{ 265{
266 /* 266 /*
267 * This is the common case when there are no blkio cgroups. 267 * This is the common case when there are no blkcgs. Avoid lookup
268 * Avoid lookup in this case 268 * in this case
269 */ 269 */
270 if (blkcg == &blkio_root_cgroup) 270 if (blkcg == &blkcg_root)
271 return td_root_tg(td); 271 return td_root_tg(td);
272 272
273 return blkg_to_tg(blkg_lookup(blkcg, td->queue)); 273 return blkg_to_tg(blkg_lookup(blkcg, td->queue));
274} 274}
275 275
276static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td, 276static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
277 struct blkio_cgroup *blkcg) 277 struct blkcg *blkcg)
278{ 278{
279 struct request_queue *q = td->queue; 279 struct request_queue *q = td->queue;
280 struct throtl_grp *tg = NULL; 280 struct throtl_grp *tg = NULL;
281 281
282 /* 282 /*
283 * This is the common case when there are no blkio cgroups. 283 * This is the common case when there are no blkcgs. Avoid lookup
284 * Avoid lookup in this case 284 * in this case
285 */ 285 */
286 if (blkcg == &blkio_root_cgroup) { 286 if (blkcg == &blkcg_root) {
287 tg = td_root_tg(td); 287 tg = td_root_tg(td);
288 } else { 288 } else {
289 struct blkio_group *blkg; 289 struct blkcg_gq *blkg;
290 290
291 blkg = blkg_lookup_create(blkcg, q); 291 blkg = blkg_lookup_create(blkcg, q);
292 292
@@ -665,7 +665,7 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
665 return 0; 665 return 0;
666} 666}
667 667
668static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes, 668static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
669 int rw) 669 int rw)
670{ 670{
671 struct throtl_grp *tg = blkg_to_tg(blkg); 671 struct throtl_grp *tg = blkg_to_tg(blkg);
@@ -822,7 +822,7 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
822static void throtl_process_limit_change(struct throtl_data *td) 822static void throtl_process_limit_change(struct throtl_data *td)
823{ 823{
824 struct request_queue *q = td->queue; 824 struct request_queue *q = td->queue;
825 struct blkio_group *blkg, *n; 825 struct blkcg_gq *blkg, *n;
826 826
827 if (!td->limits_changed) 827 if (!td->limits_changed)
828 return; 828 return;
@@ -951,9 +951,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf, void *pdata, int off)
951static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft, 951static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
952 struct seq_file *sf) 952 struct seq_file *sf)
953{ 953{
954 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); 954 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
955 955
956 blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkio_policy_throtl, 956 blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
957 cft->private, true); 957 cft->private, true);
958 return 0; 958 return 0;
959} 959}
@@ -979,29 +979,29 @@ static u64 tg_prfill_conf_uint(struct seq_file *sf, void *pdata, int off)
979static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft, 979static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
980 struct seq_file *sf) 980 struct seq_file *sf)
981{ 981{
982 blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), tg_prfill_conf_u64, 982 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
983 &blkio_policy_throtl, cft->private, false); 983 &blkcg_policy_throtl, cft->private, false);
984 return 0; 984 return 0;
985} 985}
986 986
987static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft, 987static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
988 struct seq_file *sf) 988 struct seq_file *sf)
989{ 989{
990 blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), tg_prfill_conf_uint, 990 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
991 &blkio_policy_throtl, cft->private, false); 991 &blkcg_policy_throtl, cft->private, false);
992 return 0; 992 return 0;
993} 993}
994 994
995static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf, 995static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
996 bool is_u64) 996 bool is_u64)
997{ 997{
998 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); 998 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
999 struct blkg_conf_ctx ctx; 999 struct blkg_conf_ctx ctx;
1000 struct throtl_grp *tg; 1000 struct throtl_grp *tg;
1001 struct throtl_data *td; 1001 struct throtl_data *td;
1002 int ret; 1002 int ret;
1003 1003
1004 ret = blkg_conf_prep(blkcg, &blkio_policy_throtl, buf, &ctx); 1004 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1005 if (ret) 1005 if (ret)
1006 return ret; 1006 return ret;
1007 1007
@@ -1086,11 +1086,11 @@ static void throtl_shutdown_wq(struct request_queue *q)
1086 cancel_delayed_work_sync(&td->throtl_work); 1086 cancel_delayed_work_sync(&td->throtl_work);
1087} 1087}
1088 1088
1089static struct blkio_policy_type blkio_policy_throtl = { 1089static struct blkcg_policy blkcg_policy_throtl = {
1090 .ops = { 1090 .ops = {
1091 .blkio_init_group_fn = throtl_init_blkio_group, 1091 .pd_init_fn = throtl_pd_init,
1092 .blkio_exit_group_fn = throtl_exit_blkio_group, 1092 .pd_exit_fn = throtl_pd_exit,
1093 .blkio_reset_group_stats_fn = throtl_reset_group_stats, 1093 .pd_reset_stats_fn = throtl_pd_reset_stats,
1094 }, 1094 },
1095 .pdata_size = sizeof(struct throtl_grp), 1095 .pdata_size = sizeof(struct throtl_grp),
1096 .cftypes = throtl_files, 1096 .cftypes = throtl_files,
@@ -1101,7 +1101,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
1101 struct throtl_data *td = q->td; 1101 struct throtl_data *td = q->td;
1102 struct throtl_grp *tg; 1102 struct throtl_grp *tg;
1103 bool rw = bio_data_dir(bio), update_disptime = true; 1103 bool rw = bio_data_dir(bio), update_disptime = true;
1104 struct blkio_cgroup *blkcg; 1104 struct blkcg *blkcg;
1105 bool throttled = false; 1105 bool throttled = false;
1106 1106
1107 if (bio->bi_rw & REQ_THROTTLED) { 1107 if (bio->bi_rw & REQ_THROTTLED) {
@@ -1118,7 +1118,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
1118 * just update the dispatch stats in lockless manner and return. 1118 * just update the dispatch stats in lockless manner and return.
1119 */ 1119 */
1120 rcu_read_lock(); 1120 rcu_read_lock();
1121 blkcg = bio_blkio_cgroup(bio); 1121 blkcg = bio_blkcg(bio);
1122 tg = throtl_lookup_tg(td, blkcg); 1122 tg = throtl_lookup_tg(td, blkcg);
1123 if (tg) { 1123 if (tg) {
1124 if (tg_no_rule_group(tg, rw)) { 1124 if (tg_no_rule_group(tg, rw)) {
@@ -1243,7 +1243,7 @@ int blk_throtl_init(struct request_queue *q)
1243 td->queue = q; 1243 td->queue = q;
1244 1244
1245 /* activate policy */ 1245 /* activate policy */
1246 ret = blkcg_activate_policy(q, &blkio_policy_throtl); 1246 ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
1247 if (ret) 1247 if (ret)
1248 kfree(td); 1248 kfree(td);
1249 return ret; 1249 return ret;
@@ -1253,7 +1253,7 @@ void blk_throtl_exit(struct request_queue *q)
1253{ 1253{
1254 BUG_ON(!q->td); 1254 BUG_ON(!q->td);
1255 throtl_shutdown_wq(q); 1255 throtl_shutdown_wq(q);
1256 blkcg_deactivate_policy(q, &blkio_policy_throtl); 1256 blkcg_deactivate_policy(q, &blkcg_policy_throtl);
1257 kfree(q->td); 1257 kfree(q->td);
1258} 1258}
1259 1259
@@ -1263,7 +1263,7 @@ static int __init throtl_init(void)
1263 if (!kthrotld_workqueue) 1263 if (!kthrotld_workqueue)
1264 panic("Failed to create kthrotld\n"); 1264 panic("Failed to create kthrotld\n");
1265 1265
1266 return blkio_policy_register(&blkio_policy_throtl); 1266 return blkcg_policy_register(&blkcg_policy_throtl);
1267} 1267}
1268 1268
1269module_init(throtl_init); 1269module_init(throtl_init);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 901286b5f5cb..792218281d91 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -17,7 +17,7 @@
17#include "blk.h" 17#include "blk.h"
18#include "blk-cgroup.h" 18#include "blk-cgroup.h"
19 19
20static struct blkio_policy_type blkio_policy_cfq __maybe_unused; 20static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
21 21
22/* 22/*
23 * tunables 23 * tunables
@@ -202,7 +202,7 @@ struct cfqg_stats {
202 struct blkg_stat dequeue; 202 struct blkg_stat dequeue;
203 /* total time spent waiting for it to be assigned a timeslice. */ 203 /* total time spent waiting for it to be assigned a timeslice. */
204 struct blkg_stat group_wait_time; 204 struct blkg_stat group_wait_time;
205 /* time spent idling for this blkio_group */ 205 /* time spent idling for this blkcg_gq */
206 struct blkg_stat idle_time; 206 struct blkg_stat idle_time;
207 /* total time with empty current active q with other requests queued */ 207 /* total time with empty current active q with other requests queued */
208 struct blkg_stat empty_time; 208 struct blkg_stat empty_time;
@@ -553,12 +553,12 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
553 553
554#ifdef CONFIG_CFQ_GROUP_IOSCHED 554#ifdef CONFIG_CFQ_GROUP_IOSCHED
555 555
556static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg) 556static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
557{ 557{
558 return blkg_to_pdata(blkg, &blkio_policy_cfq); 558 return blkg_to_pdata(blkg, &blkcg_policy_cfq);
559} 559}
560 560
561static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg) 561static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
562{ 562{
563 return pdata_to_blkg(cfqg); 563 return pdata_to_blkg(cfqg);
564} 564}
@@ -637,7 +637,7 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
637 io_start_time - start_time); 637 io_start_time - start_time);
638} 638}
639 639
640static void cfqg_stats_reset(struct blkio_group *blkg) 640static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
641{ 641{
642 struct cfq_group *cfqg = blkg_to_cfqg(blkg); 642 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
643 struct cfqg_stats *stats = &cfqg->stats; 643 struct cfqg_stats *stats = &cfqg->stats;
@@ -662,8 +662,8 @@ static void cfqg_stats_reset(struct blkio_group *blkg)
662 662
663#else /* CONFIG_CFQ_GROUP_IOSCHED */ 663#else /* CONFIG_CFQ_GROUP_IOSCHED */
664 664
665static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg) { return NULL; } 665static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg) { return NULL; }
666static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg) { return NULL; } 666static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg) { return NULL; }
667static inline void cfqg_get(struct cfq_group *cfqg) { } 667static inline void cfqg_get(struct cfq_group *cfqg) { }
668static inline void cfqg_put(struct cfq_group *cfqg) { } 668static inline void cfqg_put(struct cfq_group *cfqg) { }
669 669
@@ -1331,7 +1331,7 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1331} 1331}
1332 1332
1333#ifdef CONFIG_CFQ_GROUP_IOSCHED 1333#ifdef CONFIG_CFQ_GROUP_IOSCHED
1334static void cfq_init_blkio_group(struct blkio_group *blkg) 1334static void cfq_pd_init(struct blkcg_gq *blkg)
1335{ 1335{
1336 struct cfq_group *cfqg = blkg_to_cfqg(blkg); 1336 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1337 1337
@@ -1344,16 +1344,16 @@ static void cfq_init_blkio_group(struct blkio_group *blkg)
1344 * be held. 1344 * be held.
1345 */ 1345 */
1346static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd, 1346static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1347 struct blkio_cgroup *blkcg) 1347 struct blkcg *blkcg)
1348{ 1348{
1349 struct request_queue *q = cfqd->queue; 1349 struct request_queue *q = cfqd->queue;
1350 struct cfq_group *cfqg = NULL; 1350 struct cfq_group *cfqg = NULL;
1351 1351
1352 /* avoid lookup for the common case where there's no blkio cgroup */ 1352 /* avoid lookup for the common case where there's no blkcg */
1353 if (blkcg == &blkio_root_cgroup) { 1353 if (blkcg == &blkcg_root) {
1354 cfqg = cfqd->root_group; 1354 cfqg = cfqd->root_group;
1355 } else { 1355 } else {
1356 struct blkio_group *blkg; 1356 struct blkcg_gq *blkg;
1357 1357
1358 blkg = blkg_lookup_create(blkcg, q); 1358 blkg = blkg_lookup_create(blkcg, q);
1359 if (!IS_ERR(blkg)) 1359 if (!IS_ERR(blkg))
@@ -1386,8 +1386,8 @@ static u64 cfqg_prfill_weight_device(struct seq_file *sf, void *pdata, int off)
1386static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft, 1386static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
1387 struct seq_file *sf) 1387 struct seq_file *sf)
1388{ 1388{
1389 blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), 1389 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
1390 cfqg_prfill_weight_device, &blkio_policy_cfq, 0, 1390 cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
1391 false); 1391 false);
1392 return 0; 1392 return 0;
1393} 1393}
@@ -1395,19 +1395,19 @@ static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
1395static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft, 1395static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
1396 struct seq_file *sf) 1396 struct seq_file *sf)
1397{ 1397{
1398 seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->cfq_weight); 1398 seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
1399 return 0; 1399 return 0;
1400} 1400}
1401 1401
1402static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft, 1402static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1403 const char *buf) 1403 const char *buf)
1404{ 1404{
1405 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); 1405 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1406 struct blkg_conf_ctx ctx; 1406 struct blkg_conf_ctx ctx;
1407 struct cfq_group *cfqg; 1407 struct cfq_group *cfqg;
1408 int ret; 1408 int ret;
1409 1409
1410 ret = blkg_conf_prep(blkcg, &blkio_policy_cfq, buf, &ctx); 1410 ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1411 if (ret) 1411 if (ret)
1412 return ret; 1412 return ret;
1413 1413
@@ -1425,8 +1425,8 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1425 1425
1426static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val) 1426static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1427{ 1427{
1428 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); 1428 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1429 struct blkio_group *blkg; 1429 struct blkcg_gq *blkg;
1430 struct hlist_node *n; 1430 struct hlist_node *n;
1431 1431
1432 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX) 1432 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
@@ -1449,9 +1449,9 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1449static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft, 1449static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
1450 struct seq_file *sf) 1450 struct seq_file *sf)
1451{ 1451{
1452 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); 1452 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1453 1453
1454 blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkio_policy_cfq, 1454 blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
1455 cft->private, false); 1455 cft->private, false);
1456 return 0; 1456 return 0;
1457} 1457}
@@ -1459,9 +1459,9 @@ static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
1459static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft, 1459static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
1460 struct seq_file *sf) 1460 struct seq_file *sf)
1461{ 1461{
1462 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); 1462 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1463 1463
1464 blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkio_policy_cfq, 1464 blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
1465 cft->private, true); 1465 cft->private, true);
1466 return 0; 1466 return 0;
1467} 1467}
@@ -1485,10 +1485,10 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, void *pdata, int off)
1485static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft, 1485static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
1486 struct seq_file *sf) 1486 struct seq_file *sf)
1487{ 1487{
1488 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp); 1488 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1489 1489
1490 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size, 1490 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
1491 &blkio_policy_cfq, 0, false); 1491 &blkcg_policy_cfq, 0, false);
1492 return 0; 1492 return 0;
1493} 1493}
1494#endif /* CONFIG_DEBUG_BLK_CGROUP */ 1494#endif /* CONFIG_DEBUG_BLK_CGROUP */
@@ -1580,7 +1580,7 @@ static struct cftype cfq_blkcg_files[] = {
1580}; 1580};
1581#else /* GROUP_IOSCHED */ 1581#else /* GROUP_IOSCHED */
1582static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd, 1582static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1583 struct blkio_cgroup *blkcg) 1583 struct blkcg *blkcg)
1584{ 1584{
1585 return cfqd->root_group; 1585 return cfqd->root_group;
1586} 1586}
@@ -3135,7 +3135,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3135 uint64_t id; 3135 uint64_t id;
3136 3136
3137 rcu_read_lock(); 3137 rcu_read_lock();
3138 id = bio_blkio_cgroup(bio)->id; 3138 id = bio_blkcg(bio)->id;
3139 rcu_read_unlock(); 3139 rcu_read_unlock();
3140 3140
3141 /* 3141 /*
@@ -3166,14 +3166,14 @@ static struct cfq_queue *
3166cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, 3166cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3167 struct bio *bio, gfp_t gfp_mask) 3167 struct bio *bio, gfp_t gfp_mask)
3168{ 3168{
3169 struct blkio_cgroup *blkcg; 3169 struct blkcg *blkcg;
3170 struct cfq_queue *cfqq, *new_cfqq = NULL; 3170 struct cfq_queue *cfqq, *new_cfqq = NULL;
3171 struct cfq_group *cfqg; 3171 struct cfq_group *cfqg;
3172 3172
3173retry: 3173retry:
3174 rcu_read_lock(); 3174 rcu_read_lock();
3175 3175
3176 blkcg = bio_blkio_cgroup(bio); 3176 blkcg = bio_blkcg(bio);
3177 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg); 3177 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
3178 cfqq = cic_to_cfqq(cic, is_sync); 3178 cfqq = cic_to_cfqq(cic, is_sync);
3179 3179
@@ -3944,14 +3944,14 @@ static void cfq_exit_queue(struct elevator_queue *e)
3944#ifndef CONFIG_CFQ_GROUP_IOSCHED 3944#ifndef CONFIG_CFQ_GROUP_IOSCHED
3945 kfree(cfqd->root_group); 3945 kfree(cfqd->root_group);
3946#endif 3946#endif
3947 blkcg_deactivate_policy(q, &blkio_policy_cfq); 3947 blkcg_deactivate_policy(q, &blkcg_policy_cfq);
3948 kfree(cfqd); 3948 kfree(cfqd);
3949} 3949}
3950 3950
3951static int cfq_init_queue(struct request_queue *q) 3951static int cfq_init_queue(struct request_queue *q)
3952{ 3952{
3953 struct cfq_data *cfqd; 3953 struct cfq_data *cfqd;
3954 struct blkio_group *blkg __maybe_unused; 3954 struct blkcg_gq *blkg __maybe_unused;
3955 int i, ret; 3955 int i, ret;
3956 3956
3957 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); 3957 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
@@ -3966,7 +3966,7 @@ static int cfq_init_queue(struct request_queue *q)
3966 3966
3967 /* Init root group and prefer root group over other groups by default */ 3967 /* Init root group and prefer root group over other groups by default */
3968#ifdef CONFIG_CFQ_GROUP_IOSCHED 3968#ifdef CONFIG_CFQ_GROUP_IOSCHED
3969 ret = blkcg_activate_policy(q, &blkio_policy_cfq); 3969 ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
3970 if (ret) 3970 if (ret)
3971 goto out_free; 3971 goto out_free;
3972 3972
@@ -4156,10 +4156,10 @@ static struct elevator_type iosched_cfq = {
4156}; 4156};
4157 4157
4158#ifdef CONFIG_CFQ_GROUP_IOSCHED 4158#ifdef CONFIG_CFQ_GROUP_IOSCHED
4159static struct blkio_policy_type blkio_policy_cfq = { 4159static struct blkcg_policy blkcg_policy_cfq = {
4160 .ops = { 4160 .ops = {
4161 .blkio_init_group_fn = cfq_init_blkio_group, 4161 .pd_init_fn = cfq_pd_init,
4162 .blkio_reset_group_stats_fn = cfqg_stats_reset, 4162 .pd_reset_stats_fn = cfq_pd_reset_stats,
4163 }, 4163 },
4164 .pdata_size = sizeof(struct cfq_group), 4164 .pdata_size = sizeof(struct cfq_group),
4165 .cftypes = cfq_blkcg_files, 4165 .cftypes = cfq_blkcg_files,
@@ -4185,7 +4185,7 @@ static int __init cfq_init(void)
4185 cfq_group_idle = 0; 4185 cfq_group_idle = 0;
4186#endif 4186#endif
4187 4187
4188 ret = blkio_policy_register(&blkio_policy_cfq); 4188 ret = blkcg_policy_register(&blkcg_policy_cfq);
4189 if (ret) 4189 if (ret)
4190 return ret; 4190 return ret;
4191 4191
@@ -4202,13 +4202,13 @@ static int __init cfq_init(void)
4202err_free_pool: 4202err_free_pool:
4203 kmem_cache_destroy(cfq_pool); 4203 kmem_cache_destroy(cfq_pool);
4204err_pol_unreg: 4204err_pol_unreg:
4205 blkio_policy_unregister(&blkio_policy_cfq); 4205 blkcg_policy_unregister(&blkcg_policy_cfq);
4206 return ret; 4206 return ret;
4207} 4207}
4208 4208
4209static void __exit cfq_exit(void) 4209static void __exit cfq_exit(void)
4210{ 4210{
4211 blkio_policy_unregister(&blkio_policy_cfq); 4211 blkcg_policy_unregister(&blkcg_policy_cfq);
4212 elv_unregister(&iosched_cfq); 4212 elv_unregister(&iosched_cfq);
4213 kmem_cache_destroy(cfq_pool); 4213 kmem_cache_destroy(cfq_pool);
4214} 4214}