Diffstat (limited to 'block')
 -rw-r--r--  block/blk-cgroup.c    | 49
 -rw-r--r--  block/blk-cgroup.h    | 38
 -rw-r--r--  block/blk-throttle.c  | 43
 -rw-r--r--  block/cfq-iosched.c   | 90
 4 files changed, 105 insertions(+), 115 deletions(-)
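The recurring change in every file below is that cgroup interface callbacks receive a struct cgroup_subsys_state * (css) directly instead of a struct cgroup *, with css_to_blkcg() replacing cgroup_to_blkcg(). A minimal sketch of the before/after shape of such a cftype read handler; blkcg_print_example_old()/blkcg_print_example_new() are hypothetical names for illustration, not functions touched by this patch:

/* Before: the handler received the cgroup and derived the blkcg from it. */
static int blkcg_print_example_old(struct cgroup *cgrp, struct cftype *cft,
				   struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);	/* helper removed by this series */

	seq_printf(sf, "%u\n", blkcg->cfq_weight);
	return 0;
}

/* After: the css is passed directly and mapped with container_of(). */
static int blkcg_print_example_new(struct cgroup_subsys_state *css,
				   struct cftype *cft, struct seq_file *sf)
{
	struct blkcg *blkcg = css_to_blkcg(css);	/* new helper, see blk-cgroup.h below */

	seq_printf(sf, "%u\n", blkcg->cfq_weight);
	return 0;
}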
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 290792a13e3c..e90c7c164c83 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -437,10 +437,10 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
 	return &blkg->rl;
 }
 
-static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
-			     u64 val)
+static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+			     struct cftype *cftype, u64 val)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkcg_gq *blkg;
 	int i;
 
@@ -614,15 +614,13 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
 {
 	struct blkcg_policy *pol = blkcg_policy[pd->plid];
 	struct blkcg_gq *pos_blkg;
-	struct cgroup *pos_cgrp;
-	u64 sum;
+	struct cgroup_subsys_state *pos_css;
+	u64 sum = 0;
 
 	lockdep_assert_held(pd->blkg->q->queue_lock);
 
-	sum = blkg_stat_read((void *)pd + off);
-
 	rcu_read_lock();
-	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
 		struct blkg_stat *stat = (void *)pos_pd + off;
 
@@ -649,16 +647,14 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
 {
 	struct blkcg_policy *pol = blkcg_policy[pd->plid];
 	struct blkcg_gq *pos_blkg;
-	struct cgroup *pos_cgrp;
-	struct blkg_rwstat sum;
+	struct cgroup_subsys_state *pos_css;
+	struct blkg_rwstat sum = { };
 	int i;
 
 	lockdep_assert_held(pd->blkg->q->queue_lock);
 
-	sum = blkg_rwstat_read((void *)pd + off);
-
 	rcu_read_lock();
-	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
 		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
 		struct blkg_rwstat tmp;
@@ -765,18 +761,18 @@ struct cftype blkcg_files[] = {
 
 /**
  * blkcg_css_offline - cgroup css_offline callback
- * @cgroup: cgroup of interest
+ * @css: css of interest
  *
- * This function is called when @cgroup is about to go away and responsible
- * for shooting down all blkgs associated with @cgroup. blkgs should be
+ * This function is called when @css is about to go away and responsible
+ * for shooting down all blkgs associated with @css. blkgs should be
  * removed while holding both q and blkcg locks. As blkcg lock is nested
  * inside q lock, this function performs reverse double lock dancing.
  *
  * This is the blkcg counterpart of ioc_release_fn().
  */
-static void blkcg_css_offline(struct cgroup *cgroup)
+static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	spin_lock_irq(&blkcg->lock);
 
@@ -798,21 +794,21 @@ static void blkcg_css_offline(struct cgroup *cgroup)
 	spin_unlock_irq(&blkcg->lock);
 }
 
-static void blkcg_css_free(struct cgroup *cgroup)
+static void blkcg_css_free(struct cgroup_subsys_state *css)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	if (blkcg != &blkcg_root)
 		kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 {
 	static atomic64_t id_seq = ATOMIC64_INIT(0);
 	struct blkcg *blkcg;
-	struct cgroup *parent = cgroup->parent;
 
-	if (!parent) {
+	if (!parent_css) {
 		blkcg = &blkcg_root;
 		goto done;
 	}
@@ -883,14 +879,15 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_subsys_state *css,
+			    struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct io_context *ioc;
 	int ret = 0;
 
 	/* task_lock() is needed to avoid races with exit_io_context() */
-	cgroup_taskset_for_each(task, cgrp, tset) {
+	cgroup_taskset_for_each(task, css, tset) {
 		task_lock(task);
 		ioc = task->io_context;
 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -1127,7 +1124,7 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
 
 	/* kill the intf files first */
 	if (pol->cftypes)
-		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+		cgroup_rm_cftypes(pol->cftypes);
 
 	/* unregister and update blkgs */
 	blkcg_policy[pol->plid] = NULL;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 8056c03a3382..ae6969a7ffd4 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -179,22 +179,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
 
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
+static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
 {
-	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
-			    struct blkcg, css);
+	return css ? container_of(css, struct blkcg, css) : NULL;
 }
 
 static inline struct blkcg *task_blkcg(struct task_struct *tsk)
 {
-	return container_of(task_subsys_state(tsk, blkio_subsys_id),
-			    struct blkcg, css);
+	return css_to_blkcg(task_css(tsk, blkio_subsys_id));
 }
 
 static inline struct blkcg *bio_blkcg(struct bio *bio)
 {
 	if (bio && bio->bi_css)
-		return container_of(bio->bi_css, struct blkcg, css);
+		return css_to_blkcg(bio->bi_css);
 	return task_blkcg(current);
 }
 
@@ -206,9 +204,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
  */
 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
 {
-	struct cgroup *pcg = blkcg->css.cgroup->parent;
-
-	return pcg ? cgroup_to_blkcg(pcg) : NULL;
+	return css_to_blkcg(css_parent(&blkcg->css));
 }
 
 /**
@@ -288,32 +284,33 @@ struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
 /**
  * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
  * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @p_blkg: target blkg to walk descendants of
  *
  * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU
  * read locked. If called under either blkcg or queue lock, the iteration
  * is guaranteed to include all and only online blkgs. The caller may
- * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip
- * subtree.
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
  */
-#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg)		\
-	cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
-		if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp),	\
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
+	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
+		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
 					      (p_blkg)->q, false)))
 
 /**
  * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
  * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @p_blkg: target blkg to walk descendants of
  *
  * Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead. Synchronization rules are the same.
+ * traversal instead. Synchronization rules are the same. @p_blkg is
+ * included in the iteration and the last node to be visited.
  */
-#define blkg_for_each_descendant_post(d_blkg, pos_cgrp, p_blkg)	\
-	cgroup_for_each_descendant_post((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
-		if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp),	\
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
+	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
+		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
 					      (p_blkg)->q, false)))
 
 /**
@@ -576,7 +573,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
 static inline void blkcg_deactivate_policy(struct request_queue *q,
 					   const struct blkcg_policy *pol) { }
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
 
 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
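The rewritten iterators above also change semantics: as the updated comments note, @p_blkg itself is now part of the walk (first node for pre-order, last for post-order). That is why the recursive-sum helpers in blk-cgroup.c above no longer prime their accumulators with the local counter, and why blk-throttle.c below drops its separate handling of the origin/root group. A minimal usage sketch under those assumptions; count_online_blkgs() is a hypothetical helper, not part of this patch:

/* Hypothetical helper: count @p_blkg and all of its online descendants. */
static unsigned int count_online_blkgs(struct blkcg_gq *p_blkg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *pos_blkg;
	unsigned int nr = 0;

	lockdep_assert_held(p_blkg->q->queue_lock);	/* guarantees "all and only online" */

	rcu_read_lock();	/* the walk must be RCU read locked */
	blkg_for_each_descendant_pre(pos_blkg, pos_css, p_blkg)
		nr++;		/* p_blkg itself is visited first */
	rcu_read_unlock();

	return nr;
}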
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 08a32dfd3844..8331aba9426f 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1293,10 +1293,10 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 }
 
-static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
-			       struct seq_file *sf)
+static int tg_print_cpu_rwstat(struct cgroup_subsys_state *css,
+			       struct cftype *cft, struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
 			  cft->private, true);
@@ -1325,31 +1325,31 @@ static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
 	return __blkg_prfill_u64(sf, pd, v);
 }
 
-static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
-			     struct seq_file *sf)
+static int tg_print_conf_u64(struct cgroup_subsys_state *css,
+			     struct cftype *cft, struct seq_file *sf)
 {
-	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+	blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_u64,
 			  &blkcg_policy_throtl, cft->private, false);
 	return 0;
 }
 
-static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
-			      struct seq_file *sf)
+static int tg_print_conf_uint(struct cgroup_subsys_state *css,
+			      struct cftype *cft, struct seq_file *sf)
 {
-	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+	blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_uint,
 			  &blkcg_policy_throtl, cft->private, false);
 	return 0;
 }
 
-static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
-		       bool is_u64)
+static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
+		       const char *buf, bool is_u64)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkg_conf_ctx ctx;
 	struct throtl_grp *tg;
 	struct throtl_service_queue *sq;
 	struct blkcg_gq *blkg;
-	struct cgroup *pos_cgrp;
+	struct cgroup_subsys_state *pos_css;
 	int ret;
 
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
@@ -1379,8 +1379,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	 * restrictions in the whole hierarchy and allows them to bypass
 	 * blk-throttle.
 	 */
-	tg_update_has_rules(tg);
-	blkg_for_each_descendant_pre(blkg, pos_cgrp, ctx.blkg)
+	blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
 		tg_update_has_rules(blkg_to_tg(blkg));
 
 	/*
@@ -1403,16 +1402,16 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	return 0;
 }
 
-static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
 			   const char *buf)
 {
-	return tg_set_conf(cgrp, cft, buf, true);
+	return tg_set_conf(css, cft, buf, true);
 }
 
-static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
 			    const char *buf)
 {
-	return tg_set_conf(cgrp, cft, buf, false);
+	return tg_set_conf(css, cft, buf, false);
 }
 
 static struct cftype throtl_files[] = {
@@ -1623,7 +1622,7 @@ void blk_throtl_drain(struct request_queue *q)
 {
 	struct throtl_data *td = q->td;
 	struct blkcg_gq *blkg;
-	struct cgroup *pos_cgrp;
+	struct cgroup_subsys_state *pos_css;
 	struct bio *bio;
 	int rw;
 
@@ -1636,11 +1635,9 @@ void blk_throtl_drain(struct request_queue *q)
 	 * better to walk service_queue tree directly but blkg walk is
 	 * easier.
 	 */
-	blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
 		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
 
-	tg_drain_bios(&td_root_tg(td)->service_queue);
-
 	/* finally, transfer bios from top-level tg's into the td */
 	tg_drain_bios(&td->service_queue);
 
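The two deletions in tg_set_conf() and blk_throtl_drain() follow directly from the new iterator behavior: the configured group and the root blkg are now visited by the walks themselves, so the explicit tg_update_has_rules(tg) and tg_drain_bios(&td_root_tg(td)->service_queue) calls would be redundant. A hedged sketch of the propagation pattern tg_set_conf() now relies on; example_propagate_rules() is a hypothetical helper assuming the caller holds rcu_read_lock() and the queue lock, as it does after blkg_conf_prep():

/* Hypothetical sketch: one pre-order walk covers @origin and its whole subtree. */
static void example_propagate_rules(struct blkcg_gq *origin)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	/* origin is visited first, so no separate call on it is needed */
	blkg_for_each_descendant_pre(blkg, pos_css, origin)
		tg_update_has_rules(blkg_to_tg(blkg));
}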
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d5bbdcfd0dab..dabb9d02cf9a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1607,12 +1607,11 @@ static u64 cfqg_prfill_weight_device(struct seq_file *sf,
 	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
 }
 
-static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				    struct seq_file *sf)
+static int cfqg_print_weight_device(struct cgroup_subsys_state *css,
+				    struct cftype *cft, struct seq_file *sf)
 {
-	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
-			  cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
-			  false);
+	blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_weight_device,
+			  &blkcg_policy_cfq, 0, false);
 	return 0;
 }
 
@@ -1626,35 +1625,34 @@ static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
 	return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
 }
 
-static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
+static int cfqg_print_leaf_weight_device(struct cgroup_subsys_state *css,
 					 struct cftype *cft,
 					 struct seq_file *sf)
 {
-	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
-			  cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
-			  false);
+	blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_leaf_weight_device,
+			  &blkcg_policy_cfq, 0, false);
 	return 0;
 }
 
-static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+static int cfq_print_weight(struct cgroup_subsys_state *css, struct cftype *cft,
 			    struct seq_file *sf)
 {
-	seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+	seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_weight);
 	return 0;
 }
 
-static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
-				 struct seq_file *sf)
+static int cfq_print_leaf_weight(struct cgroup_subsys_state *css,
+				 struct cftype *cft, struct seq_file *sf)
 {
-	seq_printf(sf, "%u\n",
-		   cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
+	seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_leaf_weight);
 	return 0;
 }
 
-static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				    const char *buf, bool is_leaf_weight)
+static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
+				    struct cftype *cft, const char *buf,
+				    bool is_leaf_weight)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkg_conf_ctx ctx;
 	struct cfq_group *cfqg;
 	int ret;
@@ -1680,22 +1678,22 @@ static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
 	return ret;
 }
 
-static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				  const char *buf)
+static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
+				  struct cftype *cft, const char *buf)
 {
-	return __cfqg_set_weight_device(cgrp, cft, buf, false);
+	return __cfqg_set_weight_device(css, cft, buf, false);
 }
 
-static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
-				       const char *buf)
+static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
+				       struct cftype *cft, const char *buf)
 {
-	return __cfqg_set_weight_device(cgrp, cft, buf, true);
+	return __cfqg_set_weight_device(css, cft, buf, true);
 }
 
-static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
-			    bool is_leaf_weight)
+static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+			    u64 val, bool is_leaf_weight)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 	struct blkcg_gq *blkg;
 
 	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
@@ -1727,30 +1725,32 @@ static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
 	return 0;
 }
 
-static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+			  u64 val)
 {
-	return __cfq_set_weight(cgrp, cft, val, false);
+	return __cfq_set_weight(css, cft, val, false);
 }
 
-static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
+			       struct cftype *cft, u64 val)
 {
-	return __cfq_set_weight(cgrp, cft, val, true);
+	return __cfq_set_weight(css, cft, val, true);
 }
 
-static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+static int cfqg_print_stat(struct cgroup_subsys_state *css, struct cftype *cft,
 			   struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
 			  cft->private, false);
 	return 0;
 }
 
-static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
-			     struct seq_file *sf)
+static int cfqg_print_rwstat(struct cgroup_subsys_state *css,
+			     struct cftype *cft, struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
 			  cft->private, true);
@@ -1773,20 +1773,20 @@ static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
 	return __blkg_prfill_rwstat(sf, pd, &sum);
 }
 
-static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
-				     struct seq_file *sf)
+static int cfqg_print_stat_recursive(struct cgroup_subsys_state *css,
+				     struct cftype *cft, struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
 			  &blkcg_policy_cfq, cft->private, false);
 	return 0;
 }
 
-static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
-				       struct seq_file *sf)
+static int cfqg_print_rwstat_recursive(struct cgroup_subsys_state *css,
+				       struct cftype *cft, struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
 			  &blkcg_policy_cfq, cft->private, true);
@@ -1810,10 +1810,10 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
 }
 
 /* print avg_queue_size */
-static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
-				     struct seq_file *sf)
+static int cfqg_print_avg_queue_size(struct cgroup_subsys_state *css,
+				     struct cftype *cft, struct seq_file *sf)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
 			  &blkcg_policy_cfq, 0, false);