about · summary · refs · log · tree · commit · diff · stats
path: root/block/blk-throttle.c
diff options
context:
space:
mode:
author    Tejun Heo <tj@kernel.org>  2015-08-18 17:55:14 -0400
committer Jens Axboe <axboe@fb.com>  2015-08-18 18:49:17 -0400
commit a9520cd6f2ac1fbbf206b915946534c6dddbaae2 (patch)
tree   906dbb619ff1c078486a2fad514d3427aeec6250 /block/blk-throttle.c
parent b2ce2643cc705aa9043642d7b6248ccfd8e20629 (diff)
blkcg: make blkcg_policy methods take a pointer to blkcg_policy_data
The newly added ->pd_alloc_fn() and ->pd_free_fn() deal with pd (blkg_policy_data) while the older ones use blkg (blkcg_gq). As using blkg doesn't make sense for ->pd_alloc_fn() and after allocation pd can always be mapped to blkg and given that these are policy-specific methods, it makes sense to converge on pd. This patch makes all methods deal with pd instead of blkg. Most conversions are trivial. In blk-cgroup.c, a couple method invocation sites now test whether pd exists instead of policy state for consistency. This shouldn't cause any behavioral differences. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--  block/blk-throttle.c  13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c3a235b8ec7e..c2c75477a6b2 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -377,9 +377,10 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 	return &tg->pd;
 }
 
-static void throtl_pd_init(struct blkcg_gq *blkg)
+static void throtl_pd_init(struct blkg_policy_data *pd)
 {
-	struct throtl_grp *tg = blkg_to_tg(blkg);
+	struct throtl_grp *tg = pd_to_tg(pd);
+	struct blkcg_gq *blkg = tg_to_blkg(tg);
 	struct throtl_data *td = blkg->q->td;
 	struct throtl_service_queue *sq = &tg->service_queue;
 
@@ -417,13 +418,13 @@ static void tg_update_has_rules(struct throtl_grp *tg)
 		(tg->bps[rw] != -1 || tg->iops[rw] != -1);
 }
 
-static void throtl_pd_online(struct blkcg_gq *blkg)
+static void throtl_pd_online(struct blkg_policy_data *pd)
 {
 	/*
 	 * We don't want new groups to escape the limits of its ancestors.
 	 * Update has_rules[] after a new group is brought online.
 	 */
-	tg_update_has_rules(blkg_to_tg(blkg));
+	tg_update_has_rules(pd_to_tg(pd));
 }
 
static void throtl_pd_free(struct blkg_policy_data *pd)
@@ -435,9 +436,9 @@ static void throtl_pd_free(struct blkg_policy_data *pd)
 	kfree(tg);
 }
 
-static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
+static void throtl_pd_reset_stats(struct blkg_policy_data *pd)
 {
-	struct throtl_grp *tg = blkg_to_tg(blkg);
+	struct throtl_grp *tg = pd_to_tg(pd);
 	int cpu;
 
 	for_each_possible_cpu(cpu) {