author		Tejun Heo <tj@kernel.org>	2015-08-18 17:55:13 -0400
committer	Jens Axboe <axboe@fb.com>	2015-08-18 18:49:17 -0400
commit		b2ce2643cc705aa9043642d7b6248ccfd8e20629
tree		5a759c516fc27535b5f429b1cd369fd39764880e
parent		4fb72036fbf9c28de7a64b1d3f19b4ce9da1c6bf
blk-throttle: clean up blkg_policy_data alloc/init/exit/free methods
With the recent addition of alloc and free methods, things became
messier.  This patch reorganizes them as follows.

* ->pd_alloc_fn()

  Responsible for allocation and static initializations - the ones
  which can be done independent of where the pd might be attached.

* ->pd_init_fn()

  Initializations which require the knowledge of where the pd is
  attached.

* ->pd_free_fn()

  The counterpart of pd_alloc_fn().  Static de-init and freeing.

This leaves ->pd_exit_fn() without any users.  Removed.

While at it, collapse the one-liner function throtl_pd_exit(), which
has only one user, into its user.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--	block/blk-cgroup.c		11
-rw-r--r--	block/blk-throttle.c		57
-rw-r--r--	block/cfq-iosched.c		15
-rw-r--r--	include/linux/blk-cgroup.h	 2
4 files changed, 31 insertions, 54 deletions
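
Editor's note: as a quick orientation before the diff, here is a minimal
C sketch (not part of the patch) of the pd callback ordering this change
establishes, paraphrased from the call sites in block/blk-cgroup.c such
as blkcg_deactivate_policy().  The function name is hypothetical, and
locking, refcounting and error handling are elided.

/* Hypothetical sketch of the pd lifecycle after this patch. */
static void pd_lifecycle_sketch(struct blkcg_gq *blkg,
				struct blkcg_policy *pol)
{
	/* ->pd_alloc_fn(): allocation plus static, attachment-independent init */
	blkg->pd[pol->plid] = pol->pd_alloc_fn(GFP_KERNEL, blkg->q->node);

	/* ->pd_init_fn(): init that needs to know where the pd is attached */
	if (pol->pd_init_fn)
		pol->pd_init_fn(blkg);

	/* pd_online_fn()/pd_offline_fn() still bracket active use */
	if (pol->pd_online_fn)
		pol->pd_online_fn(blkg);
	if (pol->pd_offline_fn)
		pol->pd_offline_fn(blkg);

	/* ->pd_free_fn(): sole teardown hook now that pd_exit_fn() is gone */
	if (blkg->pd[pol->plid])
		pol->pd_free_fn(blkg->pd[pol->plid]);
}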
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d1bc6099bd1e..acfb09af58a5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -402,15 +402,6 @@ static void blkg_destroy_all(struct request_queue *q)
 void __blkg_release_rcu(struct rcu_head *rcu_head)
 {
 	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
-	int i;
-
-	/* tell policies that this one is being freed */
-	for (i = 0; i < BLKCG_MAX_POLS; i++) {
-		struct blkcg_policy *pol = blkcg_policy[i];
-
-		if (blkg->pd[i] && pol->pd_exit_fn)
-			pol->pd_exit_fn(blkg);
-	}
 
 	/* release the blkcg and parent blkg refs this blkg has been holding */
 	css_put(&blkg->blkcg->css);
@@ -1127,8 +1118,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
 		if (pol->pd_offline_fn)
 			pol->pd_offline_fn(blkg);
-		if (pol->pd_exit_fn)
-			pol->pd_exit_fn(blkg);
 
 		if (blkg->pd[pol->plid]) {
 			pol->pd_free_fn(blkg->pd[pol->plid]);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3c869768cfdd..c3a235b8ec7e 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -330,26 +330,19 @@ static struct bio *throtl_pop_queued(struct list_head *queued,
 }
 
 /* init a service_queue, assumes the caller zeroed it */
-static void throtl_service_queue_init(struct throtl_service_queue *sq,
-				      struct throtl_service_queue *parent_sq)
+static void throtl_service_queue_init(struct throtl_service_queue *sq)
 {
 	INIT_LIST_HEAD(&sq->queued[0]);
 	INIT_LIST_HEAD(&sq->queued[1]);
 	sq->pending_tree = RB_ROOT;
-	sq->parent_sq = parent_sq;
 	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
 		    (unsigned long)sq);
 }
 
-static void throtl_service_queue_exit(struct throtl_service_queue *sq)
-{
-	del_timer_sync(&sq->pending_timer);
-}
-
 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 {
 	struct throtl_grp *tg;
-	int cpu;
+	int rw, cpu;
 
 	tg = kzalloc_node(sizeof(*tg), gfp, node);
 	if (!tg)
@@ -361,6 +354,19 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 		return NULL;
 	}
 
+	throtl_service_queue_init(&tg->service_queue);
+
+	for (rw = READ; rw <= WRITE; rw++) {
+		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
+		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
+	}
+
+	RB_CLEAR_NODE(&tg->rb_node);
+	tg->bps[READ] = -1;
+	tg->bps[WRITE] = -1;
+	tg->iops[READ] = -1;
+	tg->iops[WRITE] = -1;
+
 	for_each_possible_cpu(cpu) {
 		struct tg_stats_cpu *stats_cpu = per_cpu_ptr(tg->stats_cpu, cpu);
 
@@ -375,8 +381,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 	struct throtl_data *td = blkg->q->td;
-	struct throtl_service_queue *parent_sq;
-	int rw;
+	struct throtl_service_queue *sq = &tg->service_queue;
 
 	/*
 	 * If on the default hierarchy, we switch to properly hierarchical
@@ -391,25 +396,10 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 * Limits of a group don't interact with limits of other groups
 	 * regardless of the position of the group in the hierarchy.
 	 */
-	parent_sq = &td->service_queue;
-
+	sq->parent_sq = &td->service_queue;
 	if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
-		parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
-
-	throtl_service_queue_init(&tg->service_queue, parent_sq);
-
-	for (rw = READ; rw <= WRITE; rw++) {
-		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
-		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
-	}
-
-	RB_CLEAR_NODE(&tg->rb_node);
+		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 	tg->td = td;
-
-	tg->bps[READ] = -1;
-	tg->bps[WRITE] = -1;
-	tg->iops[READ] = -1;
-	tg->iops[WRITE] = -1;
 }
 
 /*
@@ -436,17 +426,11 @@ static void throtl_pd_online(struct blkcg_gq *blkg)
 	tg_update_has_rules(blkg_to_tg(blkg));
 }
 
-static void throtl_pd_exit(struct blkcg_gq *blkg)
-{
-	struct throtl_grp *tg = blkg_to_tg(blkg);
-
-	throtl_service_queue_exit(&tg->service_queue);
-}
-
 static void throtl_pd_free(struct blkg_policy_data *pd)
 {
 	struct throtl_grp *tg = pd_to_tg(pd);
 
+	del_timer_sync(&tg->service_queue.pending_timer);
 	free_percpu(tg->stats_cpu);
 	kfree(tg);
 }
@@ -1421,7 +1405,6 @@ static struct blkcg_policy blkcg_policy_throtl = {
 	.pd_alloc_fn		= throtl_pd_alloc,
 	.pd_init_fn		= throtl_pd_init,
 	.pd_online_fn		= throtl_pd_online,
-	.pd_exit_fn		= throtl_pd_exit,
 	.pd_free_fn		= throtl_pd_free,
 	.pd_reset_stats_fn	= throtl_pd_reset_stats,
 };
@@ -1616,7 +1599,7 @@ int blk_throtl_init(struct request_queue *q)
 		return -ENOMEM;
 
 	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
-	throtl_service_queue_init(&td->service_queue, NULL);
+	throtl_service_queue_init(&td->service_queue);
 
 	q->td = td;
 	td->queue = q;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 69ce2883099e..4b795c7250ef 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1584,7 +1584,17 @@ static void cfq_cpd_init(const struct blkcg *blkcg)
 
 static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
 {
-	return kzalloc_node(sizeof(struct cfq_group), gfp, node);
+	struct cfq_group *cfqg;
+
+	cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
+	if (!cfqg)
+		return NULL;
+
+	cfq_init_cfqg_base(cfqg);
+	cfqg_stats_init(&cfqg->stats);
+	cfqg_stats_init(&cfqg->dead_stats);
+
+	return &cfqg->pd;
 }
 
 static void cfq_pd_init(struct blkcg_gq *blkg)
@@ -1592,11 +1602,8 @@ static void cfq_pd_init(struct blkcg_gq *blkg)
 	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 	struct cfq_group_data *cgd = blkcg_to_cfqgd(blkg->blkcg);
 
-	cfq_init_cfqg_base(cfqg);
 	cfqg->weight = cgd->weight;
 	cfqg->leaf_weight = cgd->leaf_weight;
-	cfqg_stats_init(&cfqg->stats);
-	cfqg_stats_init(&cfqg->dead_stats);
 }
 
 static void cfq_pd_offline(struct blkcg_gq *blkg)
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index bd173ea360ce..9879469b1b38 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -128,7 +128,6 @@ typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
 typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
 typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
 typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
-typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
 typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
 
@@ -145,7 +144,6 @@ struct blkcg_policy {
 	blkcg_pol_init_pd_fn		*pd_init_fn;
 	blkcg_pol_online_pd_fn		*pd_online_fn;
 	blkcg_pol_offline_pd_fn		*pd_offline_fn;
-	blkcg_pol_exit_pd_fn		*pd_exit_fn;
 	blkcg_pol_free_pd_fn		*pd_free_fn;
 	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
 };