author     Tejun Heo <tj@kernel.org>    2015-08-18 17:55:22 -0400
committer  Jens Axboe <axboe@fb.com>    2015-08-18 18:49:17 -0400
commit     24bdb8ef068ebdc2a57ce715f0ab22d5da32832a (patch)
tree       466def9b935d1a40ce63cd07607e4a7be551f6bd /block/cfq-iosched.c
parent     e6269c44546755094979ab53609e6e203a68c8ff (diff)
blkcg: make blkcg_[rw]stat per-cpu
blkg_[rw]stat are used as stat counters for blkcg policies. They aren't
per-cpu by themselves, so blk-throttle makes them per-cpu by wrapping
around them. This patch makes blkg_[rw]stat per-cpu and drops the
ad-hoc per-cpu wrapping in blk-throttle.
* blkg_[rw]stat->cnt is replaced with cpu_cnt, which is a struct
percpu_counter. This makes syncp unnecessary, as remote accesses are
handled by percpu_counter itself.
* blkg_[rw]stat_init() can now fail due to percpu allocation failure
and is therefore updated to return int.
* percpu_counters need explicit freeing, so blkg_[rw]stat_exit() is
added.
* As blkg_rwstat->cpu_cnt[] can't be read directly anymore, the results
of reading and summing are stored in ->aux_cnt[] instead.
* Custom per-cpu stat implementation in blk-throttle is removed.
This makes all blkcg stat counters per-cpu without complicating policy
implementations.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
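
The first three bullets all hinge on percpu_counter's behavior. Below is a
minimal, self-contained sketch of those semantics; the demo_* names are
hypothetical and not part of this patch, while the percpu_counter calls are
the existing kernel API:

#include <linux/percpu_counter.h>

static struct percpu_counter demo_cnt;

/* Init allocates per-cpu storage and can fail -- the reason
 * blkg_[rw]stat_init() now returns int. */
static int demo_init(void)
{
	return percpu_counter_init(&demo_cnt, 0, GFP_KERNEL);
}

/* Writers do cheap, mostly CPU-local adds; no u64_stats_sync
 * (syncp) sequence counter is needed around them. */
static void demo_account(long bytes)
{
	percpu_counter_add(&demo_cnt, bytes);
}

/* Readers get an accurate total; percpu_counter serializes the
 * cross-CPU sum internally. */
static s64 demo_read(void)
{
	return percpu_counter_sum(&demo_cnt);
}

/* The per-cpu storage must be freed explicitly -- the reason
 * blkg_[rw]stat_exit() is added. */
static void demo_exit(void)
{
	percpu_counter_destroy(&demo_cnt);
}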
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 70
1 file changed, 52 insertions(+), 18 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index b272cfff7364..71e55c91ee98 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1542,27 +1542,55 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg)
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void cfqg_stats_init(struct cfqg_stats *stats)
+static void cfqg_stats_exit(struct cfqg_stats *stats)
 {
-	blkg_rwstat_init(&stats->service_bytes);
-	blkg_rwstat_init(&stats->serviced);
-	blkg_rwstat_init(&stats->merged);
-	blkg_rwstat_init(&stats->service_time);
-	blkg_rwstat_init(&stats->wait_time);
-	blkg_rwstat_init(&stats->queued);
+	blkg_rwstat_exit(&stats->service_bytes);
+	blkg_rwstat_exit(&stats->serviced);
+	blkg_rwstat_exit(&stats->merged);
+	blkg_rwstat_exit(&stats->service_time);
+	blkg_rwstat_exit(&stats->wait_time);
+	blkg_rwstat_exit(&stats->queued);
 
-	blkg_stat_init(&stats->sectors);
-	blkg_stat_init(&stats->time);
+	blkg_stat_exit(&stats->sectors);
+	blkg_stat_exit(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	blkg_stat_exit(&stats->unaccounted_time);
+	blkg_stat_exit(&stats->avg_queue_size_sum);
+	blkg_stat_exit(&stats->avg_queue_size_samples);
+	blkg_stat_exit(&stats->dequeue);
+	blkg_stat_exit(&stats->group_wait_time);
+	blkg_stat_exit(&stats->idle_time);
+	blkg_stat_exit(&stats->empty_time);
+#endif
+}
+
+static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
+{
+	if (blkg_rwstat_init(&stats->service_bytes, gfp) ||
+	    blkg_rwstat_init(&stats->serviced, gfp) ||
+	    blkg_rwstat_init(&stats->merged, gfp) ||
+	    blkg_rwstat_init(&stats->service_time, gfp) ||
+	    blkg_rwstat_init(&stats->wait_time, gfp) ||
+	    blkg_rwstat_init(&stats->queued, gfp) ||
+
+	    blkg_stat_init(&stats->sectors, gfp) ||
+	    blkg_stat_init(&stats->time, gfp))
+		goto err;
 
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-	blkg_stat_init(&stats->unaccounted_time);
-	blkg_stat_init(&stats->avg_queue_size_sum);
-	blkg_stat_init(&stats->avg_queue_size_samples);
-	blkg_stat_init(&stats->dequeue);
-	blkg_stat_init(&stats->group_wait_time);
-	blkg_stat_init(&stats->idle_time);
-	blkg_stat_init(&stats->empty_time);
+	if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
+	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
+	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
+	    blkg_stat_init(&stats->dequeue, gfp) ||
+	    blkg_stat_init(&stats->group_wait_time, gfp) ||
+	    blkg_stat_init(&stats->idle_time, gfp) ||
+	    blkg_stat_init(&stats->empty_time, gfp))
+		goto err;
 #endif
+	return 0;
+err:
+	cfqg_stats_exit(stats);
+	return -ENOMEM;
 }
 
 static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
@@ -1602,7 +1630,10 @@ static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
 		return NULL;
 
 	cfq_init_cfqg_base(cfqg);
-	cfqg_stats_init(&cfqg->stats);
+	if (cfqg_stats_init(&cfqg->stats, gfp)) {
+		kfree(cfqg);
+		return NULL;
+	}
 
 	return &cfqg->pd;
 }
@@ -1642,7 +1673,10 @@ static void cfq_pd_offline(struct blkg_policy_data *pd)
 
 static void cfq_pd_free(struct blkg_policy_data *pd)
 {
-	return kfree(pd);
+	struct cfq_group *cfqg = pd_to_cfqg(pd);
+
+	cfqg_stats_exit(&cfqg->stats);
+	return kfree(cfqg);
 }
 
 static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
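
The fourth bullet's mechanism doesn't appear in this file's diff (it lives in
the blkcg stat helpers), so here is a hedged sketch of the idea, with
RWSTAT_NR, struct rwstat_sketch, and struct rwstat_snap as illustrative
stand-ins rather than the kernel's actual definitions:

#include <linux/atomic.h>
#include <linux/percpu_counter.h>

#define RWSTAT_NR 4	/* assumed number of rwstat directions */

struct rwstat_sketch {
	struct percpu_counter	cpu_cnt[RWSTAT_NR];	/* written per-cpu */
	atomic64_t		aux_cnt[RWSTAT_NR];	/* holds summed results */
};

struct rwstat_snap {
	atomic64_t		aux_cnt[RWSTAT_NR];
};

/* cpu_cnt[] can't be read directly, so a reader sums each per-cpu
 * counter and stores the result in an aux_cnt[] slot. */
static struct rwstat_snap rwstat_sketch_read(struct rwstat_sketch *s)
{
	struct rwstat_snap snap;
	int i;

	for (i = 0; i < RWSTAT_NR; i++)
		atomic64_set(&snap.aux_cnt[i],
			     percpu_counter_sum(&s->cpu_cnt[i]));
	return snap;
}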