author     Vivek Goyal <vgoyal@redhat.com>     2011-05-19 15:38:28 -0400
committer  Jens Axboe <jaxboe@fusionio.com>    2011-05-20 14:34:52 -0400
commit     5624a4e445e2ec27582984b068d7bf7f127cee10
tree       0827c83c6e5f5fa83bd0dadc1bc395c0f0657dae /block/blk-throttle.c
parent     4843c69d496a8d2e4caab6182fe016b9a79136e0
blk-throttle: Make dispatch stats per cpu
Currently we take the blkg_stat lock even just to update the stats. So even
if a group has no throttling rules (the common case for the root group), we
end up taking the blkg_lock to update the stats.

Make the dispatch stats per cpu so that they can be updated without taking
the blkg lock.

If a cpu goes offline, these stats simply disappear. No protection has
been provided for that yet. Do we really need anything for that?
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
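
For context, the lockless per-cpu counter pattern the patch moves to looks
roughly like the sketch below. This is illustrative only: the struct, field,
and function names here are invented, and the real definitions (struct
blkio_group_stats_cpu and friends) live in blk-cgroup.[ch]. The per-cpu area
would be allocated with alloc_percpu() and released with free_percpu(),
matching the calls visible in the diff that follows.

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/*
 * Illustrative per-cpu stats bucket; the real fields live in
 * struct blkio_group_stats_cpu in blk-cgroup.h.
 */
struct dispatch_stats_cpu {
	u64 serviced;			/* IOs dispatched from this cpu */
	u64 service_bytes;		/* bytes dispatched from this cpu */
	struct u64_stats_sync syncp;	/* guards 64-bit reads on 32-bit */
};

static struct dispatch_stats_cpu __percpu *dstats;

/* Update path: touches only this cpu's copy, so no blkg lock is taken. */
static void account_dispatch(u64 bytes)
{
	struct dispatch_stats_cpu *s = get_cpu_ptr(dstats);

	u64_stats_update_begin(&s->syncp);
	s->serviced++;
	s->service_bytes += bytes;
	u64_stats_update_end(&s->syncp);
	put_cpu_ptr(dstats);
}

/* Read path: sum every cpu's copy, retrying any torn 64-bit reads. */
static u64 total_service_bytes(void)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct dispatch_stats_cpu *s = per_cpu_ptr(dstats, cpu);
		unsigned int start;
		u64 v;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			v = s->service_bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
		total += v;
	}
	return total;
}

Writers on different cpus never contend and take no lock; the read side pays
instead, walking all possible cpus and summing, which is the trade-off the
commit message describes.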
Diffstat (limited to 'block/blk-throttle.c')
 block/blk-throttle.c | 9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 90ad40735f73..c29a5a8cc18c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -158,6 +158,7 @@ static void throtl_free_tg(struct rcu_head *head)
 	struct throtl_grp *tg;
 
 	tg = container_of(head, struct throtl_grp, rcu_head);
+	free_percpu(tg->blkg.stats_cpu);
 	kfree(tg);
 }
 
@@ -249,11 +250,19 @@ static void throtl_init_add_tg_lists(struct throtl_data *td,
 static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
 {
 	struct throtl_grp *tg = NULL;
+	int ret;
 
 	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
 	if (!tg)
 		return NULL;
 
+	ret = blkio_alloc_blkg_stats(&tg->blkg);
+
+	if (ret) {
+		kfree(tg);
+		return NULL;
+	}
+
 	throtl_init_group(tg);
 	return tg;
 }
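
blkio_alloc_blkg_stats() itself is added by the same patch in blk-cgroup.c,
which this view (limited to block/blk-throttle.c) does not show. Judging from
the free_percpu(tg->blkg.stats_cpu) counterpart in throtl_free_tg() above, it
plausibly reduces to something like this sketch; the struct
blkio_group_stats_cpu type name is an assumption here:

/* Sketch, not the verbatim patch: allocate the per-cpu stats area. */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* assumed type, presumably declared in blk-cgroup.h */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}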