author     Vivek Goyal <vgoyal@redhat.com>       2011-05-19 15:38:28 -0400
committer  Jens Axboe <jaxboe@fusionio.com>      2011-05-20 14:34:52 -0400
commit     5624a4e445e2ec27582984b068d7bf7f127cee10 (patch)
tree       0827c83c6e5f5fa83bd0dadc1bc395c0f0657dae /block/cfq-iosched.c
parent     4843c69d496a8d2e4caab6182fe016b9a79136e0 (diff)
blk-throttle: Make dispatch stats per cpu
Currently we take the blkg stats lock even for updating the stats. So even if a group has no throttling rules (the common case for the root group), we end up taking blkg_lock just to update the stats.

Make dispatch stats per cpu so that these can be updated without taking blkg lock.

If a cpu goes offline, these stats simply disappear. No protection has been provided for that yet. Do we really need anything for that?

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
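To illustrate the approach, here is a minimal, hypothetical sketch of a per-cpu dispatch counter being updated without taking any group-wide lock; the struct and function names below are made up for illustration and are not the actual blk-cgroup identifiers:

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-cpu stats bucket (names are illustrative only). */
struct example_dispatch_stats_cpu {
	u64 sectors;
	u64 serviced;
	struct u64_stats_sync syncp;
};

/*
 * Bump only this CPU's copy of the counters. No group-wide spinlock is
 * taken; disabling interrupts serializes writers on the local CPU, and
 * readers later sum the copies from every CPU.
 */
static void example_update_dispatch_stats(struct example_dispatch_stats_cpu __percpu *stats,
					  u64 bytes)
{
	struct example_dispatch_stats_cpu *s;
	unsigned long flags;

	local_irq_save(flags);
	s = this_cpu_ptr(stats);
	u64_stats_update_begin(&s->syncp);
	s->sectors += bytes >> 9;
	s->serviced++;
	u64_stats_update_end(&s->syncp);
	local_irq_restore(flags);
}

The allocation side pairs alloc_percpu() with free_percpu(); the hunks below wire that up for cfq through blkio_alloc_blkg_stats() and free_percpu(cfqg->blkg.stats_cpu).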
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 606020fe93f3..d646b279c8bb 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1051,7 +1051,7 @@ static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
 static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
 {
 	struct cfq_group *cfqg = NULL;
-	int i, j;
+	int i, j, ret;
 	struct cfq_rb_root *st;
 
 	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
@@ -1069,6 +1069,13 @@ static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
 	cfqg->ref = 1;
+
+	ret = blkio_alloc_blkg_stats(&cfqg->blkg);
+	if (ret) {
+		kfree(cfqg);
+		return NULL;
+	}
+
 	return cfqg;
 }
 
@@ -1183,6 +1190,7 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
 		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
+	free_percpu(cfqg->blkg.stats_cpu);
 	kfree(cfqg);
 }
 
@@ -3995,7 +4003,15 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * throtl_data goes away.
 	 */
 	cfqg->ref = 2;
+
+	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
+		kfree(cfqg);
+		kfree(cfqd);
+		return NULL;
+	}
+
 	rcu_read_lock();
+
 	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
 					(void *)cfqd, 0);
 	rcu_read_unlock();
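For context, the error handling added above assumes blkio_alloc_blkg_stats() returns non-zero when the per-cpu stats area cannot be allocated, and cfq_put_cfqg() releases that area with free_percpu(). The helper itself is defined in block/blk-cgroup.c and is not part of this diffstat; a sketch of what such an allocator looks like, assuming stats_cpu is a plain percpu pointer inside struct blkio_group:

/* Sketch only; the real definition lives in block/blk-cgroup.c. */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate one stats bucket for each possible CPU. */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (blkg->stats_cpu == NULL)
		return -ENOMEM;
	return 0;
}

If the allocation fails, the callers above free the partially constructed cfq_group (and, in cfq_init_queue(), the cfq_data as well) and return NULL.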