about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2013-01-09 11:05:11 -0500
committerTejun Heo <tj@kernel.org>2013-01-09 11:05:11 -0500
commit41cad6ab2cb9ccb3b11546ad56b8b285e47c6279 (patch)
treec6308080ab464a5f7a5da52035e948431061949a
parent1d3650f713e7f6392b02fde450c5bae40291e65b (diff)
cfq-iosched: convert cfq_group_slice() to use cfqg->vfraction
cfq_group_slice() calculates slice by taking a fraction of cfq_target_latency according to the ratio of cfqg->weight against service_tree->total_weight. This currently works only because all cfqgs are treated to be at the same level. To prepare for proper hierarchy support, convert cfq_group_slice() to base the calculation on cfqg->vfraction. As cfqg->vfraction is always a fraction of 1 and represents the fraction allocated to the cfqg with hierarchy considered, the slice can be simply calculated by multiplying cfqg->vfraction to cfq_target_latency (with fixed point shift factored in). As vfraction calculation currently treats all non-root cfqgs as children of the root cfqg, this patch doesn't introduce noticeable behavior difference. Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Vivek Goyal <vgoyal@redhat.com>
-rw-r--r--block/cfq-iosched.c7
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index b24acf66d5b5..ee342826fd98 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -85,7 +85,6 @@ struct cfq_rb_root {
 	struct rb_root rb;
 	struct rb_node *left;
 	unsigned count;
-	unsigned total_weight;
 	u64 min_vdisktime;
 	struct cfq_ttime ttime;
 };
@@ -979,9 +978,7 @@ static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
 static inline unsigned
 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
-	struct cfq_rb_root *st = &cfqd->grp_service_tree;
-
-	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
+	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
 }
 
 static inline unsigned
@@ -1273,7 +1270,6 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 
 	cfq_update_group_weight(cfqg);
 	__cfq_group_service_tree_add(st, cfqg);
-	st->total_weight += cfqg->weight;
 
 	/*
 	 * Activate @cfqg and calculate the portion of vfraction @cfqg is
@@ -1360,7 +1356,6 @@ cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	}
 
 	/* remove from the service tree */
-	st->total_weight -= cfqg->weight;
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
 }