author     Vivek Goyal <vgoyal@redhat.com>      2009-12-03 12:59:54 -0500
committer  Jens Axboe <jens.axboe@oracle.com>   2009-12-03 13:28:53 -0500
commit     f26bd1f0a3a31bc5e16d285f5e1b00a56abf6238 (patch)
tree       8e49302c146eacdd1cfbe78a6bd3aad2b81c3050 /block/cfq-iosched.c
parent     f75edf2dc828802d358393be80a6c89e919f8273 (diff)
blkio: Determine async workload length based on total number of queues
o Async queues are not per group. Instead these are system wide and
  maintained in root group. Hence their workload slice length should be
  calculated based on total number of queues in the system and not just
  queues in the root group.

o As root group's default weight is 1000, make sure to charge async queue
  more in terms of vtime so that it does not get more time on disk because
  root group has higher weight.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
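To make the first change concrete, below is a minimal stand-alone sketch of the async workload slice calculation that the patch adds to choose_service_tree(). The queue counts, slice lengths, and tunable values are example numbers chosen for illustration, not values taken from the patch; in CFQ the real values come from cfq_data and cfq_group.

#include <stdio.h>

/*
 * Stand-alone sketch (not kernel code) of the async workload slice
 * calculation added to choose_service_tree().  All values are example
 * numbers standing in for CFQ's tunables and counters.
 */
int main(void)
{
	unsigned int target_latency = 300;	/* stands in for cfq_target_latency (ms) */
	unsigned int slice_async = 40;		/* stands in for cfqd->cfq_slice[0] (ms) */
	unsigned int slice_sync = 100;		/* stands in for cfqd->cfq_slice[1] (ms) */

	unsigned int slice = 150;		/* per-group slice before the new cap */
	unsigned int busy_async_queues = 2;	/* async queues, system wide */
	unsigned int busy_queues = 10;		/* all busy queues, system wide */

	/* New in this patch: cap the slice by the system-wide async share */
	unsigned int tmp = target_latency * busy_async_queues / busy_queues;
	if (tmp < slice)			/* min_t(unsigned, slice, tmp) */
		slice = tmp;

	/* Pre-existing scaling by the async/sync slice ratio */
	slice = slice * slice_async / slice_sync;

	printf("async workload slice: %u ms\n", slice);	/* 60 * 40 / 100 = 24 */
	return 0;
}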
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--   block/cfq-iosched.c   36
1 file changed, 31 insertions, 5 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1cc10489eaf0..b9e483d9031e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -408,6 +408,13 @@ static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
 		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
 }
 
+static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
+					 struct cfq_group *cfqg)
+{
+	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
+		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
+}
+
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 				       struct io_context *, gfp_t);
@@ -895,13 +902,19 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 				  struct cfq_queue *cfqq)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
-	unsigned int used_sl;
+	unsigned int used_sl, charge_sl;
+	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
+			- cfqg->service_tree_idle.count;
+
+	BUG_ON(nr_sync < 0);
+	used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
 
-	used_sl = cfq_cfqq_slice_usage(cfqq);
+	if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+		charge_sl = cfqq->allocated_slice;
 
 	/* Can't update vdisktime while group is on service tree */
 	cfq_rb_erase(&cfqg->rb_node, st);
-	cfqg->vdisktime += cfq_scale_slice(used_sl, cfqg);
+	cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
 	__cfq_group_service_tree_add(st, cfqg);
 
 	/* This group is being expired. Save the context */
@@ -2016,11 +2029,24 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
 		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
 
-	if (cfqd->serving_type == ASYNC_WORKLOAD)
+	if (cfqd->serving_type == ASYNC_WORKLOAD) {
+		unsigned int tmp;
+
+		/*
+		 * Async queues are currently system wide. Just taking
+		 * proportion of queues within same group will lead to higher
+		 * async ratio system wide as generally root group is going
+		 * to have higher weight. A more accurate thing would be to
+		 * calculate system wide async/sync ratio.
+		 */
+		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+		tmp = tmp / cfqd->busy_queues;
+		slice = min_t(unsigned, slice, tmp);
+
 		/* async workload slice is scaled down according to
 		 * the sync/async slice ratio. */
 		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
-	else
+	} else
 		/* sync workload slice is at least 2 * cfq_slice_idle */
 		slice = max(slice, 2 * cfqd->cfq_slice_idle);
 
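Similarly, here is a minimal stand-alone sketch of the charging change made to cfq_group_served() in the hunk above: when the group has no sync queues left, an async queue is charged its full allocated slice rather than only the time it used. The counts and slice lengths below are made-up example values; in CFQ they come from cfq_group and cfq_queue.

#include <stdio.h>

/*
 * Stand-alone sketch (not kernel code) of the charging decision added to
 * cfq_group_served().  All values are example numbers standing in for
 * CFQ's per-group and per-queue state.
 */
int main(void)
{
	unsigned int nr_cfqq = 3;		/* all queues in the group */
	unsigned int busy_async = 2;		/* async queues (RT + BE trees) */
	unsigned int idle = 1;			/* queues on the idle service tree */
	unsigned int used_sl = 25;		/* slice time actually consumed (ms) */
	unsigned int allocated_slice = 40;	/* slice the queue was allotted (ms) */
	int queue_is_sync = 0;			/* the expiring queue is async */

	int nr_sync = nr_cfqq - busy_async - idle;
	unsigned int charge_sl = used_sl;

	/*
	 * With no sync queues left in the group, an async queue is charged
	 * its full allocated slice rather than only the time it used, so
	 * the high-weight root group is not under-charged for async IO.
	 */
	if (!queue_is_sync && !nr_sync)
		charge_sl = allocated_slice;

	printf("charge %u ms of vtime to the group\n", charge_sl);	/* 40, not 25 */
	return 0;
}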