author	Vivek Goyal <vgoyal@redhat.com>	2009-12-16 17:52:59 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2009-12-18 06:40:21 -0500
commit	65b32a573eefa1cdd3cbe5ea59326308e6c3b9ad (patch)
tree	bdcca029d184056ffc9eab0a9c0b95cdf123c9fb /block/cfq-iosched.c
parent	fb104db41e6e006c85ce1097f372cd1e10c1755c (diff)
cfq-iosched: Remove prio_change logic for workload selection
o CFQ now internally divides cfq queues into three workload categories:
  sync-idle, sync-noidle and async. Which workload to run depends primarily
  on the rb_key offset across the three service trees, which in turn is a
  combination of multiple factors, including the time at which the queue was
  queued on the service tree.

  There is one exception, though: if we switched the prio class, say we
  served some RT tasks and then started serving the BE class again, then
  within the BE class we always started with the sync-noidle workload,
  irrespective of the rb_key offsets in the service trees. This can provide
  better latencies for the sync-noidle workload in the presence of RT tasks.

o This patch gets rid of that exception, so the workload to run within a
  class always depends on the lowest rb_key across the service trees. The
  reason is that we now have multiple BE class groups, and if we always
  switch to the sync-noidle workload within a group, we can potentially
  starve a sync-idle workload within that group. The same is true for the
  async workload, which will be in the root group. Also, workload switching
  within a group would become very unpredictable, as it would depend on
  whether some RT workload was running in the system or not.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Reviewed-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com>
Acked-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
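[Editor's note] To make the selection rule the patch converges on concrete, here is a minimal userspace sketch; it is not the kernel code itself. choose_wl(), the queue struct and the first[] array are hypothetical stand-ins, while the enum mirrors the kernel's wl_type_t ordering (ASYNC_WORKLOAD = 0, SYNC_WORKLOAD last), which is what makes the patch's `i <= SYNC_WORKLOAD` loop bound equivalent to walking all three workload types:

#include <stdbool.h>

enum wl_type_t { ASYNC_WORKLOAD, SYNC_NOIDLE_WORKLOAD, SYNC_WORKLOAD };

struct queue { unsigned long rb_key; };

/* wraparound-safe "a before b" check, modeled on the kernel's time_before() */
#define time_before(a, b) ((long)((a) - (b)) < 0)

/* first[i] is the head queue of workload type i's service tree, or NULL if empty */
static enum wl_type_t choose_wl(struct queue *first[SYNC_WORKLOAD + 1])
{
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (int i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* pick the workload whose head queue has the lowest rb_key */
		if (first[i] &&
		    (!key_valid || time_before(first[i]->rb_key, lowest_key))) {
			lowest_key = first[i]->rb_key;
			cur_best = (enum wl_type_t)i;
			key_valid = true;
		}
	}
	return cur_best;
}

The one behavioral change is what is absent: with the prio_changed branch gone, a priority-class switch no longer forces sync-noidle first; the lowest rb_key always wins.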
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	48
1 file changed, 12 insertions, 36 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d9bfa09e68c1..8df4fe58f4e7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -292,8 +292,7 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 
 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
 					    enum wl_prio_t prio,
-					    enum wl_type_t type,
-					    struct cfq_data *cfqd)
+					    enum wl_type_t type)
 {
 	if (!cfqg)
 		return NULL;
@@ -1146,7 +1145,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 #endif
 
 	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
-						cfqq_type(cfqq), cfqd);
+						cfqq_type(cfqq));
 	if (cfq_class_idle(cfqq)) {
 		rb_key = CFQ_IDLE_DELAY;
 		parent = rb_last(&service_tree->rb);
@@ -1609,7 +1608,7 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
 	struct cfq_rb_root *service_tree =
 		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
-					cfqd->serving_type, cfqd);
+					cfqd->serving_type);
 
 	if (!cfqd->rq_queued)
 		return NULL;
@@ -1956,8 +1955,7 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 }
 
 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
-				struct cfq_group *cfqg, enum wl_prio_t prio,
-				bool prio_changed)
+				struct cfq_group *cfqg, enum wl_prio_t prio)
 {
 	struct cfq_queue *queue;
 	int i;
@@ -1965,24 +1963,9 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 	unsigned long lowest_key = 0;
 	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
 
-	if (prio_changed) {
-		/*
-		 * When priorities switched, we prefer starting
-		 * from SYNC_NOIDLE (first choice), or just SYNC
-		 * over ASYNC
-		 */
-		if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
-			return cur_best;
-		cur_best = SYNC_WORKLOAD;
-		if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
-			return cur_best;
-
-		return ASYNC_WORKLOAD;
-	}
-
-	for (i = 0; i < 3; ++i) {
-		/* otherwise, select the one with lowest rb_key */
-		queue = cfq_rb_first(service_tree_for(cfqg, prio, i, cfqd));
+	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
+		/* select the one with lowest rb_key */
+		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
 		if (queue &&
 		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
 			lowest_key = queue->rb_key;
@@ -1996,8 +1979,6 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 
 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
-	enum wl_prio_t previous_prio = cfqd->serving_prio;
-	bool prio_changed;
 	unsigned slice;
 	unsigned count;
 	struct cfq_rb_root *st;
@@ -2025,24 +2006,19 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
 	 * expiration time
 	 */
-	prio_changed = (cfqd->serving_prio != previous_prio);
-	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
-				cfqd);
+	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
 	count = st->count;
 
 	/*
-	 * If priority didn't change, check workload expiration,
-	 * and that we still have other queues ready
+	 * check workload expiration, and that we still have other queues ready
 	 */
-	if (!prio_changed && count &&
-	    !time_after(jiffies, cfqd->workload_expires))
+	if (count && !time_after(jiffies, cfqd->workload_expires))
 		return;
 
 	/* otherwise select new workload type */
 	cfqd->serving_type =
-		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio, prio_changed);
-	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
-				cfqd);
+		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
+	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
 	count = st->count;
 
 	/*
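[Editor's note] As a companion sketch of the last hunk (again illustrative: cfqd_model and keep_current_workload() are made-up names, not kernel identifiers), the gate choose_service_tree() is left with after the patch reduces to "keep the current workload only while its service tree is non-empty and its slice has not expired":

#include <stdbool.h>

/* wraparound-safe "a after b" check, modeled on the kernel's time_after() */
#define time_after(a, b) ((long)((b) - (a)) < 0)

/* illustrative stand-in for the relevant cfq_data state, not a kernel struct */
struct cfqd_model {
	unsigned long now;                /* current time in jiffies        */
	unsigned long workload_expires;   /* end of current workload slice  */
	unsigned int count;               /* queues on the serving tree     */
};

/* true => keep serving the current workload; false => re-run cfq_choose_wl() */
static bool keep_current_workload(const struct cfqd_model *d)
{
	return d->count && !time_after(d->now, d->workload_expires);
}

Dropping !prio_changed from this condition is what makes workload switching depend only on per-group state, independent of whether an RT workload happened to run in the system.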