diff options
author | Corrado Zoccolo <czoccolo@gmail.com> | 2009-10-26 17:45:11 -0400 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-10-28 04:23:26 -0400 |
commit | a6d44e982d3734583b3b4e1d36921af8cfd61fc0 (patch) | |
tree | 1d9b409057361524af2e097fe5de1c505533d603 /block | |
parent | c0324a020e5b351f100569b128715985f1023af8 (diff) |
cfq-iosched: enable idling for last queue on priority class
cfq can disable idling for queues in various circumstances.
When workloads of different priorities are competing, if the higher
priority queue has idling disabled, lower priority queues may steal
its disk share. For example, in a scenario with an RT process
performing seeky reads vs a BE process performing sequential reads,
on NCQ-enabled hardware with low_latency unset,
the RT process will dispatch only the few pending requests every full
slice of service for the BE process.
The patch solves this issue by always idling on the last
queue of a given priority class (for classes other than idle). If the same process, or one
that can pre-empt it (so at the same priority or higher), submits a
new request within the idle window, the lower priority queue won't
dispatch, saving the disk bandwidth for higher priority ones.
Note: this doesn't touch the non_rotational + NCQ case (no hardware
to test if this is a benefit in that case).
Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/cfq-iosched.c | 34 |
1 files changed, 31 insertions, 3 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 6e5c3d715ebe..76afa3696894 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -1160,6 +1160,34 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, | |||
1160 | return cfqq; | 1160 | return cfqq; |
1161 | } | 1161 | } |
1162 | 1162 | ||
1163 | /* | ||
1164 | * Determine whether we should enforce idle window for this queue. | ||
1165 | */ | ||
1166 | |||
1167 | static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
1168 | { | ||
1169 | enum wl_prio_t prio = cfqq_prio(cfqq); | ||
1170 | struct cfq_rb_root *service_tree; | ||
1171 | |||
1172 | /* We never do for idle class queues. */ | ||
1173 | if (prio == IDLE_WORKLOAD) | ||
1174 | return false; | ||
1175 | |||
1176 | /* We do for queues that were marked with idle window flag. */ | ||
1177 | if (cfq_cfqq_idle_window(cfqq)) | ||
1178 | return true; | ||
1179 | |||
1180 | /* | ||
1181 | * Otherwise, we do only if they are the last ones | ||
1182 | * in their service tree. | ||
1183 | */ | ||
1184 | service_tree = service_tree_for(prio, cfqd); | ||
1185 | if (service_tree->count == 0) | ||
1186 | return true; | ||
1187 | |||
1188 | return (service_tree->count == 1 && cfq_rb_first(service_tree) == cfqq); | ||
1189 | } | ||
1190 | |||
1163 | static void cfq_arm_slice_timer(struct cfq_data *cfqd) | 1191 | static void cfq_arm_slice_timer(struct cfq_data *cfqd) |
1164 | { | 1192 | { |
1165 | struct cfq_queue *cfqq = cfqd->active_queue; | 1193 | struct cfq_queue *cfqq = cfqd->active_queue; |
@@ -1180,7 +1208,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
1180 | /* | 1208 | /* |
1181 | * idle is disabled, either manually or by past process history | 1209 | * idle is disabled, either manually or by past process history |
1182 | */ | 1210 | */ |
1183 | if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq)) | 1211 | if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq)) |
1184 | return; | 1212 | return; |
1185 | 1213 | ||
1186 | /* | 1214 | /* |
@@ -1362,7 +1390,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | |||
1362 | * conditions to happen (or time out) before selecting a new queue. | 1390 | * conditions to happen (or time out) before selecting a new queue. |
1363 | */ | 1391 | */ |
1364 | if (timer_pending(&cfqd->idle_slice_timer) || | 1392 | if (timer_pending(&cfqd->idle_slice_timer) || |
1365 | (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) { | 1393 | (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) { |
1366 | cfqq = NULL; | 1394 | cfqq = NULL; |
1367 | goto keep_queue; | 1395 | goto keep_queue; |
1368 | } | 1396 | } |
@@ -1427,7 +1455,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
1427 | /* | 1455 | /* |
1428 | * Drain async requests before we start sync IO | 1456 | * Drain async requests before we start sync IO |
1429 | */ | 1457 | */ |
1430 | if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC]) | 1458 | if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC]) |
1431 | return false; | 1459 | return false; |
1432 | 1460 | ||
1433 | /* | 1461 | /* |