diff options
author | Aaron Carroll <aaronc@gelato.unsw.edu.au> | 2007-10-30 05:40:13 -0400 |
---|---|---|
committer | Jens Axboe <axboe@carl.home.kernel.dk> | 2007-11-02 03:47:25 -0400 |
commit | 6f5d8aa6382eef2b26032c88656270bdae7f0c42 (patch) | |
tree | 6ee855ce6e8b0eb8cb04d41579ac11cb25a92759 | |
parent | dfb3d72a9aa519672c9ae06f0d2f93eccb35482f (diff) |
Deadline iosched: Fix batching fairness
After switching data directions, deadline always starts the next batch
from the lowest-sector request. This gives excessive deadline expiries
and large latency and throughput disparity between high- and low-sector
requests; an order of magnitude in some tests.
This patch changes the batching behaviour so new batches start from the
request whose expiry is earliest.
Signed-off-by: Aaron Carroll <aaronc@gelato.unsw.edu.au>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r-- | block/deadline-iosched.c | 21 |
1 file changed, 7 insertions, 14 deletions
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index cb94c838087a..a054eef8dff6 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c | |||
@@ -306,27 +306,20 @@ dispatch_writes: | |||
306 | dispatch_find_request: | 306 | dispatch_find_request: |
307 | /* | 307 | /* |
308 | * we are not running a batch, find best request for selected data_dir | 308 | * we are not running a batch, find best request for selected data_dir |
309 | * and start a new batch | ||
310 | */ | 309 | */ |
311 | if (deadline_check_fifo(dd, data_dir)) { | 310 | if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) { |
312 | /* An expired request exists - satisfy it */ | 311 | /* |
312 | * A deadline has expired, the last request was in the other | ||
313 | * direction, or we have run out of higher-sectored requests. | ||
314 | * Start again from the request with the earliest expiry time. | ||
315 | */ | ||
313 | rq = rq_entry_fifo(dd->fifo_list[data_dir].next); | 316 | rq = rq_entry_fifo(dd->fifo_list[data_dir].next); |
314 | } else if (dd->next_rq[data_dir]) { | 317 | } else { |
315 | /* | 318 | /* |
316 | * The last req was the same dir and we have a next request in | 319 | * The last req was the same dir and we have a next request in |
317 | * sort order. No expired requests so continue on from here. | 320 | * sort order. No expired requests so continue on from here. |
318 | */ | 321 | */ |
319 | rq = dd->next_rq[data_dir]; | 322 | rq = dd->next_rq[data_dir]; |
320 | } else { | ||
321 | struct rb_node *node; | ||
322 | /* | ||
323 | * The last req was the other direction or we have run out of | ||
324 | * higher-sectored requests. Go back to the lowest sectored | ||
325 | * request (1 way elevator) and start a new batch. | ||
326 | */ | ||
327 | node = rb_first(&dd->sort_list[data_dir]); | ||
328 | if (node) | ||
329 | rq = rb_entry_rq(node); | ||
330 | } | 323 | } |
331 | 324 | ||
332 | dd->batching = 0; | 325 | dd->batching = 0; |