author | Vivek Goyal <vgoyal@redhat.com> | 2009-12-08 17:52:58 -0500
---|---|---
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-12-09 09:11:04 -0500
commit | 7667aa0630407bc07dc38dcc79d29cc0a65553c1 (patch) |
tree | cd9007b2dfebec215d955952ea0f9914490bddaa /block/cfq-iosched.c |
parent | c244bb50a9baa2ec47a458bbafb36b5e559ed5fa (diff) |
cfq-iosched: Take care of corner cases of group losing share due to deletion
If there is a sequential reader running in a group, we wait for the next request
to come in to that group after slice expiry, and once the new request is in, we
expire the queue. Otherwise we delete the group from the service tree and the
group loses its fair share.
So far I was marking a queue as wait_busy if it had consumed its slice and
it was the last queue in the group. But this condition did not cover the
following two cases.
1. A request completes and the slice has not expired yet. The next request
comes in and is dispatched to disk. Now select_queue() hits and the slice has
expired. This group will be deleted. Because a request is still in the disk,
this queue will never get a chance to be marked wait_busy.
2. A request completes and the slice has not expired yet. Before the next
request comes in (delay due to think time), select_queue() hits and expires
the queue, and hence the group. This queue never got a chance to wait busy.
Gui was hitting boundary condition 1 and not getting fairness numbers
proportional to weight.
This patch adds checks for the above two conditions and improves the fairness
numbers for sequential workloads on rotational media. The check in
select_queue() takes care of case 1 and an additional check in
cfq_should_wait_busy() takes care of case 2.
Reported-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com>
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
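Before the patch itself, here is a minimal, self-contained userspace sketch of the wait-busy decision the message describes. It is not kernel code: the struct, its field names, and the plain-integer model of jiffies and think time are illustrative assumptions; it only mirrors the checks that the new cfq_should_wait_busy() helper performs in the diff below.

```c
/*
 * Minimal userspace model of the wait-busy decision described in the
 * commit message.  NOT kernel code: the struct and its fields are
 * illustrative assumptions; "jiffies" and think time are plain longs.
 */
#include <stdbool.h>
#include <stdio.h>

struct queue_state {
	int  nr_cfqq_in_group;	/* queues backlogged in this group */
	long now;		/* current time, in "jiffies" */
	long slice_end;		/* when the queue's time slice ends */
	long ttime_mean;	/* mean think time; 0 if below one jiffy */
	bool ttime_valid;	/* enough think-time samples collected */
};

/* Mirrors the checks of cfq_should_wait_busy() in the patch below. */
static bool should_wait_busy(const struct queue_state *q)
{
	/* Other queues keep the group backlogged, no need to wait. */
	if (q->nr_cfqq_in_group > 1)
		return false;

	/* Slice fully used: the original wait-busy condition. */
	if (q->now >= q->slice_end)
		return true;

	/* Case 2: the remaining slice is shorter than the think time. */
	if (q->ttime_valid && q->slice_end - q->now < q->ttime_mean)
		return true;

	/* Sub-jiffy think time: only one jiffy of slice left. */
	if (q->slice_end - q->now == 1)
		return true;

	return false;
}

int main(void)
{
	/*
	 * Last queue in its group, 5 jiffies of slice left, 8 jiffies of
	 * mean think time: the queue should be marked wait_busy.
	 */
	struct queue_state q = {
		.nr_cfqq_in_group = 1,
		.now = 100,
		.slice_end = 105,
		.ttime_mean = 8,
		.ttime_valid = true,
	};

	printf("wait busy: %s\n", should_wait_busy(&q) ? "yes" : "no");
	return 0;
}
```

With those example numbers the think-time check fires, which corresponds to corner case 2 above; corner case 1 is handled separately by the new branch added to cfq_select_queue() in the first hunk of the diff.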
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 54 |
1 file changed, 48 insertions(+), 6 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f41fdb5f3e0c..98b15b98b85d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2141,8 +2141,22 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	/*
 	 * The active queue has run out of time, expire it and select new.
 	 */
-	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
-		goto expire;
+	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
+		/*
+		 * If slice had not expired at the completion of last request
+		 * we might not have turned on wait_busy flag. Don't expire
+		 * the queue yet. Allow the group to get backlogged.
+		 *
+		 * The very fact that we have used the slice, that means we
+		 * have been idling all along on this queue and it should be
+		 * ok to wait for this request to complete.
+		 */
+		if (cfqq->cfqg->nr_cfqq == 1 && cfqq->dispatched
+		    && cfq_should_idle(cfqd, cfqq))
+			goto keep_queue;
+		else
+			goto expire;
+	}
 
 	/*
 	 * The active queue has requests and isn't expired, allow it to
@@ -3256,6 +3270,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 		cfqd->hw_tag = 0;
 }
 
+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct cfq_io_context *cic = cfqd->active_cic;
+
+	/* If there are other queues in the group, don't wait */
+	if (cfqq->cfqg->nr_cfqq > 1)
+		return false;
+
+	if (cfq_slice_used(cfqq))
+		return true;
+
+	/* if slice left is less than think time, wait busy */
+	if (cic && sample_valid(cic->ttime_samples)
+	    && (cfqq->slice_end - jiffies < cic->ttime_mean))
+		return true;
+
+	/*
+	 * If think times is less than a jiffy than ttime_mean=0 and above
+	 * will not be true. It might happen that slice has not expired yet
+	 * but will expire soon (4-5 ns) during select_queue(). To cover the
+	 * case where think time is less than a jiffy, mark the queue wait
+	 * busy if only 1 jiffy is left in the slice.
+	 */
+	if (cfqq->slice_end - jiffies == 1)
+		return true;
+
+	return false;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -3295,11 +3338,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	/*
-	 * If this queue consumed its slice and this is last queue
-	 * in the group, wait for next request before we expire
-	 * the queue
+	 * Should we wait for next request to come in before we expire
+	 * the queue.
 	 */
-	if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+	if (cfq_should_wait_busy(cfqd, cfqq)) {
 		cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
 		cfq_mark_cfqq_wait_busy(cfqq);
 	}