author     Corrado Zoccolo <czoccolo@gmail.com>    2009-12-06 05:48:52 -0500
committer  Jens Axboe <axboe@carl.(none)>          2009-12-09 06:32:55 -0500
commit     573412b29586e58477adb70e022193a337763319
tree       757ebcc4da3ba7b8d8beb8e8e0ff6a4fe4428f52 /block/cfq-iosched.c
parent     2b876f95d03e226394b5d360c86127cbefaf614b
cfq-iosched: reduce write depth only if sync was delayed
The introduction of the ramp-up formula for async queue depths
slowed down dirty page reclaim by reducing async write performance.
This patch makes sure the formula kicks in only when a sync request
was recently delayed.
Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
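
[Editor's note] For context, the throttle this patch tunes computes the
allowed async dispatch depth as the time since the last sync completion
(after this patch: the last *delayed* sync completion) divided by the sync
slice length, so a queue that keeps completing sync requests pins async
writes to minimal depth. Below is a minimal userspace model of that
ramp-up; the names (allowed_async_depth, sync_slice, max_dispatch) and
tick values are illustrative, not the kernel API, and the real logic
lives in cfq_may_dispatch():

#include <stdio.h>

/*
 * Simplified model of CFQ's async write depth ramp-up: the longer it
 * has been since a sync request was delayed, the deeper the async
 * queue may dispatch. Parameter names are made up for illustration.
 */
static unsigned int allowed_async_depth(unsigned long since_delayed_sync,
					unsigned long sync_slice,
					unsigned int max_dispatch)
{
	unsigned int depth = since_delayed_sync / sync_slice;

	if (depth < 1)
		depth = 1;	/* always allow some forward progress */
	if (depth > max_dispatch)
		depth = max_dispatch;
	return depth;
}

int main(void)
{
	/* example: sync slice of 100 ticks, device queue depth of 16 */
	for (unsigned long t = 0; t <= 2000; t += 500)
		printf("%4lu ticks since delayed sync -> depth %u\n",
		       t, allowed_async_depth(t, 100, 16));
	return 0;
}

Before this patch, every sync completion reset the clock, so even
undelayed sync traffic kept dirty-page writeback throttled; now the
elapsed-time numerator only resets when a sync request actually missed
its deadline.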
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cfb0b2f5f63d..5009af490a0c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -283,7 +283,7 @@ struct cfq_data {
 	 */
 	struct cfq_queue oom_cfqq;
 
-	unsigned long last_end_sync_rq;
+	unsigned long last_delayed_sync;
 
 	/* List of cfq groups being managed on this device*/
 	struct hlist_head cfqg_list;
@@ -2264,7 +2264,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * based on the last sync IO we serviced
 	 */
 	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
-		unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
 		unsigned int depth;
 
 		depth = last_sync / cfqd->cfq_slice[1];
@@ -3273,7 +3273,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 
 	if (sync) {
 		RQ_CIC(rq)->last_end_request = now;
-		cfqd->last_end_sync_rq = now;
+		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+			cfqd->last_delayed_sync = now;
 	}
 
 	/*
@@ -3711,7 +3712,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_latency = 1;
 	cfqd->cfq_group_isolation = 0;
 	cfqd->hw_tag = -1;
-	cfqd->last_end_sync_rq = jiffies;
+	cfqd->last_delayed_sync = jiffies - HZ;
 	INIT_RCU_HEAD(&cfqd->rcu);
 	return cfqd;
 }
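
[Editor's note] On the changed completion-path test: time_after() is the
wrap-safe jiffies comparison from include/linux/jiffies.h, so
!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now) holds exactly
when the request completed at or past its sync fifo deadline, i.e. when
it was genuinely delayed. A small userspace sketch with made-up tick
values:

#include <stdio.h>

/* Same wrap-safe comparison as the kernel's time_after() macro
 * (include/linux/jiffies.h): true if time a is after time b, even
 * across a jiffies counter wrap. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long now = 1000;		/* completion time (ticks) */
	unsigned long start_time = 850;		/* request was queued here */
	unsigned long fifo_expire_sync = 100;	/* sync fifo deadline */

	/* Mirrors the patched completion path: the deadline (950) has
	 * already passed at completion (1000), so this sync request
	 * counts as delayed and last_delayed_sync would be reset. */
	if (!time_after(start_time + fifo_expire_sync, now))
		printf("sync request was delayed\n");
	return 0;
}

The new initialization, last_delayed_sync = jiffies - HZ, fits the same
model: it pretends the last delayed sync happened a second before init,
so freshly created queues start with a ramped-up async depth (with the
default 100 ms sync slice, HZ ticks of headroom yields a depth of 10)
instead of being throttled from the first write.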