about summary refs log tree commit diff stats
path: root/block/cfq-iosched.c
diff options
context:
space:
mode:
authorJens Axboe <jens.axboe@oracle.com>2007-04-20 02:55:52 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-04-21 01:56:29 -0400
commita993800655ee516b6f6a6fc4c2ee13fedfb0590b (patch)
tree023b362b4b30bddf2232ade83d162a84ac88f74b /block/cfq-iosched.c
parent46fcc86dd71d70211e965102fb69414c90381880 (diff)
cfq-iosched: fix sequential write regression
We have a 10-15% performance regression for sequential writes on TCQ/NCQ enabled drives in 2.6.21-rcX after the CFQ update went in. It has been reported by Valerie Clement <valerie.clement@bull.net> and the Intel testing folks. The regression is because of CFQ's now more aggressive queue control, limiting the depth available to the device. This patch fixes that regression by allowing a greater depth when only one queue is busy. It has been tested to not impact sync-vs-async workloads too much - we still do a lot better than 2.6.20. Signed-off-by: Jens Axboe <jens.axboe@oracle.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 34
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index b6491c020f26..9e3797167c81 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -986,9 +986,9 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
986 * expire an async queue immediately if it has used up its slice. idle 986 * expire an async queue immediately if it has used up its slice. idle
987 * queue always expire after 1 dispatch round. 987 * queue always expire after 1 dispatch round.
988 */ 988 */
989 if ((!cfq_cfqq_sync(cfqq) && 989 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
990 cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) || 990 cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
991 cfq_class_idle(cfqq)) { 991 cfq_class_idle(cfqq))) {
992 cfqq->slice_end = jiffies + 1; 992 cfqq->slice_end = jiffies + 1;
993 cfq_slice_expired(cfqd, 0, 0); 993 cfq_slice_expired(cfqd, 0, 0);
994 } 994 }
@@ -1051,19 +1051,21 @@ cfq_dispatch_requests(request_queue_t *q, int force)
1051 while ((cfqq = cfq_select_queue(cfqd)) != NULL) { 1051 while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1052 int max_dispatch; 1052 int max_dispatch;
1053 1053
1054 /* 1054 if (cfqd->busy_queues > 1) {
1055 * Don't repeat dispatch from the previous queue. 1055 /*
1056 */ 1056 * Don't repeat dispatch from the previous queue.
1057 if (prev_cfqq == cfqq) 1057 */
1058 break; 1058 if (prev_cfqq == cfqq)
1059 break;
1059 1060
1060 /* 1061 /*
1061 * So we have dispatched before in this round, if the 1062 * So we have dispatched before in this round, if the
1062 * next queue has idling enabled (must be sync), don't 1063 * next queue has idling enabled (must be sync), don't
1063 * allow it service until the previous have continued. 1064 * allow it service until the previous have continued.
1064 */ 1065 */
1065 if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq)) 1066 if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
1066 break; 1067 break;
1068 }
1067 1069
1068 cfq_clear_cfqq_must_dispatch(cfqq); 1070 cfq_clear_cfqq_must_dispatch(cfqq);
1069 cfq_clear_cfqq_wait_request(cfqq); 1071 cfq_clear_cfqq_wait_request(cfqq);
@@ -1370,7 +1372,9 @@ retry:
1370 atomic_set(&cfqq->ref, 0); 1372 atomic_set(&cfqq->ref, 0);
1371 cfqq->cfqd = cfqd; 1373 cfqq->cfqd = cfqd;
1372 1374
1373 cfq_mark_cfqq_idle_window(cfqq); 1375 if (key != CFQ_KEY_ASYNC)
1376 cfq_mark_cfqq_idle_window(cfqq);
1377
1374 cfq_mark_cfqq_prio_changed(cfqq); 1378 cfq_mark_cfqq_prio_changed(cfqq);
1375 cfq_mark_cfqq_queue_new(cfqq); 1379 cfq_mark_cfqq_queue_new(cfqq);
1376 cfq_init_prio_data(cfqq); 1380 cfq_init_prio_data(cfqq);