author    Jens Axboe <jens.axboe@oracle.com>  2009-04-07 05:38:31 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2009-04-07 05:38:31 -0400
commit    b029195dda0129b427c6e579a3bb3ae752da3a93 (patch)
tree      7f8752d2fefac40b0b25720cc1b0fbbd8aed173a /block
parent    2385327725419a76cfbca7258abd95908b8ba9eb (diff)
cfq-iosched: don't let idling interfere with plugging
When CFQ is waiting for a new request from a process, currently it'll
immediately restart queuing when it sees such a request. This doesn't
work very well with streamed IO, since we then end up splitting IO
that would otherwise have been merged nicely. For a simple dd test,
this causes 10x as many requests to be issued as we should have.
Normally this goes unnoticed due to the low overhead of requests
at the device side, but some hardware is very sensitive to request
sizes and there it can cause big slowdowns.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
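For context on the helpers this patch uses: CFQ_CFQQ_FNS(must_dispatch) in the
diff below expands to the usual trio of mark/clear/test accessors for a
per-queue flag bit. A minimal sketch of that macro pattern, paraphrased from
cfq-iosched.c of this vintage (illustrative, not part of the patch):

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	/* set the per-queue flag bit for this name */			\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	/* clear the per-queue flag bit for this name */		\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)	\
{									\
	/* test the per-queue flag bit for this name */			\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

With that, the single CFQ_CFQQ_FNS(must_dispatch) line added below generates
the cfq_mark_cfqq_must_dispatch(), cfq_clear_cfqq_must_dispatch() and
cfq_cfqq_must_dispatch() helpers used throughout the rest of the diff.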
Diffstat (limited to 'block')
 block/cfq-iosched.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 11efcf196e74..a4809de6fea6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -177,6 +177,7 @@ struct cfq_queue {
 enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
 	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
+	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
 	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
 	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
 	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
@@ -202,6 +203,7 @@ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
 
 CFQ_CFQQ_FNS(on_rr);
 CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(must_alloc);
 CFQ_CFQQ_FNS(must_alloc_slice);
 CFQ_CFQQ_FNS(fifo_expire);
@@ -774,6 +776,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 		cfqq->slice_dispatch = 0;
 
 		cfq_clear_cfqq_wait_request(cfqq);
+		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 		cfq_clear_cfqq_fifo_expire(cfqq);
 		cfq_mark_cfqq_slice_new(cfqq);
@@ -1009,7 +1012,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	/*
 	 * The active queue has run out of time, expire it and select new.
 	 */
-	if (cfq_slice_used(cfqq))
+	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
 		goto expire;
 
 	/*
@@ -1173,6 +1176,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		 */
 		cfq_dispatch_request(cfqd, cfqq);
 		cfqq->slice_dispatch++;
+		cfq_clear_cfqq_must_dispatch(cfqq);
 
 		/*
 		 * expire an async queue immediately if it has used up its slice. idle
@@ -1898,14 +1902,13 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	if (cfqq == cfqd->active_queue) {
 		/*
-		 * if we are waiting for a request for this queue, let it rip
-		 * immediately and flag that we must not expire this queue
-		 * just now
+		 * Remember that we saw a request from this process, but
+		 * don't start queuing just yet. Otherwise we risk seeing lots
+		 * of tiny requests, because we disrupt the normal plugging
+		 * and merging.
 		 */
-		if (cfq_cfqq_wait_request(cfqq)) {
-			del_timer(&cfqd->idle_slice_timer);
-			blk_start_queueing(cfqd->queue);
-		}
+		if (cfq_cfqq_wait_request(cfqq))
+			cfq_mark_cfqq_must_dispatch(cfqq);
 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
 		/*
 		 * not the active queue - expire current slice if it is
@@ -2175,6 +2178,12 @@ static void cfq_idle_slice_timer(unsigned long data)
 		timed_out = 0;
 
 		/*
+		 * We saw a request before the queue expired, let it through
+		 */
+		if (cfq_cfqq_must_dispatch(cfqq))
+			goto out_kick;
+
+		/*
 		 * expired
 		 */
 		if (cfq_slice_used(cfqq))
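Note: the out_kick label targeted above lies outside this hunk. In
cfq_idle_slice_timer() of this era it defers the actual work to kblockd
rather than dispatching inline; roughly (a sketch of the surrounding code
from memory, not part of this patch):

out_kick:
	/* kick off a dispatch via the unplug work, not inline in the timer */
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);

So when must_dispatch is set, the pending request is let through by
scheduling a normal dispatch instead of the old del_timer() plus
blk_start_queueing() kick, giving the regular plugging and merging path a
window to coalesce the stream before requests reach the device, which is
the point of the patch.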