path: root/block
author	Jens Axboe <jens.axboe@oracle.com>	2009-04-14 08:18:16 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-04-15 02:28:12 -0400
commit	d6ceb25e8d8bccf826848c2621a50d02c0a7f4ae (patch)
tree	31dec01cb624b27a1c29a5886dd801a67bba525e /block
parent	053c525fcf976810f023d96472f414c0d5e6339b (diff)
cfq-iosched: don't delay queue kick for a merged request
"Zhang, Yanmin" <yanmin_zhang@linux.intel.com> reports that commit b029195dda0129b427c6e579a3bb3ae752da3a93 introduced a regression of about 50% with sequential threaded read workloads. The test case is: tiotest -k0 -k1 -k3 -f 80 -t 32 which starts 32 threads each reading a 80MB file. Twiddle the kick queue logic so that we do start IO immediately, if it appears to be a fully merged request. We can't really detect that, so just check if the request is bigger than a page or not. The assumption is that since single bio issues will first queue a single request with just one page attached and then later do merges on that, if we already have more than a page worth of data in the request, then the request is most likely good to go. Verified that this doesn't cause a regression with the test case that commit b029195dda0129b427c6e579a3bb3ae752da3a93 was fixing. It does not, we still see maximum sized requests for the queue-then-merge cases. Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	11
1 file changed, 9 insertions, 2 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cba8a5d91bfa..56e9d8503cf1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1903,10 +1903,17 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * Remember that we saw a request from this process, but
 		 * don't start queuing just yet. Otherwise we risk seeing lots
 		 * of tiny requests, because we disrupt the normal plugging
-		 * and merging.
+		 * and merging. If the request is already larger than a single
+		 * page, let it rip immediately. For that case we assume that
+		 * merging is already done.
 		 */
-		if (cfq_cfqq_wait_request(cfqq))
+		if (cfq_cfqq_wait_request(cfqq)) {
+			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE) {
+				del_timer(&cfqd->idle_slice_timer);
+				blk_start_queueing(cfqd->queue);
+			}
 			cfq_mark_cfqq_must_dispatch(cfqq);
+		}
 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
 		/*
 		 * not the active queue - expire current slice if it is