author:    Shaohua Li <shaohua.li@intel.com>    2010-03-01 03:20:54 -0500
committer: Jens Axboe <jens.axboe@oracle.com>   2010-03-01 03:20:54 -0500
commit:    abc3c744d0d7f4ad710a948ae73852ffea5fbc3b
tree:      f3aebe4f1ee8138db560b049f84d30a4b7348e8a
parent:    9a8c28c8311e30ba97499447d5a11662f5aea094
cfq-iosched: quantum check tweak
Currently a queue can dispatch at most 4 requests when other queues are busy. This isn't optimal: the device can handle more requests, for example, AHCI can handle 31 requests. I understand the limit is there for fairness, but we can apply a tweak: if the queue still has plenty of its slice left, we can ignore the limit. Testing shows this boosts my workload (two threads doing random reads on an SSD) from 78MB/s to 100MB/s.

Thanks to Corrado and Vivek for their suggestions on the patch.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
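For intuition, here is a minimal user-space sketch of the "slice used soon" estimate this patch introduces. It uses plain integers and ignores jiffies wraparound (the kernel code uses time_after()); the function and variable names are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/*
 * If servicing the requests already dispatched (estimated at roughly
 * slice_idle ticks each) would run past the end of the queue's time
 * slice, the slice is "used soon" and the queue is throttled at the
 * usual quantum instead of being allowed a deeper queue depth.
 */
static bool slice_used_soon(unsigned long now, unsigned long slice_end,
			    unsigned long slice_idle, unsigned int dispatched)
{
	return now + slice_idle * dispatched > slice_end;
}

int main(void)
{
	/* Example numbers, in "ticks": 100 ticks of slice left, 8 per request. */
	unsigned long now = 1000, slice_end = 1100, slice_idle = 8;

	/* 4 in flight: 1000 + 32 < 1100, plenty of slice left -> 0 */
	printf("4 dispatched:  %d\n", slice_used_soon(now, slice_end, slice_idle, 4));
	/* 16 in flight: 1000 + 128 > 1100, slice will run out -> 1 */
	printf("16 dispatched: %d\n", slice_used_soon(now, slice_end, slice_idle, 16));
	return 0;
}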
Diffstat (limited to 'block/cfq-iosched.c')
 block/cfq-iosched.c | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f27e535ce262..0db07d7771b5 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -19,7 +19,7 @@
  * tunables
  */
 /* max queue in one round of service */
-static const int cfq_quantum = 4;
+static const int cfq_quantum = 8;
 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
 /* maximum backwards seek, in KiB */
 static const int cfq_back_max = 16 * 1024;
@@ -2197,6 +2197,19 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
+static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
+	struct cfq_queue *cfqq)
+{
+	/* the queue hasn't finished any request, can't estimate */
+	if (cfq_cfqq_slice_new(cfqq))
+		return 1;
+	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
+		cfqq->slice_end))
+		return 1;
+
+	return 0;
+}
+
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned int max_dispatch;
@@ -2213,7 +2226,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
 		return false;
 
-	max_dispatch = cfqd->cfq_quantum;
+	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
 	if (cfq_class_idle(cfqq))
 		max_dispatch = 1;
 
@@ -2230,13 +2243,22 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		/*
 		 * We have other queues, don't allow more IO from this one
 		 */
-		if (cfqd->busy_queues > 1)
+		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
 			return false;
 
 		/*
 		 * Sole queue user, no limit
 		 */
-		max_dispatch = -1;
+		if (cfqd->busy_queues == 1)
+			max_dispatch = -1;
+		else
+			/*
+			 * Normally we start throttling cfqq when cfq_quantum/2
+			 * requests have been dispatched. But we can drive
+			 * deeper queue depths at the beginning of slice
+			 * subjected to upper limit of cfq_quantum.
+			 * */
+			max_dispatch = cfqd->cfq_quantum;
 	}
 
 	/*
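Taken together, the effective per-queue dispatch cap after this patch can be summarized with a small user-space sketch. This is a simplified model with hypothetical helper names; the real decision lives in cfq_may_dispatch() and also accounts for idle-class queues and async-vs-sync interactions:

#include <stdio.h>

/* Stand-in for the kernel's max_t(unsigned int, a, b). */
static unsigned int max_uint(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/*
 * A sole busy queue is unlimited; with competing queues, a queue whose
 * slice will run out soon is capped at quantum/2, otherwise it may ramp
 * up to the full quantum early in its slice.
 */
static unsigned int dispatch_cap(unsigned int quantum, int busy_queues,
				 int slice_used_soon)
{
	if (busy_queues == 1)
		return (unsigned int)-1;		/* sole queue: no limit */
	if (slice_used_soon)
		return max_uint(quantum / 2, 1);	/* normal throttle */
	return quantum;					/* early in slice: go deeper */
}

int main(void)
{
	printf("sole queue:           %u\n", dispatch_cap(8, 1, 0));
	printf("contended, early:     %u\n", dispatch_cap(8, 3, 0));
	printf("contended, slice low: %u\n", dispatch_cap(8, 3, 1));
	return 0;
}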