aboutsummaryrefslogtreecommitdiffstats
path: root/block/cfq-iosched.c
diff options
context:
space:
mode:
authorCorrado Zoccolo <czoccolo@gmail.com>2009-11-26 04:02:58 -0500
committerJens Axboe <jens.axboe@oracle.com>2009-11-26 04:39:31 -0500
commit76280aff1c7e9ae761cac4b48591c43cd7d69159 (patch)
treef8b354746a96cf45a4d0fc980df4e37e3c37b173 /block/cfq-iosched.c
parente4a229196a7c676514c78f6783f8994f64bf681c (diff)
cfq-iosched: idling on deep seeky sync queues
Seeky sync queues with large depth can gain an unfairly big share of disk time, at the expense of other seeky queues. This patch ensures that idling will be enabled for queues with I/O depth of at least 4 and a small think time. The decision to enable idling is sticky, until an idle window times out without seeing a new request. The reasoning behind the decision is that, if an application is using a large I/O depth, it is already optimized to make full utilization of the hardware, and therefore we reserve a slice of exclusive use for it.

Reported-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--block/cfq-iosched.c13
1 files changed, 12 insertions, 1 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2c1086acddfa..15f7238f527f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -260,6 +260,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
+	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 };
 
 #define CFQ_CFQQ_FNS(name)						\
@@ -286,6 +287,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(deep);
 #undef CFQ_CFQQ_FNS
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
@@ -2350,8 +2352,12 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
+	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
+		cfq_mark_cfqq_deep(cfqq);
+
 	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-	    (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq)))
+	    (!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples)
+	     && CFQQ_SEEKY(cfqq)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -2849,6 +2855,11 @@ static void cfq_idle_slice_timer(unsigned long data)
 	 */
 	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
 		goto out_kick;
+
+		/*
+		 * Queue depth flag is reset only when the idle didn't succeed
+		 */
+		cfq_clear_cfqq_deep(cfqq);
 	}
 expire:
 	cfq_slice_expired(cfqd, timed_out);