author		Shaohua Li <shaohua.li@intel.com>	2009-11-03 14:25:02 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2009-11-03 14:25:02 -0500
commit		4b27e1bb442e964903f8a3fa6bdf33a602dc0941
tree		d7eecb66f6a75dbff292fbd03b643b04ed075289
parent		e6ec4fe24572ee265723d895ec4159e5559c8266
cfq-iosched: limit coop preemption
CFQ has an optimization for cooperating applications: if several io contexts issue requests close to each other, they get a boost. But the optimization can be abused. Consider two threads, a and b, working on one file. Thread a reads sectors s, s+2, s+4, ...; thread b reads sectors s+1, s+3, s+5, ... Both are sequential readers, so each can open an idle window. Thread a reads sector s, enters its idle window and wakes up b. Thread b reads sector s+1; since the current implementation of cfq_should_preempt() considers a and b cooperators, b preempts a. Thread b then reads sector s+1, enters its idle window and wakes up a. For the same reason, a preempts b and reads s+2, and the two threads keep cycling like this. The cycle can run for a very long time, with a and b occupying the whole disk queue and leaving other applications almost no chance to run.

Fix this by limiting coop preemption until a queue is scheduled normally again.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
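For reference, a minimal user-space sketch of the interleaved access pattern described above might look like the following. It is not part of the commit: the file path is a placeholder, a 512-byte logical block size is assumed, and O_DIRECT is used only so the reads reach the block layer rather than the page cache.

/*
 * Hypothetical reproducer sketch: thread a reads sectors s, s+2, s+4, ...
 * while thread b reads s+1, s+3, s+5, ... from the same file.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define SECTOR	512		/* assumed logical block size */
#define NSECT	(1L << 20)	/* sectors each thread walks over */

static int fd;

static void *reader(void *arg)
{
	long s = (long)arg;	/* 0 for thread a, 1 for thread b */
	void *buf;

	if (posix_memalign(&buf, SECTOR, SECTOR))
		return NULL;

	/* step by two sectors so the two threads interleave */
	for (; s < NSECT; s += 2)
		if (pread(fd, buf, SECTOR, (off_t)s * SECTOR) < 0)
			break;

	free(buf);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	fd = open("/path/to/testfile", O_RDONLY | O_DIRECT);	/* placeholder */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	pthread_create(&a, NULL, reader, (void *)0L);
	pthread_create(&b, NULL, reader, (void *)1L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	close(fd);
	return 0;
}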
 block/cfq-iosched.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5802e322b7ad..aa1e9535e358 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -196,6 +196,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* has done a coop jump of the queue */
+	CFQ_CFQQ_FLAG_coop_preempt,	/* coop preempt */
 };
 
 #define CFQ_CFQQ_FNS(name)						\
@@ -222,6 +223,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(coop_preempt);
 #undef CFQ_CFQQ_FNS
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
@@ -945,10 +947,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 {
 	if (!cfqq) {
 		cfqq = cfq_get_next_queue(cfqd);
-		if (cfqq)
+		if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
 			cfq_clear_cfqq_coop(cfqq);
 	}
 
+	if (cfqq)
+		cfq_clear_cfqq_coop_preempt(cfqq);
+
 	__cfq_set_active_queue(cfqd, cfqq);
 	return cfqq;
 }
@@ -2066,8 +2071,16 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
-	if (cfq_rq_close(cfqd, rq))
+	if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+	    cfqd->busy_queues == 1)) {
+		/*
+		 * Mark new queue coop_preempt, so its coop flag will not be
+		 * cleared when new queue gets scheduled at the very first time
+		 */
+		cfq_mark_cfqq_coop_preempt(new_cfqq);
+		cfq_mark_cfqq_coop(new_cfqq);
 		return true;
+	}
 
 	return false;
 }
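As an aside, the cfq_mark_cfqq_coop_preempt(), cfq_clear_cfqq_coop_preempt() and cfq_cfqq_coop_preempt() helpers used above are generated by the CFQ_CFQQ_FNS() macro extended in the first hunk. The macro body lies outside this diff, so the following is only a rough sketch of what such a per-flag helper generator looks like: one mark/clear/test triple per flag bit in cfqq->flags.

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)	\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}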