aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorCorrado Zoccolo <czoccolo@gmail.com>2009-10-26 17:44:04 -0400
committerJens Axboe <jens.axboe@oracle.com>2009-10-28 04:23:26 -0400
commit5db5d64277bf390056b1a87d0bb288c8b8553f96 (patch)
treeeced8cdeae49e6920bbb9ae18b33ed7001332233
parent1a1238a7dd48e48b3bba8f426a1d61c22c80d6d1 (diff)
cfq-iosched: adapt slice to number of processes doing I/O
When the number of processes performing I/O concurrently increases, a fixed time slice per process will cause large latencies. This patch, if low_latency mode is enabled, will scale the time slice assigned to each process according to a 300ms target latency. In order to keep fairness among processes: * The number of active processes is computed using a special form of running average, that quickly follows sudden increases (to keep latency low), and decreases slowly (to have fairness in spite of rapid decreases of this value). To safeguard sequential bandwidth, we impose a minimum time slice (computed using 2*cfq_slice_idle as base, adjusted according to priority and async-ness). Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--block/cfq-iosched.c53
1 file changed, 51 insertions, 2 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 418da9a49bb0..97d946585bc3 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -27,6 +27,8 @@ static const int cfq_slice_sync = HZ / 10;
27static int cfq_slice_async = HZ / 25; 27static int cfq_slice_async = HZ / 25;
28static const int cfq_slice_async_rq = 2; 28static const int cfq_slice_async_rq = 2;
29static int cfq_slice_idle = HZ / 125; 29static int cfq_slice_idle = HZ / 125;
30static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
31static const int cfq_hist_divisor = 4;
30 32
31/* 33/*
32 * offset from end of service tree 34 * offset from end of service tree
@@ -148,6 +150,8 @@ struct cfq_data {
148 struct rb_root prio_trees[CFQ_PRIO_LISTS]; 150 struct rb_root prio_trees[CFQ_PRIO_LISTS];
149 151
150 unsigned int busy_queues; 152 unsigned int busy_queues;
153 unsigned int busy_rt_queues;
154 unsigned int busy_queues_avg[2];
151 155
152 int rq_in_driver[2]; 156 int rq_in_driver[2];
153 int sync_flight; 157 int sync_flight;
@@ -315,10 +319,52 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
315 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); 319 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
316} 320}
317 321
322/*
323 * get averaged number of queues of RT/BE priority.
324 * average is updated, with a formula that gives more weight to higher numbers,
325 * to quickly follows sudden increases and decrease slowly
326 */
327
328static inline unsigned
329cfq_get_avg_queues(struct cfq_data *cfqd, bool rt) {
330 unsigned min_q, max_q;
331 unsigned mult = cfq_hist_divisor - 1;
332 unsigned round = cfq_hist_divisor / 2;
333 unsigned busy = cfqd->busy_rt_queues;
334
335 if (!rt)
336 busy = cfqd->busy_queues - cfqd->busy_rt_queues;
337
338 min_q = min(cfqd->busy_queues_avg[rt], busy);
339 max_q = max(cfqd->busy_queues_avg[rt], busy);
340 cfqd->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
341 cfq_hist_divisor;
342 return cfqd->busy_queues_avg[rt];
343}
344
318static inline void 345static inline void
319cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) 346cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
320{ 347{
321 cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; 348 unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
349 if (cfqd->cfq_latency) {
350 /* interested queues (we consider only the ones with the same
351 * priority class) */
352 unsigned iq = cfq_get_avg_queues(cfqd, cfq_class_rt(cfqq));
353 unsigned sync_slice = cfqd->cfq_slice[1];
354 unsigned expect_latency = sync_slice * iq;
355 if (expect_latency > cfq_target_latency) {
356 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
357 /* scale low_slice according to IO priority
358 * and sync vs async */
359 unsigned low_slice =
360 min(slice, base_low_slice * slice / sync_slice);
361 /* the adapted slice value is scaled to fit all iqs
362 * into the target latency */
363 slice = max(slice * cfq_target_latency / expect_latency,
364 low_slice);
365 }
366 }
367 cfqq->slice_end = jiffies + slice;
322 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); 368 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
323} 369}
324 370
@@ -669,7 +715,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
669 BUG_ON(cfq_cfqq_on_rr(cfqq)); 715 BUG_ON(cfq_cfqq_on_rr(cfqq));
670 cfq_mark_cfqq_on_rr(cfqq); 716 cfq_mark_cfqq_on_rr(cfqq);
671 cfqd->busy_queues++; 717 cfqd->busy_queues++;
672 718 if (cfq_class_rt(cfqq))
719 cfqd->busy_rt_queues++;
673 cfq_resort_rr_list(cfqd, cfqq); 720 cfq_resort_rr_list(cfqd, cfqq);
674} 721}
675 722
@@ -692,6 +739,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
692 739
693 BUG_ON(!cfqd->busy_queues); 740 BUG_ON(!cfqd->busy_queues);
694 cfqd->busy_queues--; 741 cfqd->busy_queues--;
742 if (cfq_class_rt(cfqq))
743 cfqd->busy_rt_queues--;
695} 744}
696 745
697/* 746/*