path: root/block
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig.iosched |   26
-rw-r--r--  block/Makefile        |    1
-rw-r--r--  block/as-iosched.c    | 1520
-rw-r--r--  block/cfq-iosched.c   |  626
-rw-r--r--  block/elevator.c      |   10
5 files changed, 538 insertions, 1645 deletions
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 7e803fc88770..8bd105115a69 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -12,24 +12,14 @@ config IOSCHED_NOOP
           that do their own scheduling and require only minimal assistance from
           the kernel.
 
-config IOSCHED_AS
-        tristate "Anticipatory I/O scheduler"
-        default y
-        ---help---
-          The anticipatory I/O scheduler is generally a good choice for most
-          environments, but is quite large and complex when compared to the
-          deadline I/O scheduler, it can also be slower in some cases
-          especially some database loads.
-
 config IOSCHED_DEADLINE
         tristate "Deadline I/O scheduler"
         default y
         ---help---
-          The deadline I/O scheduler is simple and compact, and is often as
-          good as the anticipatory I/O scheduler, and in some database
-          workloads, better. In the case of a single process performing I/O to
-          a disk at any one time, its behaviour is almost identical to the
-          anticipatory I/O scheduler and so is a good choice.
+          The deadline I/O scheduler is simple and compact. It will provide
+          CSCAN service with FIFO expiration of requests, switching to
+          a new point in the service tree and doing a batch of IO from there
+          in case of expiry.
 
 config IOSCHED_CFQ
         tristate "CFQ I/O scheduler"
@@ -37,7 +27,9 @@ config IOSCHED_CFQ
         ---help---
           The CFQ I/O scheduler tries to distribute bandwidth equally
           among all processes in the system. It should provide a fair
-          working environment, suitable for desktop systems.
+          and low latency working environment, suitable for both desktop
+          and server systems.
+
           This is the default I/O scheduler.
 
 choice
@@ -47,9 +39,6 @@ choice
           Select the I/O scheduler which will be used by default for all
           block devices.
 
-        config DEFAULT_AS
-                bool "Anticipatory" if IOSCHED_AS=y
-
         config DEFAULT_DEADLINE
                 bool "Deadline" if IOSCHED_DEADLINE=y
 
@@ -63,7 +52,6 @@ endchoice
 
 config DEFAULT_IOSCHED
         string
-        default "anticipatory" if DEFAULT_AS
         default "deadline" if DEFAULT_DEADLINE
         default "cfq" if DEFAULT_CFQ
         default "noop" if DEFAULT_NOOP
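The DEFAULT_IOSCHED string chosen here is only a compile-time preference; at boot the block layer looks the name up among the schedulers that were actually built in and falls back if it is missing. The snippet below is a standalone userspace model of that lookup, not the kernel's elevator code; the registered[] table and pick_default() helper are invented for illustration.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the schedulers compiled into a post-patch
 * kernel: "anticipatory" is no longer in the list. */
static const char *registered[] = { "noop", "deadline", "cfq" };

/* Model of resolving a default scheduler name against what is registered,
 * with "noop" as the always-available fallback. */
static const char *pick_default(const char *wanted)
{
        size_t i;

        for (i = 0; i < sizeof(registered) / sizeof(registered[0]); i++)
                if (strcmp(registered[i], wanted) == 0)
                        return registered[i];
        return "noop";
}

int main(void)
{
        printf("%s\n", pick_default("anticipatory")); /* -> noop */
        printf("%s\n", pick_default("cfq"));          /* -> cfq  */
        return 0;
}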
diff --git a/block/Makefile b/block/Makefile
index ba74ca6bfa14..7914108952f2 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 
 obj-$(CONFIG_BLK_DEV_BSG)      += bsg.o
 obj-$(CONFIG_IOSCHED_NOOP)     += noop-iosched.o
-obj-$(CONFIG_IOSCHED_AS)       += as-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)      += cfq-iosched.o
 
diff --git a/block/as-iosched.c b/block/as-iosched.c
deleted file mode 100644
index ce8ba57c6557..000000000000
--- a/block/as-iosched.c
+++ /dev/null
@@ -1,1520 +0,0 @@
1/*
2 * Anticipatory & deadline i/o scheduler.
3 *
4 * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
5 * Nick Piggin <nickpiggin@yahoo.com.au>
6 *
7 */
8#include <linux/kernel.h>
9#include <linux/fs.h>
10#include <linux/blkdev.h>
11#include <linux/elevator.h>
12#include <linux/bio.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/init.h>
16#include <linux/compiler.h>
17#include <linux/rbtree.h>
18#include <linux/interrupt.h>
19
20/*
21 * See Documentation/block/as-iosched.txt
22 */
23
24/*
25 * max time before a read is submitted.
26 */
27#define default_read_expire (HZ / 8)
28
29/*
30 * ditto for writes, these limits are not hard, even
31 * if the disk is capable of satisfying them.
32 */
33#define default_write_expire (HZ / 4)
34
35/*
36 * read_batch_expire describes how long we will allow a stream of reads to
37 * persist before looking to see whether it is time to switch over to writes.
38 */
39#define default_read_batch_expire (HZ / 2)
40
41/*
42 * write_batch_expire describes how long we want a stream of writes to run for.
43 * This is not a hard limit, but a target we set for the auto-tuning thingy.
44 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
45 * a short amount of time...
46 */
47#define default_write_batch_expire (HZ / 8)
48
49/*
50 * max time we may wait to anticipate a read (default around 6ms)
51 */
52#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
53
54/*
55 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
56 * however huge values tend to interfere and not decay fast enough. A program
57 * might be in a non-io phase of operation. Waiting on user input for example,
58 * or doing a lengthy computation. A small penalty can be justified there, and
59 * will still catch out those processes that constantly have large thinktimes.
60 */
61#define MAX_THINKTIME (HZ/50UL)
62
63/* Bits in as_io_context.state */
64enum as_io_states {
65 AS_TASK_RUNNING=0, /* Process has not exited */
66 AS_TASK_IOSTARTED, /* Process has started some IO */
67 AS_TASK_IORUNNING, /* Process has completed some IO */
68};
69
70enum anticipation_status {
71 ANTIC_OFF=0, /* Not anticipating (normal operation) */
72 ANTIC_WAIT_REQ, /* The last read has not yet completed */
73 ANTIC_WAIT_NEXT, /* Currently anticipating a request vs
74 last read (which has completed) */
75 ANTIC_FINISHED, /* Anticipating but have found a candidate
76 * or timed out */
77};
78
79struct as_data {
80 /*
81 * run time data
82 */
83
84 struct request_queue *q; /* the "owner" queue */
85
86 /*
87 * requests (as_rq s) are present on both sort_list and fifo_list
88 */
89 struct rb_root sort_list[2];
90 struct list_head fifo_list[2];
91
92 struct request *next_rq[2]; /* next in sort order */
93 sector_t last_sector[2]; /* last SYNC & ASYNC sectors */
94
95 unsigned long exit_prob; /* probability a task will exit while
96 being waited on */
97	unsigned long exit_no_coop;	/* probability an exited task will
98 not be part of a later cooperating
99 request */
100 unsigned long new_ttime_total; /* mean thinktime on new proc */
101 unsigned long new_ttime_mean;
102 u64 new_seek_total; /* mean seek on new proc */
103 sector_t new_seek_mean;
104
105 unsigned long current_batch_expires;
106 unsigned long last_check_fifo[2];
107 int changed_batch; /* 1: waiting for old batch to end */
108 int new_batch; /* 1: waiting on first read complete */
109 int batch_data_dir; /* current batch SYNC / ASYNC */
110 int write_batch_count; /* max # of reqs in a write batch */
111 int current_write_count; /* how many requests left this batch */
112 int write_batch_idled; /* has the write batch gone idle? */
113
114 enum anticipation_status antic_status;
115 unsigned long antic_start; /* jiffies: when it started */
116 struct timer_list antic_timer; /* anticipatory scheduling timer */
117 struct work_struct antic_work; /* Deferred unplugging */
118 struct io_context *io_context; /* Identify the expected process */
119 int ioc_finished; /* IO associated with io_context is finished */
120 int nr_dispatched;
121
122 /*
123 * settings that change how the i/o scheduler behaves
124 */
125 unsigned long fifo_expire[2];
126 unsigned long batch_expire[2];
127 unsigned long antic_expire;
128};
129
130/*
131 * per-request data.
132 */
133enum arq_state {
134 AS_RQ_NEW=0, /* New - not referenced and not on any lists */
135 AS_RQ_QUEUED, /* In the request queue. It belongs to the
136 scheduler */
137 AS_RQ_DISPATCHED, /* On the dispatch list. It belongs to the
138 driver now */
139 AS_RQ_PRESCHED, /* Debug poisoning for requests being used */
140 AS_RQ_REMOVED,
141 AS_RQ_MERGED,
142 AS_RQ_POSTSCHED, /* when they shouldn't be */
143};
144
145#define RQ_IOC(rq) ((struct io_context *) (rq)->elevator_private)
146#define RQ_STATE(rq) ((enum arq_state)(rq)->elevator_private2)
147#define RQ_SET_STATE(rq, state) ((rq)->elevator_private2 = (void *) state)
148
149static DEFINE_PER_CPU(unsigned long, as_ioc_count);
150static struct completion *ioc_gone;
151static DEFINE_SPINLOCK(ioc_gone_lock);
152
153static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
154static void as_antic_stop(struct as_data *ad);
155
156/*
157 * IO Context helper functions
158 */
159
160/* Called to deallocate the as_io_context */
161static void free_as_io_context(struct as_io_context *aic)
162{
163 kfree(aic);
164 elv_ioc_count_dec(as_ioc_count);
165 if (ioc_gone) {
166 /*
167 * AS scheduler is exiting, grab exit lock and check
168 * the pending io context count. If it hits zero,
169 * complete ioc_gone and set it back to NULL.
170 */
171 spin_lock(&ioc_gone_lock);
172 if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
173 complete(ioc_gone);
174 ioc_gone = NULL;
175 }
176 spin_unlock(&ioc_gone_lock);
177 }
178}
179
180static void as_trim(struct io_context *ioc)
181{
182 spin_lock_irq(&ioc->lock);
183 if (ioc->aic)
184 free_as_io_context(ioc->aic);
185 ioc->aic = NULL;
186 spin_unlock_irq(&ioc->lock);
187}
188
189/* Called when the task exits */
190static void exit_as_io_context(struct as_io_context *aic)
191{
192 WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
193 clear_bit(AS_TASK_RUNNING, &aic->state);
194}
195
196static struct as_io_context *alloc_as_io_context(void)
197{
198 struct as_io_context *ret;
199
200 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
201 if (ret) {
202 ret->dtor = free_as_io_context;
203 ret->exit = exit_as_io_context;
204 ret->state = 1 << AS_TASK_RUNNING;
205 atomic_set(&ret->nr_queued, 0);
206 atomic_set(&ret->nr_dispatched, 0);
207 spin_lock_init(&ret->lock);
208 ret->ttime_total = 0;
209 ret->ttime_samples = 0;
210 ret->ttime_mean = 0;
211 ret->seek_total = 0;
212 ret->seek_samples = 0;
213 ret->seek_mean = 0;
214 elv_ioc_count_inc(as_ioc_count);
215 }
216
217 return ret;
218}
219
220/*
221 * If the current task has no AS IO context then create one and initialise it.
222 * Then take a ref on the task's io context and return it.
223 */
224static struct io_context *as_get_io_context(int node)
225{
226 struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
227 if (ioc && !ioc->aic) {
228 ioc->aic = alloc_as_io_context();
229 if (!ioc->aic) {
230 put_io_context(ioc);
231 ioc = NULL;
232 }
233 }
234 return ioc;
235}
236
237static void as_put_io_context(struct request *rq)
238{
239 struct as_io_context *aic;
240
241 if (unlikely(!RQ_IOC(rq)))
242 return;
243
244 aic = RQ_IOC(rq)->aic;
245
246 if (rq_is_sync(rq) && aic) {
247 unsigned long flags;
248
249 spin_lock_irqsave(&aic->lock, flags);
250 set_bit(AS_TASK_IORUNNING, &aic->state);
251 aic->last_end_request = jiffies;
252 spin_unlock_irqrestore(&aic->lock, flags);
253 }
254
255 put_io_context(RQ_IOC(rq));
256}
257
258/*
259 * rb tree support functions
260 */
261#define RQ_RB_ROOT(ad, rq) (&(ad)->sort_list[rq_is_sync((rq))])
262
263static void as_add_rq_rb(struct as_data *ad, struct request *rq)
264{
265 struct request *alias;
266
267 while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
268 as_move_to_dispatch(ad, alias);
269 as_antic_stop(ad);
270 }
271}
272
273static inline void as_del_rq_rb(struct as_data *ad, struct request *rq)
274{
275 elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
276}
277
278/*
279 * IO Scheduler proper
280 */
281
282#define MAXBACK (1024 * 1024) /*
283 * Maximum distance the disk will go backward
284 * for a request.
285 */
286
287#define BACK_PENALTY 2
288
289/*
290 * as_choose_req selects the preferred one of two requests of the same data_dir
291 * ignoring time - eg. timeouts, which is the job of as_dispatch_request
292 */
293static struct request *
294as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
295{
296 int data_dir;
297 sector_t last, s1, s2, d1, d2;
298 int r1_wrap=0, r2_wrap=0; /* requests are behind the disk head */
299 const sector_t maxback = MAXBACK;
300
301 if (rq1 == NULL || rq1 == rq2)
302 return rq2;
303 if (rq2 == NULL)
304 return rq1;
305
306 data_dir = rq_is_sync(rq1);
307
308 last = ad->last_sector[data_dir];
309 s1 = blk_rq_pos(rq1);
310 s2 = blk_rq_pos(rq2);
311
312 BUG_ON(data_dir != rq_is_sync(rq2));
313
314 /*
315 * Strict one way elevator _except_ in the case where we allow
316 * short backward seeks which are biased as twice the cost of a
317 * similar forward seek.
318 */
319 if (s1 >= last)
320 d1 = s1 - last;
321 else if (s1+maxback >= last)
322 d1 = (last - s1)*BACK_PENALTY;
323 else {
324 r1_wrap = 1;
325 d1 = 0; /* shut up, gcc */
326 }
327
328 if (s2 >= last)
329 d2 = s2 - last;
330 else if (s2+maxback >= last)
331 d2 = (last - s2)*BACK_PENALTY;
332 else {
333 r2_wrap = 1;
334 d2 = 0;
335 }
336
337 /* Found required data */
338 if (!r1_wrap && r2_wrap)
339 return rq1;
340 else if (!r2_wrap && r1_wrap)
341 return rq2;
342 else if (r1_wrap && r2_wrap) {
343 /* both behind the head */
344 if (s1 <= s2)
345 return rq1;
346 else
347 return rq2;
348 }
349
350 /* Both requests in front of the head */
351 if (d1 < d2)
352 return rq1;
353 else if (d2 < d1)
354 return rq2;
355 else {
356 if (s1 >= s2)
357 return rq1;
358 else
359 return rq2;
360 }
361}
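The one-way-elevator bias above is easiest to see with numbers. The standalone helper below is not from the file; it reproduces just the distance calculation with the same MAXBACK and BACK_PENALTY constants.

#include <stdio.h>
#include <stdint.h>

#define MAXBACK         (1024 * 1024)
#define BACK_PENALTY    2

/* Cost of moving the head from 'last' to sector 's': forward seeks cost
 * their length, short backward seeks cost double, and anything more than
 * MAXBACK behind the head is treated as wrapped (-1 here). */
static long long seek_cost(uint64_t last, uint64_t s)
{
        if (s >= last)
                return (long long)(s - last);
        if (s + MAXBACK >= last)
                return (long long)(last - s) * BACK_PENALTY;
        return -1;
}

int main(void)
{
        uint64_t head = 2000000;

        /* 400 sectors behind costs 800, so it still beats a request
         * 1000 sectors ahead (cost 1000); a request far behind wraps. */
        printf("ahead 1000: %lld\n", seek_cost(head, head + 1000));
        printf("behind 400: %lld\n", seek_cost(head, head - 400));
        printf("far behind: %lld\n", seek_cost(head, 100));
        return 0;
}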
362
363/*
364 * as_find_next_rq finds the next request after @prev in elevator order.
365 * this with as_choose_req form the basis for how the scheduler chooses
366 * what request to process next. Anticipation works on top of this.
367 */
368static struct request *
369as_find_next_rq(struct as_data *ad, struct request *last)
370{
371 struct rb_node *rbnext = rb_next(&last->rb_node);
372 struct rb_node *rbprev = rb_prev(&last->rb_node);
373 struct request *next = NULL, *prev = NULL;
374
375 BUG_ON(RB_EMPTY_NODE(&last->rb_node));
376
377 if (rbprev)
378 prev = rb_entry_rq(rbprev);
379
380 if (rbnext)
381 next = rb_entry_rq(rbnext);
382 else {
383 const int data_dir = rq_is_sync(last);
384
385 rbnext = rb_first(&ad->sort_list[data_dir]);
386 if (rbnext && rbnext != &last->rb_node)
387 next = rb_entry_rq(rbnext);
388 }
389
390 return as_choose_req(ad, next, prev);
391}
392
393/*
394 * anticipatory scheduling functions follow
395 */
396
397/*
398 * as_antic_expired tells us when we have anticipated too long.
399 * The funny "absolute difference" math on the elapsed time is to handle
400 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
401 */
402static int as_antic_expired(struct as_data *ad)
403{
404 long delta_jif;
405
406 delta_jif = jiffies - ad->antic_start;
407 if (unlikely(delta_jif < 0))
408 delta_jif = -delta_jif;
409 if (delta_jif < ad->antic_expire)
410 return 0;
411
412 return 1;
413}
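A standalone illustration of the wrap-safe arithmetic used above; the values are chosen to straddle the jiffies wrap point and are otherwise arbitrary.

#include <stdio.h>
#include <limits.h>

int main(void)
{
        unsigned long antic_start = ULONG_MAX - 15;  /* 16 ticks before wrap */
        unsigned long jiffies = 16;                  /* 16 ticks after wrap  */
        long delta_jif = jiffies - antic_start;      /* modular: 32, not huge */

        if (delta_jif < 0)                           /* same guard as above   */
                delta_jif = -delta_jif;
        printf("elapsed: %ld jiffies\n", delta_jif); /* prints 32 */
        return 0;
}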
414
415/*
416 * as_antic_waitnext starts anticipating that a nice request will soon be
417 * submitted. See also as_antic_waitreq
418 */
419static void as_antic_waitnext(struct as_data *ad)
420{
421 unsigned long timeout;
422
423 BUG_ON(ad->antic_status != ANTIC_OFF
424 && ad->antic_status != ANTIC_WAIT_REQ);
425
426 timeout = ad->antic_start + ad->antic_expire;
427
428 mod_timer(&ad->antic_timer, timeout);
429
430 ad->antic_status = ANTIC_WAIT_NEXT;
431}
432
433/*
434 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
435 * until the request that we're anticipating on has finished. This means we
436 * are timing from when the candidate process wakes up hopefully.
437 */
438static void as_antic_waitreq(struct as_data *ad)
439{
440 BUG_ON(ad->antic_status == ANTIC_FINISHED);
441 if (ad->antic_status == ANTIC_OFF) {
442 if (!ad->io_context || ad->ioc_finished)
443 as_antic_waitnext(ad);
444 else
445 ad->antic_status = ANTIC_WAIT_REQ;
446 }
447}
448
449/*
450 * This is called directly by the functions in this file to stop anticipation.
451 * We kill the timer and schedule a call to the request_fn asap.
452 */
453static void as_antic_stop(struct as_data *ad)
454{
455 int status = ad->antic_status;
456
457 if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
458 if (status == ANTIC_WAIT_NEXT)
459 del_timer(&ad->antic_timer);
460 ad->antic_status = ANTIC_FINISHED;
461 /* see as_work_handler */
462 kblockd_schedule_work(ad->q, &ad->antic_work);
463 }
464}
465
466/*
467 * as_antic_timeout is the timer function set by as_antic_waitnext.
468 */
469static void as_antic_timeout(unsigned long data)
470{
471 struct request_queue *q = (struct request_queue *)data;
472 struct as_data *ad = q->elevator->elevator_data;
473 unsigned long flags;
474
475 spin_lock_irqsave(q->queue_lock, flags);
476 if (ad->antic_status == ANTIC_WAIT_REQ
477 || ad->antic_status == ANTIC_WAIT_NEXT) {
478 struct as_io_context *aic;
479 spin_lock(&ad->io_context->lock);
480 aic = ad->io_context->aic;
481
482 ad->antic_status = ANTIC_FINISHED;
483 kblockd_schedule_work(q, &ad->antic_work);
484
485 if (aic->ttime_samples == 0) {
486 /* process anticipated on has exited or timed out*/
487 ad->exit_prob = (7*ad->exit_prob + 256)/8;
488 }
489 if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
490 /* process not "saved" by a cooperating request */
491 ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
492 }
493 spin_unlock(&ad->io_context->lock);
494 }
495 spin_unlock_irqrestore(q->queue_lock, flags);
496}
497
498static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
499 unsigned long ttime)
500{
501 /* fixed point: 1.0 == 1<<8 */
502 if (aic->ttime_samples == 0) {
503 ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
504 ad->new_ttime_mean = ad->new_ttime_total / 256;
505
506 ad->exit_prob = (7*ad->exit_prob)/8;
507 }
508 aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
509 aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
510 aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
511}
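To see how the 8.8 fixed-point decay above behaves, here is a standalone re-run of the same three update lines on a made-up sample stream.

#include <stdio.h>

int main(void)
{
        unsigned long samples = 0, total = 0, mean = 0;
        unsigned long ttime[] = { 4, 4, 4, 40, 4, 4 };  /* jiffies, made up */
        int i;

        for (i = 0; i < 6; i++) {
                samples = (7 * samples + 256) / 8;
                total = (7 * total + 256 * ttime[i]) / 8;
                mean = (total + 128) / samples;
                printf("after sample %d: mean ~%lu jiffies\n", i, mean);
        }
        /* The single 40-jiffy outlier pulls the mean up for a while and
         * then decays back over the following samples. */
        return 0;
}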
512
513static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
514 sector_t sdist)
515{
516 u64 total;
517
518 if (aic->seek_samples == 0) {
519 ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
520 ad->new_seek_mean = ad->new_seek_total / 256;
521 }
522
523 /*
524 * Don't allow the seek distance to get too large from the
525 * odd fragment, pagein, etc
526 */
527 if (aic->seek_samples <= 60) /* second&third seek */
528 sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
529 else
530 sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);
531
532 aic->seek_samples = (7*aic->seek_samples + 256) / 8;
533 aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
534 total = aic->seek_total + (aic->seek_samples/2);
535 do_div(total, aic->seek_samples);
536 aic->seek_mean = (sector_t)total;
537}
538
539/*
540 * as_update_iohist keeps a decaying histogram of IO thinktimes, and
541 * updates @aic->ttime_mean based on that. It is called when a new
542 * request is queued.
543 */
544static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
545 struct request *rq)
546{
547 int data_dir = rq_is_sync(rq);
548 unsigned long thinktime = 0;
549 sector_t seek_dist;
550
551 if (aic == NULL)
552 return;
553
554 if (data_dir == BLK_RW_SYNC) {
555 unsigned long in_flight = atomic_read(&aic->nr_queued)
556 + atomic_read(&aic->nr_dispatched);
557 spin_lock(&aic->lock);
558 if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
559 test_bit(AS_TASK_IOSTARTED, &aic->state)) {
560 /* Calculate read -> read thinktime */
561 if (test_bit(AS_TASK_IORUNNING, &aic->state)
562 && in_flight == 0) {
563 thinktime = jiffies - aic->last_end_request;
564 thinktime = min(thinktime, MAX_THINKTIME-1);
565 }
566 as_update_thinktime(ad, aic, thinktime);
567
568 /* Calculate read -> read seek distance */
569 if (aic->last_request_pos < blk_rq_pos(rq))
570 seek_dist = blk_rq_pos(rq) -
571 aic->last_request_pos;
572 else
573 seek_dist = aic->last_request_pos -
574 blk_rq_pos(rq);
575 as_update_seekdist(ad, aic, seek_dist);
576 }
577 aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
578 set_bit(AS_TASK_IOSTARTED, &aic->state);
579 spin_unlock(&aic->lock);
580 }
581}
582
583/*
584 * as_close_req decides if one request is considered "close" to the
585 * previous one issued.
586 */
587static int as_close_req(struct as_data *ad, struct as_io_context *aic,
588 struct request *rq)
589{
590 unsigned long delay; /* jiffies */
591 sector_t last = ad->last_sector[ad->batch_data_dir];
592 sector_t next = blk_rq_pos(rq);
593 sector_t delta; /* acceptable close offset (in sectors) */
594 sector_t s;
595
596 if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
597 delay = 0;
598 else
599 delay = jiffies - ad->antic_start;
600
601 if (delay == 0)
602 delta = 8192;
603 else if (delay <= (20 * HZ / 1000) && delay <= ad->antic_expire)
604 delta = 8192 << delay;
605 else
606 return 1;
607
608 if ((last <= next + (delta>>1)) && (next <= last + delta))
609 return 1;
610
611 if (last < next)
612 s = next - last;
613 else
614 s = last - next;
615
616 if (aic->seek_samples == 0) {
617 /*
618 * Process has just started IO. Use past statistics to
619 * gauge success possibility
620 */
621 if (ad->new_seek_mean > s) {
622 /* this request is better than what we're expecting */
623 return 1;
624 }
625
626 } else {
627 if (aic->seek_mean > s) {
628 /* this request is better than what we're expecting */
629 return 1;
630 }
631 }
632
633 return 0;
634}
635
636/*
637 * as_can_break_anticipation returns true if we have been anticipating this
638 * request.
639 *
640 * It also returns true if the process against which we are anticipating
641 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
642 * dispatch it ASAP, because we know that application will not be submitting
643 * any new reads.
644 *
645 * If the task which has submitted the request has exited, break anticipation.
646 *
647 * If this task has queued some other IO, do not enter anticipation.
648 */
649static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
650{
651 struct io_context *ioc;
652 struct as_io_context *aic;
653
654 ioc = ad->io_context;
655 BUG_ON(!ioc);
656 spin_lock(&ioc->lock);
657
658 if (rq && ioc == RQ_IOC(rq)) {
659 /* request from same process */
660 spin_unlock(&ioc->lock);
661 return 1;
662 }
663
664 if (ad->ioc_finished && as_antic_expired(ad)) {
665 /*
666 * In this situation status should really be FINISHED,
667 * however the timer hasn't had the chance to run yet.
668 */
669 spin_unlock(&ioc->lock);
670 return 1;
671 }
672
673 aic = ioc->aic;
674 if (!aic) {
675 spin_unlock(&ioc->lock);
676 return 0;
677 }
678
679 if (atomic_read(&aic->nr_queued) > 0) {
680 /* process has more requests queued */
681 spin_unlock(&ioc->lock);
682 return 1;
683 }
684
685 if (atomic_read(&aic->nr_dispatched) > 0) {
686 /* process has more requests dispatched */
687 spin_unlock(&ioc->lock);
688 return 1;
689 }
690
691 if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) {
692 /*
693 * Found a close request that is not one of ours.
694 *
695 * This makes close requests from another process update
696 * our IO history. Is generally useful when there are
697 * two or more cooperating processes working in the same
698 * area.
699 */
700 if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
701 if (aic->ttime_samples == 0)
702 ad->exit_prob = (7*ad->exit_prob + 256)/8;
703
704 ad->exit_no_coop = (7*ad->exit_no_coop)/8;
705 }
706
707 as_update_iohist(ad, aic, rq);
708 spin_unlock(&ioc->lock);
709 return 1;
710 }
711
712 if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
713 /* process anticipated on has exited */
714 if (aic->ttime_samples == 0)
715 ad->exit_prob = (7*ad->exit_prob + 256)/8;
716
717 if (ad->exit_no_coop > 128) {
718 spin_unlock(&ioc->lock);
719 return 1;
720 }
721 }
722
723 if (aic->ttime_samples == 0) {
724 if (ad->new_ttime_mean > ad->antic_expire) {
725 spin_unlock(&ioc->lock);
726 return 1;
727 }
728 if (ad->exit_prob * ad->exit_no_coop > 128*256) {
729 spin_unlock(&ioc->lock);
730 return 1;
731 }
732 } else if (aic->ttime_mean > ad->antic_expire) {
733 /* the process thinks too much between requests */
734 spin_unlock(&ioc->lock);
735 return 1;
736 }
737 spin_unlock(&ioc->lock);
738 return 0;
739}
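The exit heuristics used above are 8-bit fixed-point probabilities (256 == 100%). The small standalone check below, with made-up values, evaluates the combined exit_prob * exit_no_coop > 128*256 test, i.e. better than even odds that the wait is pointless.

#include <stdio.h>

int main(void)
{
        unsigned long exit_prob = 192;    /* ~75% of waited-on tasks exited */
        unsigned long exit_no_coop = 200; /* ~78% were not rescued by a
                                             cooperating request */

        /* 192 * 200 = 38400 > 32768, so anticipation would be broken */
        if (exit_prob * exit_no_coop > 128 * 256)
                puts("break anticipation");
        else
                puts("keep waiting");
        return 0;
}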
740
741/*
742 * as_can_anticipate indicates whether we should either run rq
743 * or keep anticipating a better request.
744 */
745static int as_can_anticipate(struct as_data *ad, struct request *rq)
746{
747#if 0 /* disable for now, we need to check tag level as well */
748 /*
749 * SSD device without seek penalty, disable idling
750 */
751	if (blk_queue_nonrot(ad->q))
752 return 0;
753#endif
754
755 if (!ad->io_context)
756 /*
757 * Last request submitted was a write
758 */
759 return 0;
760
761 if (ad->antic_status == ANTIC_FINISHED)
762 /*
763 * Don't restart if we have just finished. Run the next request
764 */
765 return 0;
766
767 if (as_can_break_anticipation(ad, rq))
768 /*
769 * This request is a good candidate. Don't keep anticipating,
770 * run it.
771 */
772 return 0;
773
774 /*
775 * OK from here, we haven't finished, and don't have a decent request!
776 * Status is either ANTIC_OFF so start waiting,
777 * ANTIC_WAIT_REQ so continue waiting for request to finish
778 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
779 */
780
781 return 1;
782}
783
784/*
785 * as_update_rq must be called whenever a request (rq) is added to
786 * the sort_list. This function keeps caches up to date, and checks if the
787 * request might be one we are "anticipating"
788 */
789static void as_update_rq(struct as_data *ad, struct request *rq)
790{
791 const int data_dir = rq_is_sync(rq);
792
793 /* keep the next_rq cache up to date */
794 ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]);
795
796 /*
797 * have we been anticipating this request?
798 * or does it come from the same process as the one we are anticipating
799 * for?
800 */
801 if (ad->antic_status == ANTIC_WAIT_REQ
802 || ad->antic_status == ANTIC_WAIT_NEXT) {
803 if (as_can_break_anticipation(ad, rq))
804 as_antic_stop(ad);
805 }
806}
807
808/*
809 * Gathers timings and resizes the write batch automatically
810 */
811static void update_write_batch(struct as_data *ad)
812{
813 unsigned long batch = ad->batch_expire[BLK_RW_ASYNC];
814 long write_time;
815
816 write_time = (jiffies - ad->current_batch_expires) + batch;
817 if (write_time < 0)
818 write_time = 0;
819
820 if (write_time > batch && !ad->write_batch_idled) {
821 if (write_time > batch * 3)
822 ad->write_batch_count /= 2;
823 else
824 ad->write_batch_count--;
825 } else if (write_time < batch && ad->current_write_count == 0) {
826 if (batch > write_time * 3)
827 ad->write_batch_count *= 2;
828 else
829 ad->write_batch_count++;
830 }
831
832 if (ad->write_batch_count < 1)
833 ad->write_batch_count = 1;
834}
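A toy standalone run of the resizing rule above (simplified: the idle and contention checks are dropped, and the batch times are invented) shows how write_batch_count is halved or decremented when batches overrun the target and grown again when they finish early.

#include <stdio.h>

int main(void)
{
        long target = 31;                      /* e.g. HZ/8 at HZ=250 */
        int count = 16;                        /* write_batch_count    */
        long took[] = { 120, 60, 40, 20, 10 }; /* invented batch times */
        int i;

        for (i = 0; i < 5; i++) {
                if (took[i] > target)
                        count = took[i] > target * 3 ? count / 2 : count - 1;
                else
                        count = target > took[i] * 3 ? count * 2 : count + 1;
                if (count < 1)
                        count = 1;
                printf("batch of %3ld jiffies -> next count %d\n",
                       took[i], count);
        }
        return 0;
}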
835
836/*
837 * as_completed_request is to be called when a request has completed and
838 * returned something to the requesting process, be it an error or data.
839 */
840static void as_completed_request(struct request_queue *q, struct request *rq)
841{
842 struct as_data *ad = q->elevator->elevator_data;
843
844 WARN_ON(!list_empty(&rq->queuelist));
845
846 if (RQ_STATE(rq) != AS_RQ_REMOVED) {
847 WARN(1, "rq->state %d\n", RQ_STATE(rq));
848 goto out;
849 }
850
851 if (ad->changed_batch && ad->nr_dispatched == 1) {
852 ad->current_batch_expires = jiffies +
853 ad->batch_expire[ad->batch_data_dir];
854 kblockd_schedule_work(q, &ad->antic_work);
855 ad->changed_batch = 0;
856
857 if (ad->batch_data_dir == BLK_RW_SYNC)
858 ad->new_batch = 1;
859 }
860 WARN_ON(ad->nr_dispatched == 0);
861 ad->nr_dispatched--;
862
863 /*
864 * Start counting the batch from when a request of that direction is
865 * actually serviced. This should help devices with big TCQ windows
866 * and writeback caches
867 */
868 if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
869 update_write_batch(ad);
870 ad->current_batch_expires = jiffies +
871 ad->batch_expire[BLK_RW_SYNC];
872 ad->new_batch = 0;
873 }
874
875 if (ad->io_context == RQ_IOC(rq) && ad->io_context) {
876 ad->antic_start = jiffies;
877 ad->ioc_finished = 1;
878 if (ad->antic_status == ANTIC_WAIT_REQ) {
879 /*
880 * We were waiting on this request, now anticipate
881 * the next one
882 */
883 as_antic_waitnext(ad);
884 }
885 }
886
887 as_put_io_context(rq);
888out:
889 RQ_SET_STATE(rq, AS_RQ_POSTSCHED);
890}
891
892/*
893 * as_remove_queued_request removes a request from the pre dispatch queue
894 * without updating refcounts. It is expected the caller will drop the
895 * reference unless it replaces the request at some part of the elevator
896 * (ie. the dispatch queue)
897 */
898static void as_remove_queued_request(struct request_queue *q,
899 struct request *rq)
900{
901 const int data_dir = rq_is_sync(rq);
902 struct as_data *ad = q->elevator->elevator_data;
903 struct io_context *ioc;
904
905 WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);
906
907 ioc = RQ_IOC(rq);
908 if (ioc && ioc->aic) {
909 BUG_ON(!atomic_read(&ioc->aic->nr_queued));
910 atomic_dec(&ioc->aic->nr_queued);
911 }
912
913 /*
914 * Update the "next_rq" cache if we are about to remove its
915 * entry
916 */
917 if (ad->next_rq[data_dir] == rq)
918 ad->next_rq[data_dir] = as_find_next_rq(ad, rq);
919
920 rq_fifo_clear(rq);
921 as_del_rq_rb(ad, rq);
922}
923
924/*
925 * as_fifo_expired returns 0 if there are no expired requests on the fifo,
926 * 1 otherwise. It is ratelimited so that we only perform the check once per
927 * `fifo_expire' interval. Otherwise a large number of expired requests
928 * would create a hopeless seekstorm.
929 *
930 * See as_antic_expired comment.
931 */
932static int as_fifo_expired(struct as_data *ad, int adir)
933{
934 struct request *rq;
935 long delta_jif;
936
937 delta_jif = jiffies - ad->last_check_fifo[adir];
938 if (unlikely(delta_jif < 0))
939 delta_jif = -delta_jif;
940 if (delta_jif < ad->fifo_expire[adir])
941 return 0;
942
943 ad->last_check_fifo[adir] = jiffies;
944
945 if (list_empty(&ad->fifo_list[adir]))
946 return 0;
947
948 rq = rq_entry_fifo(ad->fifo_list[adir].next);
949
950 return time_after(jiffies, rq_fifo_time(rq));
951}
952
953/*
954 * as_batch_expired returns true if the current batch has expired. A batch
955 * is a set of reads or a set of writes.
956 */
957static inline int as_batch_expired(struct as_data *ad)
958{
959 if (ad->changed_batch || ad->new_batch)
960 return 0;
961
962 if (ad->batch_data_dir == BLK_RW_SYNC)
963 /* TODO! add a check so a complete fifo gets written? */
964 return time_after(jiffies, ad->current_batch_expires);
965
966 return time_after(jiffies, ad->current_batch_expires)
967 || ad->current_write_count == 0;
968}
969
970/*
971 * move an entry to dispatch queue
972 */
973static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
974{
975 const int data_dir = rq_is_sync(rq);
976
977 BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
978
979 as_antic_stop(ad);
980 ad->antic_status = ANTIC_OFF;
981
982 /*
983 * This has to be set in order to be correctly updated by
984 * as_find_next_rq
985 */
986 ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
987
988 if (data_dir == BLK_RW_SYNC) {
989 struct io_context *ioc = RQ_IOC(rq);
990 /* In case we have to anticipate after this */
991 copy_io_context(&ad->io_context, &ioc);
992 } else {
993 if (ad->io_context) {
994 put_io_context(ad->io_context);
995 ad->io_context = NULL;
996 }
997
998 if (ad->current_write_count != 0)
999 ad->current_write_count--;
1000 }
1001 ad->ioc_finished = 0;
1002
1003 ad->next_rq[data_dir] = as_find_next_rq(ad, rq);
1004
1005 /*
1006 * take it off the sort and fifo list, add to dispatch queue
1007 */
1008 as_remove_queued_request(ad->q, rq);
1009 WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);
1010
1011 elv_dispatch_sort(ad->q, rq);
1012
1013 RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
1014 if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
1015 atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
1016 ad->nr_dispatched++;
1017}
1018
1019/*
1020 * as_dispatch_request selects the best request according to
1021 * read/write expire, batch expire, etc, and moves it to the dispatch
1022 * queue. Returns 1 if a request was found, 0 otherwise.
1023 */
1024static int as_dispatch_request(struct request_queue *q, int force)
1025{
1026 struct as_data *ad = q->elevator->elevator_data;
1027 const int reads = !list_empty(&ad->fifo_list[BLK_RW_SYNC]);
1028 const int writes = !list_empty(&ad->fifo_list[BLK_RW_ASYNC]);
1029 struct request *rq;
1030
1031 if (unlikely(force)) {
1032 /*
1033 * Forced dispatch, accounting is useless. Reset
1034 * accounting states and dump fifo_lists. Note that
1035 * batch_data_dir is reset to BLK_RW_SYNC to avoid
1036 * screwing write batch accounting as write batch
1037 * accounting occurs on W->R transition.
1038 */
1039 int dispatched = 0;
1040
1041 ad->batch_data_dir = BLK_RW_SYNC;
1042 ad->changed_batch = 0;
1043 ad->new_batch = 0;
1044
1045 while (ad->next_rq[BLK_RW_SYNC]) {
1046 as_move_to_dispatch(ad, ad->next_rq[BLK_RW_SYNC]);
1047 dispatched++;
1048 }
1049 ad->last_check_fifo[BLK_RW_SYNC] = jiffies;
1050
1051 while (ad->next_rq[BLK_RW_ASYNC]) {
1052 as_move_to_dispatch(ad, ad->next_rq[BLK_RW_ASYNC]);
1053 dispatched++;
1054 }
1055 ad->last_check_fifo[BLK_RW_ASYNC] = jiffies;
1056
1057 return dispatched;
1058 }
1059
1060 /* Signal that the write batch was uncontended, so we can't time it */
1061 if (ad->batch_data_dir == BLK_RW_ASYNC && !reads) {
1062 if (ad->current_write_count == 0 || !writes)
1063 ad->write_batch_idled = 1;
1064 }
1065
1066 if (!(reads || writes)
1067 || ad->antic_status == ANTIC_WAIT_REQ
1068 || ad->antic_status == ANTIC_WAIT_NEXT
1069 || ad->changed_batch)
1070 return 0;
1071
1072 if (!(reads && writes && as_batch_expired(ad))) {
1073 /*
1074 * batch is still running or no reads or no writes
1075 */
1076 rq = ad->next_rq[ad->batch_data_dir];
1077
1078 if (ad->batch_data_dir == BLK_RW_SYNC && ad->antic_expire) {
1079 if (as_fifo_expired(ad, BLK_RW_SYNC))
1080 goto fifo_expired;
1081
1082 if (as_can_anticipate(ad, rq)) {
1083 as_antic_waitreq(ad);
1084 return 0;
1085 }
1086 }
1087
1088 if (rq) {
1089 /* we have a "next request" */
1090 if (reads && !writes)
1091 ad->current_batch_expires =
1092 jiffies + ad->batch_expire[BLK_RW_SYNC];
1093 goto dispatch_request;
1094 }
1095 }
1096
1097 /*
1098 * at this point we are not running a batch. select the appropriate
1099 * data direction (read / write)
1100 */
1101
1102 if (reads) {
1103 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_SYNC]));
1104
1105 if (writes && ad->batch_data_dir == BLK_RW_SYNC)
1106 /*
1107 * Last batch was a read, switch to writes
1108 */
1109 goto dispatch_writes;
1110
1111 if (ad->batch_data_dir == BLK_RW_ASYNC) {
1112 WARN_ON(ad->new_batch);
1113 ad->changed_batch = 1;
1114 }
1115 ad->batch_data_dir = BLK_RW_SYNC;
1116 rq = rq_entry_fifo(ad->fifo_list[BLK_RW_SYNC].next);
1117 ad->last_check_fifo[ad->batch_data_dir] = jiffies;
1118 goto dispatch_request;
1119 }
1120
1121 /*
1122 * the last batch was a read
1123 */
1124
1125 if (writes) {
1126dispatch_writes:
1127 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_ASYNC]));
1128
1129 if (ad->batch_data_dir == BLK_RW_SYNC) {
1130 ad->changed_batch = 1;
1131
1132 /*
1133 * new_batch might be 1 when the queue runs out of
1134 * reads. A subsequent submission of a write might
1135 * cause a change of batch before the read is finished.
1136 */
1137 ad->new_batch = 0;
1138 }
1139 ad->batch_data_dir = BLK_RW_ASYNC;
1140 ad->current_write_count = ad->write_batch_count;
1141 ad->write_batch_idled = 0;
1142 rq = rq_entry_fifo(ad->fifo_list[BLK_RW_ASYNC].next);
1143 ad->last_check_fifo[BLK_RW_ASYNC] = jiffies;
1144 goto dispatch_request;
1145 }
1146
1147 BUG();
1148 return 0;
1149
1150dispatch_request:
1151 /*
1152 * If a request has expired, service it.
1153 */
1154
1155 if (as_fifo_expired(ad, ad->batch_data_dir)) {
1156fifo_expired:
1157 rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
1158 }
1159
1160 if (ad->changed_batch) {
1161 WARN_ON(ad->new_batch);
1162
1163 if (ad->nr_dispatched)
1164 return 0;
1165
1166 if (ad->batch_data_dir == BLK_RW_ASYNC)
1167 ad->current_batch_expires = jiffies +
1168 ad->batch_expire[BLK_RW_ASYNC];
1169 else
1170 ad->new_batch = 1;
1171
1172 ad->changed_batch = 0;
1173 }
1174
1175 /*
1176 * rq is the selected appropriate request.
1177 */
1178 as_move_to_dispatch(ad, rq);
1179
1180 return 1;
1181}
1182
1183/*
1184 * add rq to rbtree and fifo
1185 */
1186static void as_add_request(struct request_queue *q, struct request *rq)
1187{
1188 struct as_data *ad = q->elevator->elevator_data;
1189 int data_dir;
1190
1191 RQ_SET_STATE(rq, AS_RQ_NEW);
1192
1193 data_dir = rq_is_sync(rq);
1194
1195 rq->elevator_private = as_get_io_context(q->node);
1196
1197 if (RQ_IOC(rq)) {
1198 as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
1199 atomic_inc(&RQ_IOC(rq)->aic->nr_queued);
1200 }
1201
1202 as_add_rq_rb(ad, rq);
1203
1204 /*
1205 * set expire time and add to fifo list
1206 */
1207 rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
1208 list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);
1209
1210 as_update_rq(ad, rq); /* keep state machine up to date */
1211 RQ_SET_STATE(rq, AS_RQ_QUEUED);
1212}
1213
1214static void as_activate_request(struct request_queue *q, struct request *rq)
1215{
1216 WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
1217 RQ_SET_STATE(rq, AS_RQ_REMOVED);
1218 if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
1219 atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
1220}
1221
1222static void as_deactivate_request(struct request_queue *q, struct request *rq)
1223{
1224 WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
1225 RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
1226 if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
1227 atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
1228}
1229
1230/*
1231 * as_queue_empty tells us if there are requests left in the device. It may
1232 * not be the case that a driver can get the next request even if the queue
1233 * is not empty - it is used in the block layer to check for plugging and
1234 * merging opportunities
1235 */
1236static int as_queue_empty(struct request_queue *q)
1237{
1238 struct as_data *ad = q->elevator->elevator_data;
1239
1240 return list_empty(&ad->fifo_list[BLK_RW_ASYNC])
1241 && list_empty(&ad->fifo_list[BLK_RW_SYNC]);
1242}
1243
1244static int
1245as_merge(struct request_queue *q, struct request **req, struct bio *bio)
1246{
1247 struct as_data *ad = q->elevator->elevator_data;
1248 sector_t rb_key = bio->bi_sector + bio_sectors(bio);
1249 struct request *__rq;
1250
1251 /*
1252 * check for front merge
1253 */
1254 __rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key);
1255 if (__rq && elv_rq_merge_ok(__rq, bio)) {
1256 *req = __rq;
1257 return ELEVATOR_FRONT_MERGE;
1258 }
1259
1260 return ELEVATOR_NO_MERGE;
1261}
1262
1263static void as_merged_request(struct request_queue *q, struct request *req,
1264 int type)
1265{
1266 struct as_data *ad = q->elevator->elevator_data;
1267
1268 /*
1269 * if the merge was a front merge, we need to reposition request
1270 */
1271 if (type == ELEVATOR_FRONT_MERGE) {
1272 as_del_rq_rb(ad, req);
1273 as_add_rq_rb(ad, req);
1274 /*
1275 * Note! At this stage of this and the next function, our next
1276 * request may not be optimal - eg the request may have "grown"
1277 * behind the disk head. We currently don't bother adjusting.
1278 */
1279 }
1280}
1281
1282static void as_merged_requests(struct request_queue *q, struct request *req,
1283 struct request *next)
1284{
1285 /*
1286 * if next expires before rq, assign its expire time to arq
1287 * and move into next position (next will be deleted) in fifo
1288 */
1289 if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
1290 if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
1291 list_move(&req->queuelist, &next->queuelist);
1292 rq_set_fifo_time(req, rq_fifo_time(next));
1293 }
1294 }
1295
1296 /*
1297 * kill knowledge of next, this one is a goner
1298 */
1299 as_remove_queued_request(q, next);
1300 as_put_io_context(next);
1301
1302 RQ_SET_STATE(next, AS_RQ_MERGED);
1303}
1304
1305/*
1306 * This is executed in a "deferred" process context, by kblockd. It calls the
1307 * driver's request_fn so the driver can submit that request.
1308 *
1309 * IMPORTANT! This guy will reenter the elevator, so set up all queue global
1310 * state before calling, and don't rely on any state over calls.
1311 *
1312 * FIXME! dispatch queue is not a queue at all!
1313 */
1314static void as_work_handler(struct work_struct *work)
1315{
1316 struct as_data *ad = container_of(work, struct as_data, antic_work);
1317
1318 blk_run_queue(ad->q);
1319}
1320
1321static int as_may_queue(struct request_queue *q, int rw)
1322{
1323 int ret = ELV_MQUEUE_MAY;
1324 struct as_data *ad = q->elevator->elevator_data;
1325 struct io_context *ioc;
1326 if (ad->antic_status == ANTIC_WAIT_REQ ||
1327 ad->antic_status == ANTIC_WAIT_NEXT) {
1328 ioc = as_get_io_context(q->node);
1329 if (ad->io_context == ioc)
1330 ret = ELV_MQUEUE_MUST;
1331 put_io_context(ioc);
1332 }
1333
1334 return ret;
1335}
1336
1337static void as_exit_queue(struct elevator_queue *e)
1338{
1339 struct as_data *ad = e->elevator_data;
1340
1341 del_timer_sync(&ad->antic_timer);
1342 cancel_work_sync(&ad->antic_work);
1343
1344 BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_SYNC]));
1345 BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_ASYNC]));
1346
1347 put_io_context(ad->io_context);
1348 kfree(ad);
1349}
1350
1351/*
1352 * initialize elevator private data (as_data).
1353 */
1354static void *as_init_queue(struct request_queue *q)
1355{
1356 struct as_data *ad;
1357
1358 ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
1359 if (!ad)
1360 return NULL;
1361
1362 ad->q = q; /* Identify what queue the data belongs to */
1363
1364 /* anticipatory scheduling helpers */
1365 ad->antic_timer.function = as_antic_timeout;
1366 ad->antic_timer.data = (unsigned long)q;
1367 init_timer(&ad->antic_timer);
1368 INIT_WORK(&ad->antic_work, as_work_handler);
1369
1370 INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_SYNC]);
1371 INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_ASYNC]);
1372 ad->sort_list[BLK_RW_SYNC] = RB_ROOT;
1373 ad->sort_list[BLK_RW_ASYNC] = RB_ROOT;
1374 ad->fifo_expire[BLK_RW_SYNC] = default_read_expire;
1375 ad->fifo_expire[BLK_RW_ASYNC] = default_write_expire;
1376 ad->antic_expire = default_antic_expire;
1377 ad->batch_expire[BLK_RW_SYNC] = default_read_batch_expire;
1378 ad->batch_expire[BLK_RW_ASYNC] = default_write_batch_expire;
1379
1380 ad->current_batch_expires = jiffies + ad->batch_expire[BLK_RW_SYNC];
1381 ad->write_batch_count = ad->batch_expire[BLK_RW_ASYNC] / 10;
1382 if (ad->write_batch_count < 2)
1383 ad->write_batch_count = 2;
1384
1385 return ad;
1386}
1387
1388/*
1389 * sysfs parts below
1390 */
1391
1392static ssize_t
1393as_var_show(unsigned int var, char *page)
1394{
1395 return sprintf(page, "%d\n", var);
1396}
1397
1398static ssize_t
1399as_var_store(unsigned long *var, const char *page, size_t count)
1400{
1401 char *p = (char *) page;
1402
1403 *var = simple_strtoul(p, &p, 10);
1404 return count;
1405}
1406
1407static ssize_t est_time_show(struct elevator_queue *e, char *page)
1408{
1409 struct as_data *ad = e->elevator_data;
1410 int pos = 0;
1411
1412 pos += sprintf(page+pos, "%lu %% exit probability\n",
1413 100*ad->exit_prob/256);
1414 pos += sprintf(page+pos, "%lu %% probability of exiting without a "
1415 "cooperating process submitting IO\n",
1416 100*ad->exit_no_coop/256);
1417 pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
1418 pos += sprintf(page+pos, "%llu sectors new seek distance\n",
1419 (unsigned long long)ad->new_seek_mean);
1420
1421 return pos;
1422}
1423
1424#define SHOW_FUNCTION(__FUNC, __VAR) \
1425static ssize_t __FUNC(struct elevator_queue *e, char *page) \
1426{ \
1427 struct as_data *ad = e->elevator_data; \
1428 return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
1429}
1430SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[BLK_RW_SYNC]);
1431SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[BLK_RW_ASYNC]);
1432SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
1433SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[BLK_RW_SYNC]);
1434SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[BLK_RW_ASYNC]);
1435#undef SHOW_FUNCTION
1436
1437#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
1438static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
1439{ \
1440 struct as_data *ad = e->elevator_data; \
1441 int ret = as_var_store(__PTR, (page), count); \
1442 if (*(__PTR) < (MIN)) \
1443 *(__PTR) = (MIN); \
1444 else if (*(__PTR) > (MAX)) \
1445 *(__PTR) = (MAX); \
1446 *(__PTR) = msecs_to_jiffies(*(__PTR)); \
1447 return ret; \
1448}
1449STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[BLK_RW_SYNC], 0, INT_MAX);
1450STORE_FUNCTION(as_write_expire_store,
1451 &ad->fifo_expire[BLK_RW_ASYNC], 0, INT_MAX);
1452STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
1453STORE_FUNCTION(as_read_batch_expire_store,
1454 &ad->batch_expire[BLK_RW_SYNC], 0, INT_MAX);
1455STORE_FUNCTION(as_write_batch_expire_store,
1456 &ad->batch_expire[BLK_RW_ASYNC], 0, INT_MAX);
1457#undef STORE_FUNCTION
1458
1459#define AS_ATTR(name) \
1460 __ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)
1461
1462static struct elv_fs_entry as_attrs[] = {
1463 __ATTR_RO(est_time),
1464 AS_ATTR(read_expire),
1465 AS_ATTR(write_expire),
1466 AS_ATTR(antic_expire),
1467 AS_ATTR(read_batch_expire),
1468 AS_ATTR(write_batch_expire),
1469 __ATTR_NULL
1470};
1471
1472static struct elevator_type iosched_as = {
1473 .ops = {
1474 .elevator_merge_fn = as_merge,
1475 .elevator_merged_fn = as_merged_request,
1476 .elevator_merge_req_fn = as_merged_requests,
1477 .elevator_dispatch_fn = as_dispatch_request,
1478 .elevator_add_req_fn = as_add_request,
1479 .elevator_activate_req_fn = as_activate_request,
1480 .elevator_deactivate_req_fn = as_deactivate_request,
1481 .elevator_queue_empty_fn = as_queue_empty,
1482 .elevator_completed_req_fn = as_completed_request,
1483 .elevator_former_req_fn = elv_rb_former_request,
1484 .elevator_latter_req_fn = elv_rb_latter_request,
1485 .elevator_may_queue_fn = as_may_queue,
1486 .elevator_init_fn = as_init_queue,
1487 .elevator_exit_fn = as_exit_queue,
1488 .trim = as_trim,
1489 },
1490
1491 .elevator_attrs = as_attrs,
1492 .elevator_name = "anticipatory",
1493 .elevator_owner = THIS_MODULE,
1494};
1495
1496static int __init as_init(void)
1497{
1498 elv_register(&iosched_as);
1499
1500 return 0;
1501}
1502
1503static void __exit as_exit(void)
1504{
1505 DECLARE_COMPLETION_ONSTACK(all_gone);
1506 elv_unregister(&iosched_as);
1507 ioc_gone = &all_gone;
1508 /* ioc_gone's update must be visible before reading ioc_count */
1509 smp_wmb();
1510 if (elv_ioc_count_read(as_ioc_count))
1511 wait_for_completion(&all_gone);
1512 synchronize_rcu();
1513}
1514
1515module_init(as_init);
1516module_exit(as_exit);
1517
1518MODULE_AUTHOR("Nick Piggin");
1519MODULE_LICENSE("GPL");
1520MODULE_DESCRIPTION("anticipatory IO scheduler");
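One detail of the removed file worth spelling out is its exit path: as_exit() publishes a completion and waits until the count of outstanding as_io_contexts drains, and the last free_as_io_context() performs the complete(). The pthreads program below is only a userspace analogy of that pattern; the names and mutex-based locking are assumptions, and the kernel version uses a per-cpu counter plus RCU rather than a mutex.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_gone = PTHREAD_COND_INITIALIZER;
static int remaining = 3;          /* stand-in for elv_ioc_count    */
static int exiting;                /* stand-in for ioc_gone != NULL */

static void put_context(void)      /* cf. free_as_io_context()      */
{
        pthread_mutex_lock(&lock);
        if (--remaining == 0 && exiting)
                pthread_cond_signal(&all_gone);
        pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
        (void)arg;
        put_context();
        return NULL;
}

int main(void)                     /* cf. as_exit()                 */
{
        pthread_t t[3];
        int i;

        for (i = 0; i < 3; i++)
                pthread_create(&t[i], NULL, worker, NULL);

        pthread_mutex_lock(&lock);
        exiting = 1;
        while (remaining)          /* wait_for_completion()         */
                pthread_cond_wait(&all_gone, &lock);
        pthread_mutex_unlock(&lock);

        for (i = 0; i < 3; i++)
                pthread_join(t[i], NULL);
        puts("all io contexts gone, safe to unload");
        return 0;
}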
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index aa1e9535e358..13b612f9f27a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -27,6 +27,8 @@ static const int cfq_slice_sync = HZ / 10;
27static int cfq_slice_async = HZ / 25; 27static int cfq_slice_async = HZ / 25;
28static const int cfq_slice_async_rq = 2; 28static const int cfq_slice_async_rq = 2;
29static int cfq_slice_idle = HZ / 125; 29static int cfq_slice_idle = HZ / 125;
30static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
31static const int cfq_hist_divisor = 4;
30 32
31/* 33/*
32 * offset from end of service tree 34 * offset from end of service tree
@@ -38,6 +40,12 @@ static int cfq_slice_idle = HZ / 125;
38 */ 40 */
39#define CFQ_MIN_TT (2) 41#define CFQ_MIN_TT (2)
40 42
43/*
44 * Allow merged cfqqs to perform this amount of seeky I/O before
45 * deciding to break the queues up again.
46 */
47#define CFQQ_COOP_TOUT (HZ)
48
41#define CFQ_SLICE_SCALE (5) 49#define CFQ_SLICE_SCALE (5)
42#define CFQ_HW_QUEUE_MIN (5) 50#define CFQ_HW_QUEUE_MIN (5)
43 51
@@ -67,8 +75,9 @@ static DEFINE_SPINLOCK(ioc_gone_lock);
67struct cfq_rb_root { 75struct cfq_rb_root {
68 struct rb_root rb; 76 struct rb_root rb;
69 struct rb_node *left; 77 struct rb_node *left;
78 unsigned count;
70}; 79};
71#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, } 80#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, 0, }
72 81
73/* 82/*
74 * Per process-grouping structure 83 * Per process-grouping structure
@@ -112,19 +121,56 @@ struct cfq_queue {
112 unsigned short ioprio, org_ioprio; 121 unsigned short ioprio, org_ioprio;
113 unsigned short ioprio_class, org_ioprio_class; 122 unsigned short ioprio_class, org_ioprio_class;
114 123
124 unsigned int seek_samples;
125 u64 seek_total;
126 sector_t seek_mean;
127 sector_t last_request_pos;
128 unsigned long seeky_start;
129
115 pid_t pid; 130 pid_t pid;
131
132 struct cfq_rb_root *service_tree;
133 struct cfq_queue *new_cfqq;
116}; 134};
117 135
118/* 136/*
137 * First index in the service_trees.
138 * IDLE is handled separately, so it has negative index
139 */
140enum wl_prio_t {
141 IDLE_WORKLOAD = -1,
142 BE_WORKLOAD = 0,
143 RT_WORKLOAD = 1
144};
145
146/*
147 * Second index in the service_trees.
148 */
149enum wl_type_t {
150 ASYNC_WORKLOAD = 0,
151 SYNC_NOIDLE_WORKLOAD = 1,
152 SYNC_WORKLOAD = 2
153};
154
155
156/*
119 * Per block device queue structure 157 * Per block device queue structure
120 */ 158 */
121struct cfq_data { 159struct cfq_data {
122 struct request_queue *queue; 160 struct request_queue *queue;
123 161
124 /* 162 /*
125 * rr list of queues with requests and the count of them 163 * rr lists of queues with requests, one rr for each priority class.
164 * Counts are embedded in the cfq_rb_root
165 */
166 struct cfq_rb_root service_trees[2][3];
167 struct cfq_rb_root service_tree_idle;
168 /*
169 * The priority currently being served
126 */ 170 */
127 struct cfq_rb_root service_tree; 171 enum wl_prio_t serving_prio;
172 enum wl_type_t serving_type;
173 unsigned long workload_expires;
128 174
129 /* 175 /*
130 * Each priority tree is sorted by next_request position. These 176 * Each priority tree is sorted by next_request position. These
@@ -134,6 +180,7 @@ struct cfq_data {
134 struct rb_root prio_trees[CFQ_PRIO_LISTS]; 180 struct rb_root prio_trees[CFQ_PRIO_LISTS];
135 181
136 unsigned int busy_queues; 182 unsigned int busy_queues;
183 unsigned int busy_queues_avg[2];
137 184
138 int rq_in_driver[2]; 185 int rq_in_driver[2];
139 int sync_flight; 186 int sync_flight;
@@ -185,6 +232,16 @@ struct cfq_data {
185 unsigned long last_end_sync_rq; 232 unsigned long last_end_sync_rq;
186}; 233};
187 234
235static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio,
236 enum wl_type_t type,
237 struct cfq_data *cfqd)
238{
239 if (prio == IDLE_WORKLOAD)
240 return &cfqd->service_tree_idle;
241
242 return &cfqd->service_trees[prio][type];
243}
244
188enum cfqq_state_flags { 245enum cfqq_state_flags {
189 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */ 246 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
190 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ 247 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
@@ -195,7 +252,7 @@ enum cfqq_state_flags {
195 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ 252 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
196 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ 253 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
197 CFQ_CFQQ_FLAG_sync, /* synchronous queue */ 254 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
198 CFQ_CFQQ_FLAG_coop, /* has done a coop jump of the queue */ 255 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
199 CFQ_CFQQ_FLAG_coop_preempt, /* coop preempt */ 256 CFQ_CFQQ_FLAG_coop_preempt, /* coop preempt */
200}; 257};
201 258
@@ -231,6 +288,35 @@ CFQ_CFQQ_FNS(coop_preempt);
231#define cfq_log(cfqd, fmt, args...) \ 288#define cfq_log(cfqd, fmt, args...) \
232 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) 289 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
233 290
291static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
292{
293 if (cfq_class_idle(cfqq))
294 return IDLE_WORKLOAD;
295 if (cfq_class_rt(cfqq))
296 return RT_WORKLOAD;
297 return BE_WORKLOAD;
298}
299
300
301static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
302{
303 if (!cfq_cfqq_sync(cfqq))
304 return ASYNC_WORKLOAD;
305 if (!cfq_cfqq_idle_window(cfqq))
306 return SYNC_NOIDLE_WORKLOAD;
307 return SYNC_WORKLOAD;
308}
309
310static inline int cfq_busy_queues_wl(enum wl_prio_t wl, struct cfq_data *cfqd)
311{
312 if (wl == IDLE_WORKLOAD)
313 return cfqd->service_tree_idle.count;
314
315 return cfqd->service_trees[wl][ASYNC_WORKLOAD].count
316 + cfqd->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
317 + cfqd->service_trees[wl][SYNC_WORKLOAD].count;
318}
319
234static void cfq_dispatch_insert(struct request_queue *, struct request *); 320static void cfq_dispatch_insert(struct request_queue *, struct request *);
235static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool, 321static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
236 struct io_context *, gfp_t); 322 struct io_context *, gfp_t);
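The helpers added above split CFQ's single service tree into a small matrix indexed by priority class and workload type, with idle queues kept in a separate tree. Below is a standalone sketch of just that indexing; the rb_root is replaced by a counter stub, the other names follow the patch.

#include <stdio.h>

enum wl_prio_t { IDLE_WORKLOAD = -1, BE_WORKLOAD = 0, RT_WORKLOAD = 1 };
enum wl_type_t { ASYNC_WORKLOAD = 0, SYNC_NOIDLE_WORKLOAD = 1, SYNC_WORKLOAD = 2 };

struct rb_root_stub { unsigned count; };

static struct rb_root_stub service_trees[2][3];
static struct rb_root_stub service_tree_idle;

/* Same shape as service_tree_for(): prio picks the row, type the column. */
static struct rb_root_stub *tree_for(enum wl_prio_t prio, enum wl_type_t type)
{
        if (prio == IDLE_WORKLOAD)
                return &service_tree_idle;
        return &service_trees[prio][type];
}

int main(void)
{
        /* a sync, idling-allowed queue of best-effort class */
        tree_for(BE_WORKLOAD, SYNC_WORKLOAD)->count++;
        /* a seeky (no-idle) sync queue of realtime class */
        tree_for(RT_WORKLOAD, SYNC_NOIDLE_WORKLOAD)->count++;

        printf("BE/sync queues:        %u\n",
               service_trees[BE_WORKLOAD][SYNC_WORKLOAD].count);
        printf("RT/sync-noidle queues: %u\n",
               service_trees[RT_WORKLOAD][SYNC_NOIDLE_WORKLOAD].count);
        return 0;
}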
@@ -303,10 +389,49 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
303 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); 389 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
304} 390}
305 391
392/*
393 * get averaged number of queues of RT/BE priority.
394 * average is updated, with a formula that gives more weight to higher numbers,
395 * to quickly follows sudden increases and decrease slowly
396 */
397
398static inline unsigned cfq_get_avg_queues(struct cfq_data *cfqd, bool rt)
399{
400 unsigned min_q, max_q;
401 unsigned mult = cfq_hist_divisor - 1;
402 unsigned round = cfq_hist_divisor / 2;
403 unsigned busy = cfq_busy_queues_wl(rt, cfqd);
404
405 min_q = min(cfqd->busy_queues_avg[rt], busy);
406 max_q = max(cfqd->busy_queues_avg[rt], busy);
407 cfqd->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
408 cfq_hist_divisor;
409 return cfqd->busy_queues_avg[rt];
410}
411
306static inline void 412static inline void
307cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) 413cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
308{ 414{
309 cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; 415 unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
416 if (cfqd->cfq_latency) {
417 /* interested queues (we consider only the ones with the same
418 * priority class) */
419 unsigned iq = cfq_get_avg_queues(cfqd, cfq_class_rt(cfqq));
420 unsigned sync_slice = cfqd->cfq_slice[1];
421 unsigned expect_latency = sync_slice * iq;
422 if (expect_latency > cfq_target_latency) {
423 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
424 /* scale low_slice according to IO priority
425 * and sync vs async */
426 unsigned low_slice =
427 min(slice, base_low_slice * slice / sync_slice);
428 /* the adapted slice value is scaled to fit all iqs
429 * into the target latency */
430 slice = max(slice * cfq_target_latency / expect_latency,
431 low_slice);
432 }
433 }
434 cfqq->slice_end = jiffies + slice;
310 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); 435 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
311} 436}
312 437
@@ -445,6 +570,7 @@ static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
445 if (root->left == n) 570 if (root->left == n)
446 root->left = NULL; 571 root->left = NULL;
447 rb_erase_init(n, &root->rb); 572 rb_erase_init(n, &root->rb);
573 --root->count;
448} 574}
449 575
450/* 576/*
@@ -485,7 +611,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
485} 611}
486 612
487/* 613/*
488 * The cfqd->service_tree holds all pending cfq_queue's that have 614 * The cfqd->service_trees holds all pending cfq_queue's that have
489 * requests waiting to be processed. It is sorted in the order that 615 * requests waiting to be processed. It is sorted in the order that
490 * we will service the queues. 616 * we will service the queues.
491 */ 617 */
@@ -495,11 +621,13 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
495 struct rb_node **p, *parent; 621 struct rb_node **p, *parent;
496 struct cfq_queue *__cfqq; 622 struct cfq_queue *__cfqq;
497 unsigned long rb_key; 623 unsigned long rb_key;
624 struct cfq_rb_root *service_tree;
498 int left; 625 int left;
499 626
627 service_tree = service_tree_for(cfqq_prio(cfqq), cfqq_type(cfqq), cfqd);
500 if (cfq_class_idle(cfqq)) { 628 if (cfq_class_idle(cfqq)) {
501 rb_key = CFQ_IDLE_DELAY; 629 rb_key = CFQ_IDLE_DELAY;
502 parent = rb_last(&cfqd->service_tree.rb); 630 parent = rb_last(&service_tree->rb);
503 if (parent && parent != &cfqq->rb_node) { 631 if (parent && parent != &cfqq->rb_node) {
504 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 632 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
505 rb_key += __cfqq->rb_key; 633 rb_key += __cfqq->rb_key;
@@ -517,7 +645,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
517 cfqq->slice_resid = 0; 645 cfqq->slice_resid = 0;
518 } else { 646 } else {
519 rb_key = -HZ; 647 rb_key = -HZ;
520 __cfqq = cfq_rb_first(&cfqd->service_tree); 648 __cfqq = cfq_rb_first(service_tree);
521 rb_key += __cfqq ? __cfqq->rb_key : jiffies; 649 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
522 } 650 }
523 651
@@ -525,15 +653,18 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
525 /* 653 /*
526 * same position, nothing more to do 654 * same position, nothing more to do
527 */ 655 */
528 if (rb_key == cfqq->rb_key) 656 if (rb_key == cfqq->rb_key &&
657 cfqq->service_tree == service_tree)
529 return; 658 return;
530 659
531 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); 660 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
661 cfqq->service_tree = NULL;
532 } 662 }
533 663
534 left = 1; 664 left = 1;
535 parent = NULL; 665 parent = NULL;
536 p = &cfqd->service_tree.rb.rb_node; 666 cfqq->service_tree = service_tree;
667 p = &service_tree->rb.rb_node;
537 while (*p) { 668 while (*p) {
538 struct rb_node **n; 669 struct rb_node **n;
539 670
@@ -541,35 +672,25 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
541 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 672 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
542 673
543 /* 674 /*
544 * sort RT queues first, we always want to give 675 * sort by key, that represents service time.
545 * preference to them. IDLE queues goes to the back.
546 * after that, sort on the next service time.
547 */ 676 */
548 if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq)) 677 if (time_before(rb_key, __cfqq->rb_key))
549 n = &(*p)->rb_left;
550 else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
551 n = &(*p)->rb_right;
552 else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
553 n = &(*p)->rb_left; 678 n = &(*p)->rb_left;
554 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq)) 679 else {
555 n = &(*p)->rb_right;
556 else if (time_before(rb_key, __cfqq->rb_key))
557 n = &(*p)->rb_left;
558 else
559 n = &(*p)->rb_right; 680 n = &(*p)->rb_right;
560
561 if (n == &(*p)->rb_right)
562 left = 0; 681 left = 0;
682 }
563 683
564 p = n; 684 p = n;
565 } 685 }
566 686
567 if (left) 687 if (left)
568 cfqd->service_tree.left = &cfqq->rb_node; 688 service_tree->left = &cfqq->rb_node;
569 689
570 cfqq->rb_key = rb_key; 690 cfqq->rb_key = rb_key;
571 rb_link_node(&cfqq->rb_node, parent, p); 691 rb_link_node(&cfqq->rb_node, parent, p);
572 rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb); 692 rb_insert_color(&cfqq->rb_node, &service_tree->rb);
693 service_tree->count++;
573} 694}
574 695
575static struct cfq_queue * 696static struct cfq_queue *
@@ -671,8 +792,10 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
671 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 792 BUG_ON(!cfq_cfqq_on_rr(cfqq));
672 cfq_clear_cfqq_on_rr(cfqq); 793 cfq_clear_cfqq_on_rr(cfqq);
673 794
674 if (!RB_EMPTY_NODE(&cfqq->rb_node)) 795 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
675 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); 796 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
797 cfqq->service_tree = NULL;
798 }
676 if (cfqq->p_root) { 799 if (cfqq->p_root) {
677 rb_erase(&cfqq->p_node, cfqq->p_root); 800 rb_erase(&cfqq->p_node, cfqq->p_root);
678 cfqq->p_root = NULL; 801 cfqq->p_root = NULL;
@@ -933,10 +1056,12 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
933 */ 1056 */
934static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) 1057static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
935{ 1058{
936 if (RB_EMPTY_ROOT(&cfqd->service_tree.rb)) 1059 struct cfq_rb_root *service_tree =
937 return NULL; 1060 service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd);
938 1061
939 return cfq_rb_first(&cfqd->service_tree); 1062 if (RB_EMPTY_ROOT(&service_tree->rb))
1063 return NULL;
1064 return cfq_rb_first(service_tree);
940} 1065}
941 1066
942/* 1067/*
@@ -947,6 +1072,7 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
947{ 1072{
948 if (!cfqq) { 1073 if (!cfqq) {
949 cfqq = cfq_get_next_queue(cfqd); 1074 cfqq = cfq_get_next_queue(cfqd);
1075
950 if (cfqq && !cfq_cfqq_coop_preempt(cfqq)) 1076 if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
951 cfq_clear_cfqq_coop(cfqq); 1077 cfq_clear_cfqq_coop(cfqq);
952 } 1078 }
@@ -967,16 +1093,16 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
967 return cfqd->last_position - blk_rq_pos(rq); 1093 return cfqd->last_position - blk_rq_pos(rq);
968} 1094}
969 1095
970#define CIC_SEEK_THR 8 * 1024 1096#define CFQQ_SEEK_THR 8 * 1024
971#define CIC_SEEKY(cic) ((cic)->seek_mean > CIC_SEEK_THR) 1097#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
972 1098
973static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq) 1099static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1100 struct request *rq)
974{ 1101{
975 struct cfq_io_context *cic = cfqd->active_cic; 1102 sector_t sdist = cfqq->seek_mean;
976 sector_t sdist = cic->seek_mean;
977 1103
978 if (!sample_valid(cic->seek_samples)) 1104 if (!sample_valid(cfqq->seek_samples))
979 sdist = CIC_SEEK_THR; 1105 sdist = CFQQ_SEEK_THR;
980 1106
981 return cfq_dist_from_last(cfqd, rq) <= sdist; 1107 return cfq_dist_from_last(cfqd, rq) <= sdist;
982} 1108}
@@ -1005,7 +1131,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1005 * will contain the closest sector. 1131 * will contain the closest sector.
1006 */ 1132 */
1007 __cfqq = rb_entry(parent, struct cfq_queue, p_node); 1133 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1008 if (cfq_rq_close(cfqd, __cfqq->next_rq)) 1134 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1009 return __cfqq; 1135 return __cfqq;
1010 1136
1011 if (blk_rq_pos(__cfqq->next_rq) < sector) 1137 if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1016,7 +1142,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1016 return NULL; 1142 return NULL;
1017 1143
1018 __cfqq = rb_entry(node, struct cfq_queue, p_node); 1144 __cfqq = rb_entry(node, struct cfq_queue, p_node);
1019 if (cfq_rq_close(cfqd, __cfqq->next_rq)) 1145 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1020 return __cfqq; 1146 return __cfqq;
1021 1147
1022 return NULL; 1148 return NULL;
@@ -1033,16 +1159,13 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1033 * assumption. 1159 * assumption.
1034 */ 1160 */
1035static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, 1161static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1036 struct cfq_queue *cur_cfqq, 1162 struct cfq_queue *cur_cfqq)
1037 bool probe)
1038{ 1163{
1039 struct cfq_queue *cfqq; 1164 struct cfq_queue *cfqq;
1040 1165
1041 /* 1166 if (!cfq_cfqq_sync(cur_cfqq))
1042 * A valid cfq_io_context is necessary to compare requests against 1167 return NULL;
1043 * the seek_mean of the current cfqq. 1168 if (CFQQ_SEEKY(cur_cfqq))
1044 */
1045 if (!cfqd->active_cic)
1046 return NULL; 1169 return NULL;
1047 1170
1048 /* 1171 /*
@@ -1054,14 +1177,53 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1054 if (!cfqq) 1177 if (!cfqq)
1055 return NULL; 1178 return NULL;
1056 1179
1057 if (cfq_cfqq_coop(cfqq)) 1180 /*
1181 * It only makes sense to merge sync queues.
1182 */
1183 if (!cfq_cfqq_sync(cfqq))
1184 return NULL;
1185 if (CFQQ_SEEKY(cfqq))
1186 return NULL;
1187
1188 /*
1189 * Do not merge queues of different priority classes
1190 */
1191 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1058 return NULL; 1192 return NULL;
1059 1193
1060 if (!probe)
1061 cfq_mark_cfqq_coop(cfqq);
1062 return cfqq; 1194 return cfqq;
1063} 1195}
1064 1196
1197/*
1198 * Determine whether we should enforce idle window for this queue.
1199 */
1200
1201static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1202{
1203 enum wl_prio_t prio = cfqq_prio(cfqq);
1204 struct cfq_rb_root *service_tree = cfqq->service_tree;
1205
1206 /* We never do for idle class queues. */
1207 if (prio == IDLE_WORKLOAD)
1208 return false;
1209
1210 /* We do for queues that were marked with the idle window flag. */
1211 if (cfq_cfqq_idle_window(cfqq))
1212 return true;
1213
1214 /*
1215 * Otherwise, we do only if they are the last ones
1216 * in their service tree.
1217 */
1218 if (!service_tree)
1219 service_tree = service_tree_for(prio, cfqq_type(cfqq), cfqd);
1220
1221 if (service_tree->count == 0)
1222 return true;
1223
1224 return (service_tree->count == 1 && cfq_rb_first(service_tree) == cfqq);
1225}
1226
1065static void cfq_arm_slice_timer(struct cfq_data *cfqd) 1227static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1066{ 1228{
1067 struct cfq_queue *cfqq = cfqd->active_queue; 1229 struct cfq_queue *cfqq = cfqd->active_queue;
@@ -1082,7 +1244,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1082 /* 1244 /*
1083 * idle is disabled, either manually or by past process history 1245 * idle is disabled, either manually or by past process history
1084 */ 1246 */
1085 if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq)) 1247 if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
1086 return; 1248 return;
1087 1249
1088 /* 1250 /*
@@ -1109,14 +1271,20 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1109 1271
1110 cfq_mark_cfqq_wait_request(cfqq); 1272 cfq_mark_cfqq_wait_request(cfqq);
1111 1273
1112 /*
1113 * we don't want to idle for seeks, but we do want to allow
1114 * fair distribution of slice time for a process doing back-to-back
1115 * seeks. so allow a little bit of time for him to submit a new rq
1116 */
1117 sl = cfqd->cfq_slice_idle; 1274 sl = cfqd->cfq_slice_idle;
1118 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) 1275 /* are we servicing noidle tree, and there are more queues?
1276 * non-rotational or NCQ: no idle
1277 * non-NCQ rotational: very small idle, to allow
1278 * fair distribution of slice time for a process doing back-to-back
1279 * seeks.
1280 */
1281 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
1282 service_tree_for(cfqd->serving_prio, SYNC_NOIDLE_WORKLOAD, cfqd)
1283 ->count > 0) {
1284 if (blk_queue_nonrot(cfqd->queue) || cfqd->hw_tag)
1285 return;
1119 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT)); 1286 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
1287 }
1120 1288
1121 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 1289 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1122 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); 1290 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
@@ -1175,6 +1343,152 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1175} 1343}
1176 1344
1177/* 1345/*
1346 * Must be called with the queue_lock held.
1347 */
1348static int cfqq_process_refs(struct cfq_queue *cfqq)
1349{
1350 int process_refs, io_refs;
1351
1352 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
1353 process_refs = atomic_read(&cfqq->ref) - io_refs;
1354 BUG_ON(process_refs < 0);
1355 return process_refs;
1356}
1357
1358static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1359{
1360 int process_refs, new_process_refs;
1361 struct cfq_queue *__cfqq;
1362
1363 /* Avoid a circular list and skip interim queue merges */
1364 while ((__cfqq = new_cfqq->new_cfqq)) {
1365 if (__cfqq == cfqq)
1366 return;
1367 new_cfqq = __cfqq;
1368 }
1369
1370 process_refs = cfqq_process_refs(cfqq);
1371 /*
1372 * If the process for the cfqq has gone away, there is no
1373 * sense in merging the queues.
1374 */
1375 if (process_refs == 0)
1376 return;
1377
1378 /*
1379 * Merge in the direction of the lesser amount of work.
1380 */
1381 new_process_refs = cfqq_process_refs(new_cfqq);
1382 if (new_process_refs >= process_refs) {
1383 cfqq->new_cfqq = new_cfqq;
1384 atomic_add(process_refs, &new_cfqq->ref);
1385 } else {
1386 new_cfqq->new_cfqq = cfqq;
1387 atomic_add(new_process_refs, &cfqq->ref);
1388 }
1389}
1390
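
A hypothetical example of the "merge in the direction of the lesser amount of work" rule above: if cfqq has 3 process references and new_cfqq has 1, then new_process_refs < process_refs, so the else branch runs, new_cfqq->new_cfqq is pointed at cfqq, and one reference (new_process_refs) is added to cfqq; the lightly referenced queue is folded into the busier one rather than the reverse.
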
1391static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd, enum wl_prio_t prio,
1392 bool prio_changed)
1393{
1394 struct cfq_queue *queue;
1395 int i;
1396 bool key_valid = false;
1397 unsigned long lowest_key = 0;
1398 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
1399
1400 if (prio_changed) {
1401 /*
1402 * When the priority class has just switched, we prefer starting
1403 * from SYNC_NOIDLE (first choice), then SYNC,
1404 * then ASYNC
1405 */
1406 if (service_tree_for(prio, cur_best, cfqd)->count)
1407 return cur_best;
1408 cur_best = SYNC_WORKLOAD;
1409 if (service_tree_for(prio, cur_best, cfqd)->count)
1410 return cur_best;
1411
1412 return ASYNC_WORKLOAD;
1413 }
1414
1415 for (i = 0; i < 3; ++i) {
1416 /* otherwise, select the one with lowest rb_key */
1417 queue = cfq_rb_first(service_tree_for(prio, i, cfqd));
1418 if (queue &&
1419 (!key_valid || time_before(queue->rb_key, lowest_key))) {
1420 lowest_key = queue->rb_key;
1421 cur_best = i;
1422 key_valid = true;
1423 }
1424 }
1425
1426 return cur_best;
1427}
1428
1429static void choose_service_tree(struct cfq_data *cfqd)
1430{
1431 enum wl_prio_t previous_prio = cfqd->serving_prio;
1432 bool prio_changed;
1433 unsigned slice;
1434 unsigned count;
1435
1436 /* Choose next priority. RT > BE > IDLE */
1437 if (cfq_busy_queues_wl(RT_WORKLOAD, cfqd))
1438 cfqd->serving_prio = RT_WORKLOAD;
1439 else if (cfq_busy_queues_wl(BE_WORKLOAD, cfqd))
1440 cfqd->serving_prio = BE_WORKLOAD;
1441 else {
1442 cfqd->serving_prio = IDLE_WORKLOAD;
1443 cfqd->workload_expires = jiffies + 1;
1444 return;
1445 }
1446
1447 /*
1448 * For RT and BE, we also have to choose the workload type
1449 * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
1450 * expiration time
1451 */
1452 prio_changed = (cfqd->serving_prio != previous_prio);
1453 count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd)
1454 ->count;
1455
1456 /*
1457 * If priority didn't change, check workload expiration,
1458 * and that we still have other queues ready
1459 */
1460 if (!prio_changed && count &&
1461 !time_after(jiffies, cfqd->workload_expires))
1462 return;
1463
1464 /* otherwise select new workload type */
1465 cfqd->serving_type =
1466 cfq_choose_wl(cfqd, cfqd->serving_prio, prio_changed);
1467 count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd)
1468 ->count;
1469
1470 /*
1471 * the workload slice is computed as a fraction of target latency
1472 * proportional to the number of queues in that workload, over
1473 * all the queues in the same priority class
1474 */
1475 slice = cfq_target_latency * count /
1476 max_t(unsigned, cfqd->busy_queues_avg[cfqd->serving_prio],
1477 cfq_busy_queues_wl(cfqd->serving_prio, cfqd));
1478
1479 if (cfqd->serving_type == ASYNC_WORKLOAD)
1480 /* async workload slice is scaled down according to
1481 * the sync/async slice ratio. */
1482 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
1483 else
1484 /* sync workload slice is at least 2 * cfq_slice_idle */
1485 slice = max(slice, 2 * cfqd->cfq_slice_idle);
1486
1487 slice = max_t(unsigned, slice, CFQ_MIN_TT);
1488 cfqd->workload_expires = jiffies + slice;
1489}
1490
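
To make the workload-slice computation above concrete, assume the usual defaults of 300 ms target latency, 100 ms sync slice, 40 ms async slice and 8 ms slice_idle (all defined outside this excerpt), a BE busy_queues_avg of 6, and 2 queues on the chosen tree:

    slice = 300 ms * 2 / 6                      = 100 ms
    sync or sync-noidle tree:  max(100, 2 * 8)  = 100 ms
    async tree instead:        100 * 40 / 100   =  40 ms

Either result is finally clamped to at least CFQ_MIN_TT before workload_expires is set.
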
1491/*
1178 * Select a queue for service. If we have a current active queue, 1492 * Select a queue for service. If we have a current active queue,
1179 * check whether to continue servicing it, or retrieve and set a new one. 1493 * check whether to continue servicing it, or retrieve and set a new one.
1180 */ 1494 */
@@ -1203,11 +1517,14 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1203 * If another queue has a request waiting within our mean seek 1517 * If another queue has a request waiting within our mean seek
1204 * distance, let it run. The expire code will check for close 1518 * distance, let it run. The expire code will check for close
1205 * cooperators and put the close queue at the front of the service 1519 * cooperators and put the close queue at the front of the service
1206 * tree. 1520 * tree. If possible, merge the expiring queue with the new cfqq.
1207 */ 1521 */
1208 new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0); 1522 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
1209 if (new_cfqq) 1523 if (new_cfqq) {
1524 if (!cfqq->new_cfqq)
1525 cfq_setup_merge(cfqq, new_cfqq);
1210 goto expire; 1526 goto expire;
1527 }
1211 1528
1212 /* 1529 /*
1213 * No requests pending. If the active queue still has requests in 1530 * No requests pending. If the active queue still has requests in
@@ -1215,7 +1532,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1215 * conditions to happen (or time out) before selecting a new queue. 1532 * conditions to happen (or time out) before selecting a new queue.
1216 */ 1533 */
1217 if (timer_pending(&cfqd->idle_slice_timer) || 1534 if (timer_pending(&cfqd->idle_slice_timer) ||
1218 (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) { 1535 (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
1219 cfqq = NULL; 1536 cfqq = NULL;
1220 goto keep_queue; 1537 goto keep_queue;
1221 } 1538 }
@@ -1223,6 +1540,13 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1223expire: 1540expire:
1224 cfq_slice_expired(cfqd, 0); 1541 cfq_slice_expired(cfqd, 0);
1225new_queue: 1542new_queue:
1543 /*
1544 * Current queue expired. Check if we have to switch to a new
1545 * service tree
1546 */
1547 if (!new_cfqq)
1548 choose_service_tree(cfqd);
1549
1226 cfqq = cfq_set_active_queue(cfqd, new_cfqq); 1550 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
1227keep_queue: 1551keep_queue:
1228 return cfqq; 1552 return cfqq;
@@ -1249,8 +1573,14 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
1249{ 1573{
1250 struct cfq_queue *cfqq; 1574 struct cfq_queue *cfqq;
1251 int dispatched = 0; 1575 int dispatched = 0;
1252 1576 int i, j;
1253 while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL) 1577 for (i = 0; i < 2; ++i)
1578 for (j = 0; j < 3; ++j)
1579 while ((cfqq = cfq_rb_first(&cfqd->service_trees[i][j]))
1580 != NULL)
1581 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1582
1583 while ((cfqq = cfq_rb_first(&cfqd->service_tree_idle)) != NULL)
1254 dispatched += __cfq_forced_dispatch_cfqq(cfqq); 1584 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1255 1585
1256 cfq_slice_expired(cfqd, 0); 1586 cfq_slice_expired(cfqd, 0);
@@ -1268,7 +1598,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1268 /* 1598 /*
1269 * Drain async requests before we start sync IO 1599 * Drain async requests before we start sync IO
1270 */ 1600 */
1271 if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC]) 1601 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
1272 return false; 1602 return false;
1273 1603
1274 /* 1604 /*
@@ -1518,11 +1848,29 @@ static void cfq_free_io_context(struct io_context *ioc)
1518 1848
1519static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1849static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1520{ 1850{
1851 struct cfq_queue *__cfqq, *next;
1852
1521 if (unlikely(cfqq == cfqd->active_queue)) { 1853 if (unlikely(cfqq == cfqd->active_queue)) {
1522 __cfq_slice_expired(cfqd, cfqq, 0); 1854 __cfq_slice_expired(cfqd, cfqq, 0);
1523 cfq_schedule_dispatch(cfqd); 1855 cfq_schedule_dispatch(cfqd);
1524 } 1856 }
1525 1857
1858 /*
1859 * If this queue was scheduled to merge with another queue, be
1860 * sure to drop the reference taken on that queue (and others in
1861 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
1862 */
1863 __cfqq = cfqq->new_cfqq;
1864 while (__cfqq) {
1865 if (__cfqq == cfqq) {
1866 WARN(1, "cfqq->new_cfqq loop detected\n");
1867 break;
1868 }
1869 next = __cfqq->new_cfqq;
1870 cfq_put_queue(__cfqq);
1871 __cfqq = next;
1872 }
1873
1526 cfq_put_queue(cfqq); 1874 cfq_put_queue(cfqq);
1527} 1875}
1528 1876
@@ -1952,33 +2300,46 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1952} 2300}
1953 2301
1954static void 2302static void
1955cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic, 2303cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1956 struct request *rq) 2304 struct request *rq)
1957{ 2305{
1958 sector_t sdist; 2306 sector_t sdist;
1959 u64 total; 2307 u64 total;
1960 2308
1961 if (!cic->last_request_pos) 2309 if (!cfqq->last_request_pos)
1962 sdist = 0; 2310 sdist = 0;
1963 else if (cic->last_request_pos < blk_rq_pos(rq)) 2311 else if (cfqq->last_request_pos < blk_rq_pos(rq))
1964 sdist = blk_rq_pos(rq) - cic->last_request_pos; 2312 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
1965 else 2313 else
1966 sdist = cic->last_request_pos - blk_rq_pos(rq); 2314 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
1967 2315
1968 /* 2316 /*
1969 * Don't allow the seek distance to get too large from the 2317 * Don't allow the seek distance to get too large from the
1970 * odd fragment, pagein, etc 2318 * odd fragment, pagein, etc
1971 */ 2319 */
1972 if (cic->seek_samples <= 60) /* second&third seek */ 2320 if (cfqq->seek_samples <= 60) /* second&third seek */
1973 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024); 2321 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
1974 else 2322 else
1975 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64); 2323 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
1976 2324
1977 cic->seek_samples = (7*cic->seek_samples + 256) / 8; 2325 cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
1978 cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8; 2326 cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
1979 total = cic->seek_total + (cic->seek_samples/2); 2327 total = cfqq->seek_total + (cfqq->seek_samples/2);
1980 do_div(total, cic->seek_samples); 2328 do_div(total, cfqq->seek_samples);
1981 cic->seek_mean = (sector_t)total; 2329 cfqq->seek_mean = (sector_t)total;
2330
2331 /*
2332 * If this cfqq is shared between multiple processes, check to
2333 * make sure that those processes are still issuing I/Os within
2334 * the mean seek distance. If not, it may be time to break the
2335 * queues apart again.
2336 */
2337 if (cfq_cfqq_coop(cfqq)) {
2338 if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
2339 cfqq->seeky_start = jiffies;
2340 else if (!CFQQ_SEEKY(cfqq))
2341 cfqq->seeky_start = 0;
2342 }
1982} 2343}
1983 2344
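
The per-cfqq seek statistics above keep an exponentially weighted mean in fixed point: every update keeps 7/8 of the history and mixes in 1/8 of the new distance, with both counters scaled by 256 so that seek_total / seek_samples stays a plain sector count. A minimal userspace sketch of the same update (the kernel additionally clamps sdist first, omitted here; the sample distances are illustrative only):

    #include <stdio.h>

    typedef unsigned long long u64;

    static void update_seek(u64 *samples, u64 *total, u64 *mean, u64 sdist)
    {
            *samples = (7 * *samples + 256) / 8;
            *total   = (7 * *total + 256 * sdist) / 8;
            *mean    = (*total + *samples / 2) / *samples;  /* rounding, mirroring the do_div() above */
    }

    int main(void)
    {
            u64 samples = 0, total = 0, mean = 0;
            u64 sdist[] = { 8, 8, 8, 4096, 8, 8 };
            int i;

            /* one large seek pulls the mean up sharply, then it decays again */
            for (i = 0; i < 6; i++) {
                    update_seek(&samples, &total, &mean, sdist[i]);
                    printf("sdist=%llu mean=%llu\n", sdist[i], mean);
            }
            return 0;
    }
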
1984/* 2345/*
@@ -2000,13 +2361,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2000 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); 2361 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
2001 2362
2002 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || 2363 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
2003 (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic))) 2364 (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq)))
2004 enable_idle = 0; 2365 enable_idle = 0;
2005 else if (sample_valid(cic->ttime_samples)) { 2366 else if (sample_valid(cic->ttime_samples)) {
2006 unsigned int slice_idle = cfqd->cfq_slice_idle; 2367 if (cic->ttime_mean > cfqd->cfq_slice_idle)
2007 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
2008 slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
2009 if (cic->ttime_mean > slice_idle)
2010 enable_idle = 0; 2368 enable_idle = 0;
2011 else 2369 else
2012 enable_idle = 1; 2370 enable_idle = 1;
@@ -2044,6 +2402,10 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
2044 if (cfq_class_idle(cfqq)) 2402 if (cfq_class_idle(cfqq))
2045 return true; 2403 return true;
2046 2404
2405 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD
2406 && new_cfqq->service_tree == cfqq->service_tree)
2407 return true;
2408
2047 /* 2409 /*
2048 * if the new request is sync, but the currently running queue is 2410 * if the new request is sync, but the currently running queue is
2049 * not, let the sync request have priority. 2411 * not, let the sync request have priority.
@@ -2071,7 +2433,8 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
2071 * if this request is as-good as one we would expect from the 2433 * if this request is as-good as one we would expect from the
2072 * current cfqq, let it preempt 2434 * current cfqq, let it preempt
2073 */ 2435 */
2074 if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) || 2436 if (cfq_rq_close(cfqd, cfqq, rq))
2437 if (cfq_rq_close(cfqd, cfqq, rq) && (!cfq_cfqq_coop(new_cfqq) ||
2075 cfqd->busy_queues == 1)) { 2438 cfqd->busy_queues == 1)) {
2076 /* 2439 /*
2077 * Mark new queue coop_preempt, so its coop flag will not be 2440 * Mark new queue coop_preempt, so its coop flag will not be
@@ -2121,10 +2484,10 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2121 cfqq->meta_pending++; 2484 cfqq->meta_pending++;
2122 2485
2123 cfq_update_io_thinktime(cfqd, cic); 2486 cfq_update_io_thinktime(cfqd, cic);
2124 cfq_update_io_seektime(cfqd, cic, rq); 2487 cfq_update_io_seektime(cfqd, cfqq, rq);
2125 cfq_update_idle_window(cfqd, cfqq, cic); 2488 cfq_update_idle_window(cfqd, cfqq, cic);
2126 2489
2127 cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); 2490 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
2128 2491
2129 if (cfqq == cfqd->active_queue) { 2492 if (cfqq == cfqd->active_queue) {
2130 /* 2493 /*
@@ -2165,10 +2528,9 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
2165 cfq_log_cfqq(cfqd, cfqq, "insert_request"); 2528 cfq_log_cfqq(cfqd, cfqq, "insert_request");
2166 cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc); 2529 cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
2167 2530
2168 cfq_add_rq_rb(rq);
2169
2170 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); 2531 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
2171 list_add_tail(&rq->queuelist, &cfqq->fifo); 2532 list_add_tail(&rq->queuelist, &cfqq->fifo);
2533 cfq_add_rq_rb(rq);
2172 2534
2173 cfq_rq_enqueued(cfqd, cfqq, rq); 2535 cfq_rq_enqueued(cfqd, cfqq, rq);
2174} 2536}
@@ -2179,6 +2541,8 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
2179 */ 2541 */
2180static void cfq_update_hw_tag(struct cfq_data *cfqd) 2542static void cfq_update_hw_tag(struct cfq_data *cfqd)
2181{ 2543{
2544 struct cfq_queue *cfqq = cfqd->active_queue;
2545
2182 if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak) 2546 if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
2183 cfqd->rq_in_driver_peak = rq_in_driver(cfqd); 2547 cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
2184 2548
@@ -2186,6 +2550,16 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
2186 rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN) 2550 rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
2187 return; 2551 return;
2188 2552
2553 /*
2554 * If the active queue doesn't have enough requests and can idle, cfq might
2555 * not dispatch sufficient requests to the hardware. Don't zero hw_tag in
2556 * this case.
2557 */
2558 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
2559 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
2560 CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
2561 return;
2562
2189 if (cfqd->hw_tag_samples++ < 50) 2563 if (cfqd->hw_tag_samples++ < 50)
2190 return; 2564 return;
2191 2565
@@ -2243,7 +2617,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
2243 */ 2617 */
2244 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) 2618 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
2245 cfq_slice_expired(cfqd, 1); 2619 cfq_slice_expired(cfqd, 1);
2246 else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) && 2620 else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq) &&
2247 sync && !rq_noidle(rq)) 2621 sync && !rq_noidle(rq))
2248 cfq_arm_slice_timer(cfqd); 2622 cfq_arm_slice_timer(cfqd);
2249 } 2623 }
@@ -2269,12 +2643,10 @@ static void cfq_prio_boost(struct cfq_queue *cfqq)
2269 cfqq->ioprio = IOPRIO_NORM; 2643 cfqq->ioprio = IOPRIO_NORM;
2270 } else { 2644 } else {
2271 /* 2645 /*
2272 * check if we need to unboost the queue 2646 * unboost the queue (if needed)
2273 */ 2647 */
2274 if (cfqq->ioprio_class != cfqq->org_ioprio_class) 2648 cfqq->ioprio_class = cfqq->org_ioprio_class;
2275 cfqq->ioprio_class = cfqq->org_ioprio_class; 2649 cfqq->ioprio = cfqq->org_ioprio;
2276 if (cfqq->ioprio != cfqq->org_ioprio)
2277 cfqq->ioprio = cfqq->org_ioprio;
2278 } 2650 }
2279} 2651}
2280 2652
@@ -2338,6 +2710,43 @@ static void cfq_put_request(struct request *rq)
2338 } 2710 }
2339} 2711}
2340 2712
2713static struct cfq_queue *
2714cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
2715 struct cfq_queue *cfqq)
2716{
2717 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
2718 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
2719 cfq_mark_cfqq_coop(cfqq->new_cfqq);
2720 cfq_put_queue(cfqq);
2721 return cic_to_cfqq(cic, 1);
2722}
2723
2724static int should_split_cfqq(struct cfq_queue *cfqq)
2725{
2726 if (cfqq->seeky_start &&
2727 time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
2728 return 1;
2729 return 0;
2730}
2731
2732/*
2733 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
2734 * was the last process referring to said cfqq.
2735 */
2736static struct cfq_queue *
2737split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
2738{
2739 if (cfqq_process_refs(cfqq) == 1) {
2740 cfqq->seeky_start = 0;
2741 cfqq->pid = current->pid;
2742 cfq_clear_cfqq_coop(cfqq);
2743 return cfqq;
2744 }
2745
2746 cic_set_cfqq(cic, NULL, 1);
2747 cfq_put_queue(cfqq);
2748 return NULL;
2749}
2341/* 2750/*
2342 * Allocate cfq data structures associated with this request. 2751 * Allocate cfq data structures associated with this request.
2343 */ 2752 */
@@ -2360,10 +2769,30 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
2360 if (!cic) 2769 if (!cic)
2361 goto queue_fail; 2770 goto queue_fail;
2362 2771
2772new_queue:
2363 cfqq = cic_to_cfqq(cic, is_sync); 2773 cfqq = cic_to_cfqq(cic, is_sync);
2364 if (!cfqq || cfqq == &cfqd->oom_cfqq) { 2774 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2365 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); 2775 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
2366 cic_set_cfqq(cic, cfqq, is_sync); 2776 cic_set_cfqq(cic, cfqq, is_sync);
2777 } else {
2778 /*
2779 * If the queue was seeky for too long, break it apart.
2780 */
2781 if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
2782 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
2783 cfqq = split_cfqq(cic, cfqq);
2784 if (!cfqq)
2785 goto new_queue;
2786 }
2787
2788 /*
2789 * Check to see if this queue is scheduled to merge with
2790 * another, closely cooperating queue. The merging of
2791 * queues happens here as it must be done in process context.
2792 * The reference on new_cfqq was taken in merge_cfqqs.
2793 */
2794 if (cfqq->new_cfqq)
2795 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
2367 } 2796 }
2368 2797
2369 cfqq->allocated[rw]++; 2798 cfqq->allocated[rw]++;
@@ -2500,13 +2929,16 @@ static void cfq_exit_queue(struct elevator_queue *e)
2500static void *cfq_init_queue(struct request_queue *q) 2929static void *cfq_init_queue(struct request_queue *q)
2501{ 2930{
2502 struct cfq_data *cfqd; 2931 struct cfq_data *cfqd;
2503 int i; 2932 int i, j;
2504 2933
2505 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); 2934 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
2506 if (!cfqd) 2935 if (!cfqd)
2507 return NULL; 2936 return NULL;
2508 2937
2509 cfqd->service_tree = CFQ_RB_ROOT; 2938 for (i = 0; i < 2; ++i)
2939 for (j = 0; j < 3; ++j)
2940 cfqd->service_trees[i][j] = CFQ_RB_ROOT;
2941 cfqd->service_tree_idle = CFQ_RB_ROOT;
2510 2942
2511 /* 2943 /*
2512 * Not strictly needed (since RB_ROOT just clears the node and we 2944 * Not strictly needed (since RB_ROOT just clears the node and we
diff --git a/block/elevator.c b/block/elevator.c
index a847046c6e53..9ad5ccc4c5ee 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -154,10 +154,7 @@ static struct elevator_type *elevator_get(const char *name)
154 154
155 spin_unlock(&elv_list_lock); 155 spin_unlock(&elv_list_lock);
156 156
157 if (!strcmp(name, "anticipatory")) 157 sprintf(elv, "%s-iosched", name);
158 sprintf(elv, "as-iosched");
159 else
160 sprintf(elv, "%s-iosched", name);
161 158
162 request_module("%s", elv); 159 request_module("%s", elv);
163 spin_lock(&elv_list_lock); 160 spin_lock(&elv_list_lock);
@@ -193,10 +190,7 @@ static int __init elevator_setup(char *str)
193 * Be backwards-compatible with previous kernels, so users 190 * Be backwards-compatible with previous kernels, so users
194 * won't get the wrong elevator. 191 * won't get the wrong elevator.
195 */ 192 */
196 if (!strcmp(str, "as")) 193 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
197 strcpy(chosen_elevator, "anticipatory");
198 else
199 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
200 return 1; 194 return 1;
201} 195}
202 196