Diffstat (limited to 'drivers/block/cfq-iosched.c')
-rw-r--r--  drivers/block/cfq-iosched.c  2081
1 file changed, 1431 insertions(+), 650 deletions(-)
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index 3ac47dde64da..de5746e38af9 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -21,22 +21,34 @@
21#include <linux/hash.h> 21#include <linux/hash.h>
22#include <linux/rbtree.h> 22#include <linux/rbtree.h>
23#include <linux/mempool.h> 23#include <linux/mempool.h>
24 24#include <linux/ioprio.h>
25static unsigned long max_elapsed_crq; 25#include <linux/writeback.h>
26static unsigned long max_elapsed_dispatch;
27 26
28/* 27/*
29 * tunables 28 * tunables
30 */ 29 */
31static int cfq_quantum = 4; /* max queue in one round of service */ 30static int cfq_quantum = 4; /* max queue in one round of service */
32static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/ 31static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
33static int cfq_service = HZ; /* period over which service is avg */ 32static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
34static int cfq_fifo_expire_r = HZ / 2; /* fifo timeout for sync requests */
35static int cfq_fifo_expire_w = 5 * HZ; /* fifo timeout for async requests */
36static int cfq_fifo_rate = HZ / 8; /* fifo expiry rate */
37static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ 33static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
38static int cfq_back_penalty = 2; /* penalty of a backwards seek */ 34static int cfq_back_penalty = 2; /* penalty of a backwards seek */
39 35
36static int cfq_slice_sync = HZ / 10;
37static int cfq_slice_async = HZ / 25;
38static int cfq_slice_async_rq = 2;
39static int cfq_slice_idle = HZ / 100;
40
41#define CFQ_IDLE_GRACE (HZ / 10)
42#define CFQ_SLICE_SCALE (5)
43
44#define CFQ_KEY_ASYNC (0)
45#define CFQ_KEY_ANY (0xffff)
46
47/*
48 * disable queueing at the driver/hardware level
49 */
50static int cfq_max_depth = 1;
51
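
The separate cfq_fifo_expire_r/_w tunables collapse above into one two-element
array indexed by a 0/1 sync flag, and the new slice lengths are wired the same
way into cfq_slice[2] further down. A minimal user-space sketch of that
indexing convention, not part of the patch; HZ = 1000 is assumed for the
arithmetic, and the async/sync order follows the ASYNC/SYNC defines introduced
later in this diff:

#include <stdio.h>

#define HZ 1000                 /* assumed */
#define ASYNC 0                 /* index convention used by the patch */
#define SYNC  1

static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };   /* fifo timeout, jiffies */
static int cfq_slice[2]       = { HZ / 25, HZ / 10 }; /* time slice, jiffies */

int main(void)
{
        printf("async: fifo %d, slice %d\n", cfq_fifo_expire[ASYNC], cfq_slice[ASYNC]);
        printf("sync:  fifo %d, slice %d\n", cfq_fifo_expire[SYNC], cfq_slice[SYNC]);
        return 0;
}
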
40/* 52/*
41 * for the hash of cfqq inside the cfqd 53 * for the hash of cfqq inside the cfqd
42 */ 54 */
@@ -55,6 +67,7 @@ static int cfq_back_penalty = 2; /* penalty of a backwards seek */
55#define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash) 67#define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash)
56 68
57#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list) 69#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
70#define list_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
58 71
59#define RQ_DATA(rq) (rq)->elevator_private 72#define RQ_DATA(rq) (rq)->elevator_private
60 73
@@ -75,78 +88,110 @@ static int cfq_back_penalty = 2; /* penalty of a backwards seek */
75#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) 88#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
76#define rq_rb_key(rq) (rq)->sector 89#define rq_rb_key(rq) (rq)->sector
77 90
78/*
79 * threshold for switching off non-tag accounting
80 */
81#define CFQ_MAX_TAG (4)
82
83/*
84 * sort key types and names
85 */
86enum {
87 CFQ_KEY_PGID,
88 CFQ_KEY_TGID,
89 CFQ_KEY_UID,
90 CFQ_KEY_GID,
91 CFQ_KEY_LAST,
92};
93
94static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL };
95
96static kmem_cache_t *crq_pool; 91static kmem_cache_t *crq_pool;
97static kmem_cache_t *cfq_pool; 92static kmem_cache_t *cfq_pool;
98static kmem_cache_t *cfq_ioc_pool; 93static kmem_cache_t *cfq_ioc_pool;
99 94
95#define CFQ_PRIO_LISTS IOPRIO_BE_NR
96#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
97#define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
98#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
99
100#define ASYNC (0)
101#define SYNC (1)
102
103#define cfq_cfqq_dispatched(cfqq) \
104 ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
105
106#define cfq_cfqq_class_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC)
107
108#define cfq_cfqq_sync(cfqq) \
109 (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
110
111/*
112 * Per block device queue structure
113 */
100struct cfq_data { 114struct cfq_data {
101 struct list_head rr_list; 115 atomic_t ref;
116 request_queue_t *queue;
117
118 /*
119 * rr list of queues with requests and the count of them
120 */
121 struct list_head rr_list[CFQ_PRIO_LISTS];
122 struct list_head busy_rr;
123 struct list_head cur_rr;
124 struct list_head idle_rr;
125 unsigned int busy_queues;
126
127 /*
128 * non-ordered list of empty cfqq's
129 */
102 struct list_head empty_list; 130 struct list_head empty_list;
103 131
132 /*
133 * cfqq lookup hash
134 */
104 struct hlist_head *cfq_hash; 135 struct hlist_head *cfq_hash;
105 struct hlist_head *crq_hash;
106 136
107 /* queues on rr_list (ie they have pending requests */ 137 /*
108 unsigned int busy_queues; 138 * global crq hash for all queues
139 */
140 struct hlist_head *crq_hash;
109 141
110 unsigned int max_queued; 142 unsigned int max_queued;
111 143
112 atomic_t ref; 144 mempool_t *crq_pool;
113 145
114 int key_type; 146 int rq_in_driver;
115 147
116 mempool_t *crq_pool; 148 /*
149 * schedule slice state info
150 */
151 /*
152 * idle window management
153 */
154 struct timer_list idle_slice_timer;
155 struct work_struct unplug_work;
117 156
118 request_queue_t *queue; 157 struct cfq_queue *active_queue;
158 struct cfq_io_context *active_cic;
159 int cur_prio, cur_end_prio;
160 unsigned int dispatch_slice;
161
162 struct timer_list idle_class_timer;
119 163
120 sector_t last_sector; 164 sector_t last_sector;
165 unsigned long last_end_request;
121 166
122 int rq_in_driver; 167 unsigned int rq_starved;
123 168
124 /* 169 /*
125 * tunables, see top of file 170 * tunables, see top of file
126 */ 171 */
127 unsigned int cfq_quantum; 172 unsigned int cfq_quantum;
128 unsigned int cfq_queued; 173 unsigned int cfq_queued;
129 unsigned int cfq_fifo_expire_r; 174 unsigned int cfq_fifo_expire[2];
130 unsigned int cfq_fifo_expire_w;
131 unsigned int cfq_fifo_batch_expire;
132 unsigned int cfq_back_penalty; 175 unsigned int cfq_back_penalty;
133 unsigned int cfq_back_max; 176 unsigned int cfq_back_max;
134 unsigned int find_best_crq; 177 unsigned int cfq_slice[2];
135 178 unsigned int cfq_slice_async_rq;
136 unsigned int cfq_tagged; 179 unsigned int cfq_slice_idle;
180 unsigned int cfq_max_depth;
137}; 181};
138 182
183/*
184 * Per process-grouping structure
185 */
139struct cfq_queue { 186struct cfq_queue {
140 /* reference count */ 187 /* reference count */
141 atomic_t ref; 188 atomic_t ref;
142 /* parent cfq_data */ 189 /* parent cfq_data */
143 struct cfq_data *cfqd; 190 struct cfq_data *cfqd;
144 /* hash of mergeable requests */ 191 /* cfqq lookup hash */
145 struct hlist_node cfq_hash; 192 struct hlist_node cfq_hash;
146 /* hash key */ 193 /* hash key */
147 unsigned long key; 194 unsigned int key;
148 /* whether queue is on rr (or empty) list */
149 int on_rr;
150 /* on either rr or empty list of cfqd */ 195 /* on either rr or empty list of cfqd */
151 struct list_head cfq_list; 196 struct list_head cfq_list;
152 /* sorted list of pending requests */ 197 /* sorted list of pending requests */
@@ -158,21 +203,22 @@ struct cfq_queue {
158 /* currently allocated requests */ 203 /* currently allocated requests */
159 int allocated[2]; 204 int allocated[2];
160 /* fifo list of requests in sort_list */ 205 /* fifo list of requests in sort_list */
161 struct list_head fifo[2]; 206 struct list_head fifo;
162 /* last time fifo expired */
163 unsigned long last_fifo_expire;
164 207
165 int key_type; 208 unsigned long slice_start;
209 unsigned long slice_end;
210 unsigned long slice_left;
211 unsigned long service_last;
166 212
167 unsigned long service_start; 213 /* number of requests that are on the dispatch list */
168 unsigned long service_used; 214 int on_dispatch[2];
169 215
170 unsigned int max_rate; 216 /* io prio of this group */
217 unsigned short ioprio, org_ioprio;
218 unsigned short ioprio_class, org_ioprio_class;
171 219
172 /* number of requests that have been handed to the driver */ 220 /* various state flags, see below */
173 int in_flight; 221 unsigned int flags;
174 /* number of currently allocated requests */
175 int alloc_limit[2];
176}; 222};
177 223
178struct cfq_rq { 224struct cfq_rq {
@@ -184,42 +230,78 @@ struct cfq_rq {
184 struct cfq_queue *cfq_queue; 230 struct cfq_queue *cfq_queue;
185 struct cfq_io_context *io_context; 231 struct cfq_io_context *io_context;
186 232
187 unsigned long service_start; 233 unsigned int crq_flags;
188 unsigned long queue_start; 234};
235
236enum cfqq_state_flags {
237 CFQ_CFQQ_FLAG_on_rr = 0,
238 CFQ_CFQQ_FLAG_wait_request,
239 CFQ_CFQQ_FLAG_must_alloc,
240 CFQ_CFQQ_FLAG_must_alloc_slice,
241 CFQ_CFQQ_FLAG_must_dispatch,
242 CFQ_CFQQ_FLAG_fifo_expire,
243 CFQ_CFQQ_FLAG_idle_window,
244 CFQ_CFQQ_FLAG_prio_changed,
245 CFQ_CFQQ_FLAG_expired,
246};
189 247
190 unsigned int in_flight : 1; 248#define CFQ_CFQQ_FNS(name) \
191 unsigned int accounted : 1; 249static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
192 unsigned int is_sync : 1; 250{ \
193 unsigned int is_write : 1; 251 cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
252} \
253static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
254{ \
255 cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
256} \
257static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
258{ \
259 return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
260}
261
262CFQ_CFQQ_FNS(on_rr);
263CFQ_CFQQ_FNS(wait_request);
264CFQ_CFQQ_FNS(must_alloc);
265CFQ_CFQQ_FNS(must_alloc_slice);
266CFQ_CFQQ_FNS(must_dispatch);
267CFQ_CFQQ_FNS(fifo_expire);
268CFQ_CFQQ_FNS(idle_window);
269CFQ_CFQQ_FNS(prio_changed);
270CFQ_CFQQ_FNS(expired);
271#undef CFQ_CFQQ_FNS
272
273enum cfq_rq_state_flags {
274 CFQ_CRQ_FLAG_in_flight = 0,
275 CFQ_CRQ_FLAG_in_driver,
276 CFQ_CRQ_FLAG_is_sync,
277 CFQ_CRQ_FLAG_requeued,
194}; 278};
195 279
196static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long); 280#define CFQ_CRQ_FNS(name) \
281static inline void cfq_mark_crq_##name(struct cfq_rq *crq) \
282{ \
283 crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name); \
284} \
285static inline void cfq_clear_crq_##name(struct cfq_rq *crq) \
286{ \
287 crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name); \
288} \
289static inline int cfq_crq_##name(const struct cfq_rq *crq) \
290{ \
291 return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0; \
292}
293
294CFQ_CRQ_FNS(in_flight);
295CFQ_CRQ_FNS(in_driver);
296CFQ_CRQ_FNS(is_sync);
297CFQ_CRQ_FNS(requeued);
298#undef CFQ_CRQ_FNS
299
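
The CFQ_CFQQ_FNS()/CFQ_CRQ_FNS() expansions above replace the old one-bit
bitfields (in_flight:1, accounted:1, ...) with a single flags word plus a
generated mark/clear/test helper triple per flag. A self-contained sketch of
the same macro pattern; the struct and flag names here are illustrative only:

#include <assert.h>

struct demo_queue { unsigned int flags; };

enum { DEMO_FLAG_on_rr = 0, DEMO_FLAG_idle_window };

#define DEMO_FNS(name) \
static inline void demo_mark_##name(struct demo_queue *q) \
{ q->flags |= (1 << DEMO_FLAG_##name); } \
static inline void demo_clear_##name(struct demo_queue *q) \
{ q->flags &= ~(1 << DEMO_FLAG_##name); } \
static inline int demo_##name(const struct demo_queue *q) \
{ return (q->flags & (1 << DEMO_FLAG_##name)) != 0; }

DEMO_FNS(on_rr);
DEMO_FNS(idle_window);
#undef DEMO_FNS

int main(void)
{
        struct demo_queue q = { 0 };

        demo_mark_on_rr(&q);
        assert(demo_on_rr(&q) && !demo_idle_window(&q));
        demo_clear_on_rr(&q);
        assert(!demo_on_rr(&q));
        return 0;
}

The payoff is one word of storage while every set/clear/test site stays
greppable by flag name.
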
300static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
197static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *); 301static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
198static void cfq_update_next_crq(struct cfq_rq *);
199static void cfq_put_cfqd(struct cfq_data *cfqd); 302static void cfq_put_cfqd(struct cfq_data *cfqd);
200 303
201/* 304#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE)
202 * what the fairness is based on (ie how processes are grouped and
203 * differentiated)
204 */
205static inline unsigned long
206cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk)
207{
208 /*
209 * optimize this so that ->key_type is the offset into the struct
210 */
211 switch (cfqd->key_type) {
212 case CFQ_KEY_PGID:
213 return process_group(tsk);
214 default:
215 case CFQ_KEY_TGID:
216 return tsk->tgid;
217 case CFQ_KEY_UID:
218 return tsk->uid;
219 case CFQ_KEY_GID:
220 return tsk->gid;
221 }
222}
223 305
224/* 306/*
225 * lots of deadline iosched dupes, can be abstracted later... 307 * lots of deadline iosched dupes, can be abstracted later...
@@ -235,16 +317,12 @@ static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
235 317
236 if (q->last_merge == crq->request) 318 if (q->last_merge == crq->request)
237 q->last_merge = NULL; 319 q->last_merge = NULL;
238
239 cfq_update_next_crq(crq);
240} 320}
241 321
242static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq) 322static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
243{ 323{
244 const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request)); 324 const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
245 325
246 BUG_ON(!hlist_unhashed(&crq->hash));
247
248 hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]); 326 hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
249} 327}
250 328
@@ -257,8 +335,6 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
257 struct cfq_rq *crq = list_entry_hash(entry); 335 struct cfq_rq *crq = list_entry_hash(entry);
258 struct request *__rq = crq->request; 336 struct request *__rq = crq->request;
259 337
260 BUG_ON(hlist_unhashed(&crq->hash));
261
262 if (!rq_mergeable(__rq)) { 338 if (!rq_mergeable(__rq)) {
263 cfq_del_crq_hash(crq); 339 cfq_del_crq_hash(crq);
264 continue; 340 continue;
@@ -271,6 +347,28 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
271 return NULL; 347 return NULL;
272} 348}
273 349
350static inline int cfq_pending_requests(struct cfq_data *cfqd)
351{
352 return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
353}
354
355/*
356 * scheduler run of queue, if there are requests pending and no one in the
357 * driver that will restart queueing
358 */
359static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
360{
361 if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
362 kblockd_schedule_work(&cfqd->unplug_work);
363}
364
365static int cfq_queue_empty(request_queue_t *q)
366{
367 struct cfq_data *cfqd = q->elevator->elevator_data;
368
369 return !cfq_pending_requests(cfqd);
370}
371
274/* 372/*
275 * Lifted from AS - choose which of crq1 and crq2 that is best served now. 373 * Lifted from AS - choose which of crq1 and crq2 that is best served now.
276 * We choose the request that is closest to the head right now. Distance 374 * We choose the request that is closest to the head right now. Distance
@@ -287,36 +385,16 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
287 return crq2; 385 return crq2;
288 if (crq2 == NULL) 386 if (crq2 == NULL)
289 return crq1; 387 return crq1;
388 if (cfq_crq_requeued(crq1))
389 return crq1;
390 if (cfq_crq_requeued(crq2))
391 return crq2;
290 392
291 s1 = crq1->request->sector; 393 s1 = crq1->request->sector;
292 s2 = crq2->request->sector; 394 s2 = crq2->request->sector;
293 395
294 last = cfqd->last_sector; 396 last = cfqd->last_sector;
295 397
296#if 0
297 if (!list_empty(&cfqd->queue->queue_head)) {
298 struct list_head *entry = &cfqd->queue->queue_head;
299 unsigned long distance = ~0UL;
300 struct request *rq;
301
302 while ((entry = entry->prev) != &cfqd->queue->queue_head) {
303 rq = list_entry_rq(entry);
304
305 if (blk_barrier_rq(rq))
306 break;
307
308 if (distance < abs(s1 - rq->sector + rq->nr_sectors)) {
309 distance = abs(s1 - rq->sector +rq->nr_sectors);
310 last = rq->sector + rq->nr_sectors;
311 }
312 if (distance < abs(s2 - rq->sector + rq->nr_sectors)) {
313 distance = abs(s2 - rq->sector +rq->nr_sectors);
314 last = rq->sector + rq->nr_sectors;
315 }
316 }
317 }
318#endif
319
320 /* 398 /*
321 * by definition, 1KiB is 2 sectors 399 * by definition, 1KiB is 2 sectors
322 */ 400 */
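
cfq_choose_req() prefers the candidate closest to the last head position, with
backward seeks penalized by cfq_back_penalty and only considered within
cfq_back_max (converted from KiB to sectors by the "1 KiB is 2 sectors" rule
above). A user-space sketch of that distance metric; the exact cutoff handling
is an assumption here, since the full decision logic falls outside the quoted
hunk:

#include <stdio.h>

typedef unsigned long long sector_t;

#define BACK_MAX_KIB (16 * 1024)        /* cfq_back_max default */
#define BACK_PENALTY 2                  /* cfq_back_penalty default */

static unsigned long long seek_cost(sector_t s, sector_t last)
{
        sector_t back_max = BACK_MAX_KIB * 2;     /* 1 KiB == 2 sectors */

        if (s >= last)
                return s - last;                  /* forward: plain distance */
        if (last - s <= back_max)
                return (last - s) * BACK_PENALTY; /* close behind: penalized */
        return ~0ULL;                             /* too far back: never prefer */
}

int main(void)
{
        sector_t last = 100000;

        printf("fwd +2048: %llu, back -1024: %llu\n",
               seek_cost(last + 2048, last), seek_cost(last - 1024, last));
        return 0;
}
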
@@ -377,11 +455,14 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
377 struct cfq_rq *crq_next = NULL, *crq_prev = NULL; 455 struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
378 struct rb_node *rbnext, *rbprev; 456 struct rb_node *rbnext, *rbprev;
379 457
380 if (!ON_RB(&last->rb_node)) 458 rbnext = NULL;
381 return NULL; 459 if (ON_RB(&last->rb_node))
382 460 rbnext = rb_next(&last->rb_node);
383 if ((rbnext = rb_next(&last->rb_node)) == NULL) 461 if (!rbnext) {
384 rbnext = rb_first(&cfqq->sort_list); 462 rbnext = rb_first(&cfqq->sort_list);
463 if (rbnext == &last->rb_node)
464 rbnext = NULL;
465 }
385 466
386 rbprev = rb_prev(&last->rb_node); 467 rbprev = rb_prev(&last->rb_node);
387 468
@@ -401,67 +482,53 @@ static void cfq_update_next_crq(struct cfq_rq *crq)
401 cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq); 482 cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
402} 483}
403 484
404static int cfq_check_sort_rr_list(struct cfq_queue *cfqq) 485static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
405{ 486{
406 struct list_head *head = &cfqq->cfqd->rr_list; 487 struct cfq_data *cfqd = cfqq->cfqd;
407 struct list_head *next, *prev; 488 struct list_head *list, *entry;
408
409 /*
410 * list might still be ordered
411 */
412 next = cfqq->cfq_list.next;
413 if (next != head) {
414 struct cfq_queue *cnext = list_entry_cfqq(next);
415 489
416 if (cfqq->service_used > cnext->service_used) 490 BUG_ON(!cfq_cfqq_on_rr(cfqq));
417 return 1;
418 }
419 491
420 prev = cfqq->cfq_list.prev; 492 list_del(&cfqq->cfq_list);
421 if (prev != head) {
422 struct cfq_queue *cprev = list_entry_cfqq(prev);
423 493
424 if (cfqq->service_used < cprev->service_used) 494 if (cfq_class_rt(cfqq))
425 return 1; 495 list = &cfqd->cur_rr;
496 else if (cfq_class_idle(cfqq))
497 list = &cfqd->idle_rr;
498 else {
499 /*
500 * if cfqq has requests in flight, don't allow it to be
501 * found in cfq_set_active_queue before it has finished them.
502 * this is done to increase fairness between a process that
503 * has lots of io pending vs one that only generates one
504 * sporadically or synchronously
505 */
506 if (cfq_cfqq_dispatched(cfqq))
507 list = &cfqd->busy_rr;
508 else
509 list = &cfqd->rr_list[cfqq->ioprio];
426 } 510 }
427 511
428 return 0; 512 /*
429} 513 * if queue was preempted, just add to front to be fair. busy_rr
430 514 * isn't sorted.
431static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue) 515 */
432{ 516 if (preempted || list == &cfqd->busy_rr) {
433 struct list_head *entry = &cfqq->cfqd->rr_list; 517 list_add(&cfqq->cfq_list, list);
434
435 if (!cfqq->on_rr)
436 return;
437 if (!new_queue && !cfq_check_sort_rr_list(cfqq))
438 return; 518 return;
439 519 }
440 list_del(&cfqq->cfq_list);
441 520
442 /* 521 /*
443 * sort by our mean service_used, sub-sort by in-flight requests 522 * sort by when queue was last serviced
444 */ 523 */
445 while ((entry = entry->prev) != &cfqq->cfqd->rr_list) { 524 entry = list;
525 while ((entry = entry->prev) != list) {
446 struct cfq_queue *__cfqq = list_entry_cfqq(entry); 526 struct cfq_queue *__cfqq = list_entry_cfqq(entry);
447 527
448 if (cfqq->service_used > __cfqq->service_used) 528 if (!__cfqq->service_last)
529 break;
530 if (time_before(__cfqq->service_last, cfqq->service_last))
449 break; 531 break;
450 else if (cfqq->service_used == __cfqq->service_used) {
451 struct list_head *prv;
452
453 while ((prv = entry->prev) != &cfqq->cfqd->rr_list) {
454 __cfqq = list_entry_cfqq(prv);
455
456 WARN_ON(__cfqq->service_used > cfqq->service_used);
457 if (cfqq->service_used != __cfqq->service_used)
458 break;
459 if (cfqq->in_flight > __cfqq->in_flight)
460 break;
461
462 entry = prv;
463 }
464 }
465 } 532 }
466 533
467 list_add(&cfqq->cfq_list, entry); 534 list_add(&cfqq->cfq_list, entry);
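
The rewritten resort loop above orders queues by cfqq->service_last rather
than the old service_used heuristic, walking the list tail-first and comparing
with time_before() so the ordering survives jiffies wraparound. A one-file
sketch of that comparison; the macro body is assumed from the usual kernel
definition:

#include <stdio.h>
#include <limits.h>

#define time_before(a, b) ((long)((a) - (b)) < 0)

int main(void)
{
        unsigned long just_wrapped = 10;
        unsigned long about_to_wrap = ULONG_MAX - 5;

        /* numerically larger, but "before" once the counter wraps */
        printf("%d\n", time_before(about_to_wrap, just_wrapped));  /* 1 */
        printf("%d\n", time_before(just_wrapped, about_to_wrap));  /* 0 */
        return 0;
}
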
@@ -469,28 +536,24 @@ static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue)
469 536
470/* 537/*
471 * add to busy list of queues for service, trying to be fair in ordering 538 * add to busy list of queues for service, trying to be fair in ordering
472 * the pending list according to requests serviced 539 * the pending list according to last request service
473 */ 540 */
474static inline void 541static inline void
475cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 542cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
476{ 543{
477 /* 544 BUG_ON(cfq_cfqq_on_rr(cfqq));
478 * it's currently on the empty list 545 cfq_mark_cfqq_on_rr(cfqq);
479 */
480 cfqq->on_rr = 1;
481 cfqd->busy_queues++; 546 cfqd->busy_queues++;
482 547
483 if (time_after(jiffies, cfqq->service_start + cfq_service)) 548 cfq_resort_rr_list(cfqq, requeue);
484 cfqq->service_used >>= 3;
485
486 cfq_sort_rr_list(cfqq, 1);
487} 549}
488 550
489static inline void 551static inline void
490cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 552cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
491{ 553{
554 BUG_ON(!cfq_cfqq_on_rr(cfqq));
555 cfq_clear_cfqq_on_rr(cfqq);
492 list_move(&cfqq->cfq_list, &cfqd->empty_list); 556 list_move(&cfqq->cfq_list, &cfqd->empty_list);
493 cfqq->on_rr = 0;
494 557
495 BUG_ON(!cfqd->busy_queues); 558 BUG_ON(!cfqd->busy_queues);
496 cfqd->busy_queues--; 559 cfqd->busy_queues--;
@@ -505,16 +568,17 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
505 568
506 if (ON_RB(&crq->rb_node)) { 569 if (ON_RB(&crq->rb_node)) {
507 struct cfq_data *cfqd = cfqq->cfqd; 570 struct cfq_data *cfqd = cfqq->cfqd;
571 const int sync = cfq_crq_is_sync(crq);
508 572
509 BUG_ON(!cfqq->queued[crq->is_sync]); 573 BUG_ON(!cfqq->queued[sync]);
574 cfqq->queued[sync]--;
510 575
511 cfq_update_next_crq(crq); 576 cfq_update_next_crq(crq);
512 577
513 cfqq->queued[crq->is_sync]--;
514 rb_erase(&crq->rb_node, &cfqq->sort_list); 578 rb_erase(&crq->rb_node, &cfqq->sort_list);
515 RB_CLEAR_COLOR(&crq->rb_node); 579 RB_CLEAR_COLOR(&crq->rb_node);
516 580
517 if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr) 581 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
518 cfq_del_cfqq_rr(cfqd, cfqq); 582 cfq_del_cfqq_rr(cfqd, cfqq);
519 } 583 }
520} 584}
@@ -550,7 +614,7 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
550 struct cfq_rq *__alias; 614 struct cfq_rq *__alias;
551 615
552 crq->rb_key = rq_rb_key(rq); 616 crq->rb_key = rq_rb_key(rq);
553 cfqq->queued[crq->is_sync]++; 617 cfqq->queued[cfq_crq_is_sync(crq)]++;
554 618
555 /* 619 /*
556 * looks a little odd, but the first insert might return an alias. 620 * looks a little odd, but the first insert might return an alias.
@@ -561,8 +625,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
561 625
562 rb_insert_color(&crq->rb_node, &cfqq->sort_list); 626 rb_insert_color(&crq->rb_node, &cfqq->sort_list);
563 627
564 if (!cfqq->on_rr) 628 if (!cfq_cfqq_on_rr(cfqq))
565 cfq_add_cfqq_rr(cfqd, cfqq); 629 cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq));
566 630
567 /* 631 /*
568 * check if this request is a better next-serve candidate 632 * check if this request is a better next-serve candidate
@@ -575,17 +639,16 @@ cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
575{ 639{
576 if (ON_RB(&crq->rb_node)) { 640 if (ON_RB(&crq->rb_node)) {
577 rb_erase(&crq->rb_node, &cfqq->sort_list); 641 rb_erase(&crq->rb_node, &cfqq->sort_list);
578 cfqq->queued[crq->is_sync]--; 642 cfqq->queued[cfq_crq_is_sync(crq)]--;
579 } 643 }
580 644
581 cfq_add_crq_rb(crq); 645 cfq_add_crq_rb(crq);
582} 646}
583 647
584static struct request * 648static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
585cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) 649
586{ 650{
587 const unsigned long key = cfq_hash_key(cfqd, current); 651 struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY);
588 struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key);
589 struct rb_node *n; 652 struct rb_node *n;
590 653
591 if (!cfqq) 654 if (!cfqq)
@@ -609,20 +672,25 @@ out:
609 672
610static void cfq_deactivate_request(request_queue_t *q, struct request *rq) 673static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
611{ 674{
675 struct cfq_data *cfqd = q->elevator->elevator_data;
612 struct cfq_rq *crq = RQ_DATA(rq); 676 struct cfq_rq *crq = RQ_DATA(rq);
613 677
614 if (crq) { 678 if (crq) {
615 struct cfq_queue *cfqq = crq->cfq_queue; 679 struct cfq_queue *cfqq = crq->cfq_queue;
616 680
617 if (cfqq->cfqd->cfq_tagged) { 681 if (cfq_crq_in_driver(crq)) {
618 cfqq->service_used--; 682 cfq_clear_crq_in_driver(crq);
619 cfq_sort_rr_list(cfqq, 0); 683 WARN_ON(!cfqd->rq_in_driver);
684 cfqd->rq_in_driver--;
620 } 685 }
686 if (cfq_crq_in_flight(crq)) {
687 const int sync = cfq_crq_is_sync(crq);
621 688
622 if (crq->accounted) { 689 cfq_clear_crq_in_flight(crq);
623 crq->accounted = 0; 690 WARN_ON(!cfqq->on_dispatch[sync]);
624 cfqq->cfqd->rq_in_driver--; 691 cfqq->on_dispatch[sync]--;
625 } 692 }
693 cfq_mark_crq_requeued(crq);
626 } 694 }
627} 695}
628 696
@@ -640,11 +708,10 @@ static void cfq_remove_request(request_queue_t *q, struct request *rq)
640 struct cfq_rq *crq = RQ_DATA(rq); 708 struct cfq_rq *crq = RQ_DATA(rq);
641 709
642 if (crq) { 710 if (crq) {
643 cfq_remove_merge_hints(q, crq);
644 list_del_init(&rq->queuelist); 711 list_del_init(&rq->queuelist);
712 cfq_del_crq_rb(crq);
713 cfq_remove_merge_hints(q, crq);
645 714
646 if (crq->cfq_queue)
647 cfq_del_crq_rb(crq);
648 } 715 }
649} 716}
650 717
@@ -662,21 +729,15 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
662 } 729 }
663 730
664 __rq = cfq_find_rq_hash(cfqd, bio->bi_sector); 731 __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
665 if (__rq) { 732 if (__rq && elv_rq_merge_ok(__rq, bio)) {
666 BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector); 733 ret = ELEVATOR_BACK_MERGE;
667 734 goto out;
668 if (elv_rq_merge_ok(__rq, bio)) {
669 ret = ELEVATOR_BACK_MERGE;
670 goto out;
671 }
672 } 735 }
673 736
674 __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio)); 737 __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
675 if (__rq) { 738 if (__rq && elv_rq_merge_ok(__rq, bio)) {
676 if (elv_rq_merge_ok(__rq, bio)) { 739 ret = ELEVATOR_FRONT_MERGE;
677 ret = ELEVATOR_FRONT_MERGE; 740 goto out;
678 goto out;
679 }
680 } 741 }
681 742
682 return ELEVATOR_NO_MERGE; 743 return ELEVATOR_NO_MERGE;
@@ -709,20 +770,220 @@ static void
709cfq_merged_requests(request_queue_t *q, struct request *rq, 770cfq_merged_requests(request_queue_t *q, struct request *rq,
710 struct request *next) 771 struct request *next)
711{ 772{
712 struct cfq_rq *crq = RQ_DATA(rq);
713 struct cfq_rq *cnext = RQ_DATA(next);
714
715 cfq_merged_request(q, rq); 773 cfq_merged_request(q, rq);
716 774
717 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { 775 /*
718 if (time_before(cnext->queue_start, crq->queue_start)) { 776 * reposition in fifo if next is older than rq
719 list_move(&rq->queuelist, &next->queuelist); 777 */
720 crq->queue_start = cnext->queue_start; 778 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
779 time_before(next->start_time, rq->start_time))
780 list_move(&rq->queuelist, &next->queuelist);
781
782 cfq_remove_request(q, next);
783}
784
785static inline void
786__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
787{
788 if (cfqq) {
789 /*
790 * stop potential idle class queues waiting service
791 */
792 del_timer(&cfqd->idle_class_timer);
793
794 cfqq->slice_start = jiffies;
795 cfqq->slice_end = 0;
796 cfqq->slice_left = 0;
797 cfq_clear_cfqq_must_alloc_slice(cfqq);
798 cfq_clear_cfqq_fifo_expire(cfqq);
799 cfq_clear_cfqq_expired(cfqq);
800 }
801
802 cfqd->active_queue = cfqq;
803}
804
805/*
806 * 0
807 * 0,1
808 * 0,1,2
809 * 0,1,2,3
810 * 0,1,2,3,4
811 * 0,1,2,3,4,5
812 * 0,1,2,3,4,5,6
813 * 0,1,2,3,4,5,6,7
814 */
815static int cfq_get_next_prio_level(struct cfq_data *cfqd)
816{
817 int prio, wrap;
818
819 prio = -1;
820 wrap = 0;
821 do {
822 int p;
823
824 for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
825 if (!list_empty(&cfqd->rr_list[p])) {
826 prio = p;
827 break;
828 }
829 }
830
831 if (prio != -1)
832 break;
833 cfqd->cur_prio = 0;
834 if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
835 cfqd->cur_end_prio = 0;
836 if (wrap)
837 break;
838 wrap = 1;
721 } 839 }
840 } while (1);
841
842 if (unlikely(prio == -1))
843 return -1;
844
845 BUG_ON(prio >= CFQ_PRIO_LISTS);
846
847 list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
848
849 cfqd->cur_prio = prio + 1;
850 if (cfqd->cur_prio > cfqd->cur_end_prio) {
851 cfqd->cur_end_prio = cfqd->cur_prio;
852 cfqd->cur_prio = 0;
853 }
854 if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
855 cfqd->cur_prio = 0;
856 cfqd->cur_end_prio = 0;
722 } 857 }
723 858
724 cfq_update_next_crq(cnext); 859 return prio;
725 cfq_remove_request(q, next); 860}
861
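
cfq_get_next_prio_level() implements the expanding scan that the
0 / 0,1 / 0,1,2 / ... comment above describes: each call widens the
cur_prio..cur_end_prio window, so level 0 is eligible on every pass, level 1
on every second, and so on down to the lowest priority. A user-space
transcription as a sketch, with the rr_list emptiness checks reduced to a
nonempty[] array:

#include <stdio.h>

#define CFQ_PRIO_LISTS 8

static int cur_prio, cur_end_prio;

static int next_prio_level(const int *nonempty)
{
        int prio = -1, wrap = 0;

        do {
                int p;

                for (p = cur_prio; p <= cur_end_prio; p++) {
                        if (nonempty[p]) {
                                prio = p;
                                break;
                        }
                }
                if (prio != -1)
                        break;
                cur_prio = 0;
                if (++cur_end_prio == CFQ_PRIO_LISTS) {
                        cur_end_prio = 0;
                        if (wrap)
                                break;
                        wrap = 1;
                }
        } while (1);

        if (prio == -1)
                return -1;

        cur_prio = prio + 1;
        if (cur_prio > cur_end_prio) {
                cur_end_prio = cur_prio;
                cur_prio = 0;
        }
        if (cur_end_prio == CFQ_PRIO_LISTS)
                cur_prio = cur_end_prio = 0;

        return prio;
}

int main(void)
{
        int nonempty[CFQ_PRIO_LISTS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
        int i;

        for (i = 0; i < 15; i++)
                printf("%d ", next_prio_level(nonempty));
        printf("\n");   /* 0 0 1 0 1 2 0 1 2 3 0 1 2 3 4 */
        return 0;
}
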
862static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
863{
864 struct cfq_queue *cfqq;
865
866 /*
867 * if current queue is expired but not done with its requests yet,
868 * wait for that to happen
869 */
870 if ((cfqq = cfqd->active_queue) != NULL) {
871 if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq))
872 return NULL;
873 }
874
875 /*
876 * if current list is non-empty, grab first entry. if it is empty,
877 * get next prio level and grab first entry then if any are spliced
878 */
879 if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
880 cfqq = list_entry_cfqq(cfqd->cur_rr.next);
881
882 /*
883 * if we have idle queues and no rt or be queues had pending
884 * requests, either allow immediate service if the grace period
885 * has passed or arm the idle grace timer
886 */
887 if (!cfqq && !list_empty(&cfqd->idle_rr)) {
888 unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
889
890 if (time_after_eq(jiffies, end))
891 cfqq = list_entry_cfqq(cfqd->idle_rr.next);
892 else
893 mod_timer(&cfqd->idle_class_timer, end);
894 }
895
896 __cfq_set_active_queue(cfqd, cfqq);
897 return cfqq;
898}
899
900/*
901 * current cfqq expired its slice (or was too idle), select new one
902 */
903static void
904__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
905 int preempted)
906{
907 unsigned long now = jiffies;
908
909 if (cfq_cfqq_wait_request(cfqq))
910 del_timer(&cfqd->idle_slice_timer);
911
912 if (!preempted && !cfq_cfqq_dispatched(cfqq))
913 cfqq->service_last = now;
914
915 cfq_clear_cfqq_must_dispatch(cfqq);
916 cfq_clear_cfqq_wait_request(cfqq);
917
918 /*
919 * store what was left of this slice, if the queue idled out
920 * or was preempted
921 */
922 if (time_after(now, cfqq->slice_end))
923 cfqq->slice_left = now - cfqq->slice_end;
924 else
925 cfqq->slice_left = 0;
926
927 if (cfq_cfqq_on_rr(cfqq))
928 cfq_resort_rr_list(cfqq, preempted);
929
930 if (cfqq == cfqd->active_queue)
931 cfqd->active_queue = NULL;
932
933 if (cfqd->active_cic) {
934 put_io_context(cfqd->active_cic->ioc);
935 cfqd->active_cic = NULL;
936 }
937
938 cfqd->dispatch_slice = 0;
939}
940
941static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
942{
943 struct cfq_queue *cfqq = cfqd->active_queue;
944
945 if (cfqq) {
946 /*
947 * use deferred expiry, if there are requests in progress as
948 * not to disturb the slice of the next queue
949 */
950 if (cfq_cfqq_dispatched(cfqq))
951 cfq_mark_cfqq_expired(cfqq);
952 else
953 __cfq_slice_expired(cfqd, cfqq, preempted);
954 }
955}
956
957static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
958
959{
960 WARN_ON(!RB_EMPTY(&cfqq->sort_list));
961 WARN_ON(cfqq != cfqd->active_queue);
962
963 /*
964 * idle is disabled, either manually or by past process history
965 */
966 if (!cfqd->cfq_slice_idle)
967 return 0;
968 if (!cfq_cfqq_idle_window(cfqq))
969 return 0;
970 /*
971 * task has exited, don't wait
972 */
973 if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
974 return 0;
975
976 cfq_mark_cfqq_must_dispatch(cfqq);
977 cfq_mark_cfqq_wait_request(cfqq);
978
979 if (!timer_pending(&cfqd->idle_slice_timer)) {
980 unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
981
982 cfqd->idle_slice_timer.expires = jiffies + slice_left;
983 add_timer(&cfqd->idle_slice_timer);
984 }
985
986 return 1;
726} 987}
727 988
728/* 989/*
@@ -738,31 +999,40 @@ static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
738 struct request *__rq; 999 struct request *__rq;
739 sector_t last; 1000 sector_t last;
740 1001
741 cfq_del_crq_rb(crq);
742 cfq_remove_merge_hints(q, crq);
743 list_del(&crq->request->queuelist); 1002 list_del(&crq->request->queuelist);
744 1003
745 last = cfqd->last_sector; 1004 last = cfqd->last_sector;
746 while ((entry = entry->prev) != head) { 1005 list_for_each_entry_reverse(__rq, head, queuelist) {
747 __rq = list_entry_rq(entry); 1006 struct cfq_rq *__crq = RQ_DATA(__rq);
748 1007
749 if (blk_barrier_rq(crq->request)) 1008 if (blk_barrier_rq(__rq))
750 break; 1009 break;
751 if (!blk_fs_request(crq->request)) 1010 if (!blk_fs_request(__rq))
1011 break;
1012 if (cfq_crq_requeued(__crq))
752 break; 1013 break;
753 1014
754 if (crq->request->sector > __rq->sector) 1015 if (__rq->sector <= crq->request->sector)
755 break; 1016 break;
756 if (__rq->sector > last && crq->request->sector < last) { 1017 if (__rq->sector > last && crq->request->sector < last) {
757 last = crq->request->sector; 1018 last = crq->request->sector + crq->request->nr_sectors;
758 break; 1019 break;
759 } 1020 }
1021 entry = &__rq->queuelist;
760 } 1022 }
761 1023
762 cfqd->last_sector = last; 1024 cfqd->last_sector = last;
763 crq->in_flight = 1; 1025
764 cfqq->in_flight++; 1026 cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
765 list_add(&crq->request->queuelist, entry); 1027
1028 cfq_del_crq_rb(crq);
1029 cfq_remove_merge_hints(q, crq);
1030
1031 cfq_mark_crq_in_flight(crq);
1032 cfq_clear_crq_requeued(crq);
1033
1034 cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
1035 list_add_tail(&crq->request->queuelist, entry);
766} 1036}
767 1037
768/* 1038/*
@@ -771,173 +1041,225 @@ static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
771static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq) 1041static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
772{ 1042{
773 struct cfq_data *cfqd = cfqq->cfqd; 1043 struct cfq_data *cfqd = cfqq->cfqd;
774 const int reads = !list_empty(&cfqq->fifo[0]); 1044 struct request *rq;
775 const int writes = !list_empty(&cfqq->fifo[1]);
776 unsigned long now = jiffies;
777 struct cfq_rq *crq; 1045 struct cfq_rq *crq;
778 1046
779 if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire)) 1047 if (cfq_cfqq_fifo_expire(cfqq))
780 return NULL; 1048 return NULL;
781 1049
782 crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist)); 1050 if (!list_empty(&cfqq->fifo)) {
783 if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) { 1051 int fifo = cfq_cfqq_class_sync(cfqq);
784 cfqq->last_fifo_expire = now;
785 return crq;
786 }
787 1052
788 crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist)); 1053 crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
789 if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) { 1054 rq = crq->request;
790 cfqq->last_fifo_expire = now; 1055 if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
791 return crq; 1056 cfq_mark_cfqq_fifo_expire(cfqq);
1057 return crq;
1058 }
792 } 1059 }
793 1060
794 return NULL; 1061 return NULL;
795} 1062}
796 1063
797/* 1064/*
798 * dispatch a single request from given queue 1065 * Scale schedule slice based on io priority. Use the sync time slice only
1066 * if a queue is marked sync and has sync io queued. A sync queue with async
1067 * io only, should not get full sync slice length.
799 */ 1068 */
1069static inline int
1070cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1071{
1072 const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
1073
1074 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1075
1076 return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
1077}
1078
800static inline void 1079static inline void
801cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd, 1080cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
802 struct cfq_queue *cfqq)
803{ 1081{
804 struct cfq_rq *crq; 1082 cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
1083}
1084
1085static inline int
1086cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1087{
1088 const int base_rq = cfqd->cfq_slice_async_rq;
1089
1090 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1091
1092 return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1093}
1094
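
cfq_prio_to_slice() above is a straight linear scale around the middle of the
best-effort range: base_slice + (base_slice / CFQ_SLICE_SCALE) * (4 - ioprio),
so ioprio 4 gets exactly the base slice and each priority step is worth a
fifth of it. Worked numbers as a sketch, assuming HZ = 1000 so the sync base
slice (cfq_slice_sync = HZ / 10) is 100 jiffies:

#include <stdio.h>

#define HZ 1000                 /* assumed */
#define CFQ_SLICE_SCALE 5

static int prio_to_slice(int base_slice, int ioprio)
{
        return base_slice + (base_slice / CFQ_SLICE_SCALE) * (4 - ioprio);
}

int main(void)
{
        int sync_base = HZ / 10;        /* cfq_slice_sync default */
        int prio;

        for (prio = 0; prio < 8; prio++)
                printf("ioprio %d -> %3d jiffies\n",
                       prio, prio_to_slice(sync_base, prio));
        /* 180, 160, 140, 120, 100, 80, 60, 40 */
        return 0;
}

cfq_prio_to_maxrq() applies the same linear idea to the async dispatch budget,
counting requests per slice instead of jiffies.
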
1095/*
1096 * get next queue for service
1097 */
1098static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force)
1099{
1100 unsigned long now = jiffies;
1101 struct cfq_queue *cfqq;
1102
1103 cfqq = cfqd->active_queue;
1104 if (!cfqq)
1105 goto new_queue;
1106
1107 if (cfq_cfqq_expired(cfqq))
1108 goto new_queue;
805 1109
806 /* 1110 /*
807 * follow expired path, else get first next available 1111 * slice has expired
808 */ 1112 */
809 if ((crq = cfq_check_fifo(cfqq)) == NULL) { 1113 if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
810 if (cfqd->find_best_crq) 1114 goto expire;
811 crq = cfqq->next_crq;
812 else
813 crq = rb_entry_crq(rb_first(&cfqq->sort_list));
814 }
815
816 cfqd->last_sector = crq->request->sector + crq->request->nr_sectors;
817 1115
818 /* 1116 /*
819 * finally, insert request into driver list 1117 * if queue has requests, dispatch one. if not, check if
1118 * enough slice is left to wait for one
820 */ 1119 */
821 cfq_dispatch_sort(q, crq); 1120 if (!RB_EMPTY(&cfqq->sort_list))
1121 goto keep_queue;
1122 else if (!force && cfq_cfqq_class_sync(cfqq) &&
1123 time_before(now, cfqq->slice_end)) {
1124 if (cfq_arm_slice_timer(cfqd, cfqq))
1125 return NULL;
1126 }
1127
1128expire:
1129 cfq_slice_expired(cfqd, 0);
1130new_queue:
1131 cfqq = cfq_set_active_queue(cfqd);
1132keep_queue:
1133 return cfqq;
822} 1134}
823 1135
824static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch) 1136static int
1137__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1138 int max_dispatch)
825{ 1139{
826 struct cfq_data *cfqd = q->elevator->elevator_data; 1140 int dispatched = 0;
827 struct cfq_queue *cfqq;
828 struct list_head *entry, *tmp;
829 int queued, busy_queues, first_round;
830 1141
831 if (list_empty(&cfqd->rr_list)) 1142 BUG_ON(RB_EMPTY(&cfqq->sort_list));
832 return 0;
833 1143
834 queued = 0; 1144 do {
835 first_round = 1; 1145 struct cfq_rq *crq;
836restart:
837 busy_queues = 0;
838 list_for_each_safe(entry, tmp, &cfqd->rr_list) {
839 cfqq = list_entry_cfqq(entry);
840 1146
841 BUG_ON(RB_EMPTY(&cfqq->sort_list)); 1147 /*
1148 * follow expired path, else get first next available
1149 */
1150 if ((crq = cfq_check_fifo(cfqq)) == NULL)
1151 crq = cfqq->next_crq;
842 1152
843 /* 1153 /*
844 * first round of queueing, only select from queues that 1154 * finally, insert request into driver dispatch list
845 * don't already have io in-flight
846 */ 1155 */
847 if (first_round && cfqq->in_flight) 1156 cfq_dispatch_sort(cfqd->queue, crq);
848 continue;
849 1157
850 cfq_dispatch_request(q, cfqd, cfqq); 1158 cfqd->dispatch_slice++;
1159 dispatched++;
851 1160
852 if (!RB_EMPTY(&cfqq->sort_list)) 1161 if (!cfqd->active_cic) {
853 busy_queues++; 1162 atomic_inc(&crq->io_context->ioc->refcount);
1163 cfqd->active_cic = crq->io_context;
1164 }
854 1165
855 queued++; 1166 if (RB_EMPTY(&cfqq->sort_list))
856 } 1167 break;
1168
1169 } while (dispatched < max_dispatch);
1170
1171 /*
1172 * if slice end isn't set yet, set it. if at least one request was
1173 * sync, use the sync time slice value
1174 */
1175 if (!cfqq->slice_end)
1176 cfq_set_prio_slice(cfqd, cfqq);
1177
1178 /*
1179 * expire an async queue immediately if it has used up its slice. idle
1180 * queue always expire after 1 dispatch round.
1181 */
1182 if ((!cfq_cfqq_sync(cfqq) &&
1183 cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1184 cfq_class_idle(cfqq))
1185 cfq_slice_expired(cfqd, 0);
1186
1187 return dispatched;
1188}
1189
1190static int
1191cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
1192{
1193 struct cfq_data *cfqd = q->elevator->elevator_data;
1194 struct cfq_queue *cfqq;
1195
1196 if (!cfqd->busy_queues)
1197 return 0;
1198
1199 cfqq = cfq_select_queue(cfqd, force);
1200 if (cfqq) {
1201 cfq_clear_cfqq_must_dispatch(cfqq);
1202 cfq_clear_cfqq_wait_request(cfqq);
1203 del_timer(&cfqd->idle_slice_timer);
857 1204
858 if ((queued < max_dispatch) && (busy_queues || first_round)) { 1205 if (cfq_class_idle(cfqq))
859 first_round = 0; 1206 max_dispatch = 1;
860 goto restart; 1207
1208 return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
861 } 1209 }
862 1210
863 return queued; 1211 return 0;
864} 1212}
865 1213
866static inline void cfq_account_dispatch(struct cfq_rq *crq) 1214static inline void cfq_account_dispatch(struct cfq_rq *crq)
867{ 1215{
868 struct cfq_queue *cfqq = crq->cfq_queue; 1216 struct cfq_queue *cfqq = crq->cfq_queue;
869 struct cfq_data *cfqd = cfqq->cfqd; 1217 struct cfq_data *cfqd = cfqq->cfqd;
870 unsigned long now, elapsed;
871 1218
872 if (!blk_fs_request(crq->request)) 1219 if (unlikely(!blk_fs_request(crq->request)))
873 return; 1220 return;
874 1221
875 /* 1222 /*
876 * accounted bit is necessary since some drivers will call 1223 * accounted bit is necessary since some drivers will call
877 * elv_next_request() many times for the same request (eg ide) 1224 * elv_next_request() many times for the same request (eg ide)
878 */ 1225 */
879 if (crq->accounted) 1226 if (cfq_crq_in_driver(crq))
880 return; 1227 return;
881 1228
882 now = jiffies; 1229 cfq_mark_crq_in_driver(crq);
883 if (cfqq->service_start == ~0UL) 1230 cfqd->rq_in_driver++;
884 cfqq->service_start = now;
885
886 /*
887 * on drives with tagged command queueing, command turn-around time
888 * doesn't necessarily reflect the time spent processing this very
889 * command inside the drive. so do the accounting differently there,
890 * by just sorting on the number of requests
891 */
892 if (cfqd->cfq_tagged) {
893 if (time_after(now, cfqq->service_start + cfq_service)) {
894 cfqq->service_start = now;
895 cfqq->service_used /= 10;
896 }
897
898 cfqq->service_used++;
899 cfq_sort_rr_list(cfqq, 0);
900 }
901
902 elapsed = now - crq->queue_start;
903 if (elapsed > max_elapsed_dispatch)
904 max_elapsed_dispatch = elapsed;
905
906 crq->accounted = 1;
907 crq->service_start = now;
908
909 if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) {
910 cfqq->cfqd->cfq_tagged = 1;
911 printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG);
912 }
913} 1231}
914 1232
915static inline void 1233static inline void
916cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq) 1234cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
917{ 1235{
918 struct cfq_data *cfqd = cfqq->cfqd; 1236 struct cfq_data *cfqd = cfqq->cfqd;
1237 unsigned long now;
919 1238
920 if (!crq->accounted) 1239 if (!cfq_crq_in_driver(crq))
921 return; 1240 return;
922 1241
1242 now = jiffies;
1243
923 WARN_ON(!cfqd->rq_in_driver); 1244 WARN_ON(!cfqd->rq_in_driver);
924 cfqd->rq_in_driver--; 1245 cfqd->rq_in_driver--;
925 1246
926 if (!cfqd->cfq_tagged) { 1247 if (!cfq_class_idle(cfqq))
927 unsigned long now = jiffies; 1248 cfqd->last_end_request = now;
928 unsigned long duration = now - crq->service_start;
929 1249
930 if (time_after(now, cfqq->service_start + cfq_service)) { 1250 if (!cfq_cfqq_dispatched(cfqq)) {
931 cfqq->service_start = now; 1251 if (cfq_cfqq_on_rr(cfqq)) {
932 cfqq->service_used >>= 3; 1252 cfqq->service_last = now;
1253 cfq_resort_rr_list(cfqq, 0);
1254 }
1255 if (cfq_cfqq_expired(cfqq)) {
1256 __cfq_slice_expired(cfqd, cfqq, 0);
1257 cfq_schedule_dispatch(cfqd);
933 } 1258 }
934
935 cfqq->service_used += duration;
936 cfq_sort_rr_list(cfqq, 0);
937
938 if (duration > max_elapsed_crq)
939 max_elapsed_crq = duration;
940 } 1259 }
1260
1261 if (cfq_crq_is_sync(crq))
1262 crq->io_context->last_end_request = now;
941} 1263}
942 1264
943static struct request *cfq_next_request(request_queue_t *q) 1265static struct request *cfq_next_request(request_queue_t *q)
@@ -950,7 +1272,18 @@ static struct request *cfq_next_request(request_queue_t *q)
950dispatch: 1272dispatch:
951 rq = list_entry_rq(q->queue_head.next); 1273 rq = list_entry_rq(q->queue_head.next);
952 1274
953 if ((crq = RQ_DATA(rq)) != NULL) { 1275 crq = RQ_DATA(rq);
1276 if (crq) {
1277 struct cfq_queue *cfqq = crq->cfq_queue;
1278
1279 /*
1280 * if idle window is disabled, allow queue buildup
1281 */
1282 if (!cfq_crq_in_driver(crq) &&
1283 !cfq_cfqq_idle_window(cfqq) &&
1284 cfqd->rq_in_driver >= cfqd->cfq_max_depth)
1285 return NULL;
1286
954 cfq_remove_merge_hints(q, crq); 1287 cfq_remove_merge_hints(q, crq);
955 cfq_account_dispatch(crq); 1288 cfq_account_dispatch(crq);
956 } 1289 }
@@ -958,7 +1291,7 @@ dispatch:
958 return rq; 1291 return rq;
959 } 1292 }
960 1293
961 if (cfq_dispatch_requests(q, cfqd->cfq_quantum)) 1294 if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
962 goto dispatch; 1295 goto dispatch;
963 1296
964 return NULL; 1297 return NULL;
@@ -972,13 +1305,21 @@ dispatch:
972 */ 1305 */
973static void cfq_put_queue(struct cfq_queue *cfqq) 1306static void cfq_put_queue(struct cfq_queue *cfqq)
974{ 1307{
975 BUG_ON(!atomic_read(&cfqq->ref)); 1308 struct cfq_data *cfqd = cfqq->cfqd;
1309
1310 BUG_ON(atomic_read(&cfqq->ref) <= 0);
976 1311
977 if (!atomic_dec_and_test(&cfqq->ref)) 1312 if (!atomic_dec_and_test(&cfqq->ref))
978 return; 1313 return;
979 1314
980 BUG_ON(rb_first(&cfqq->sort_list)); 1315 BUG_ON(rb_first(&cfqq->sort_list));
981 BUG_ON(cfqq->on_rr); 1316 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
1317 BUG_ON(cfq_cfqq_on_rr(cfqq));
1318
1319 if (unlikely(cfqd->active_queue == cfqq)) {
1320 __cfq_slice_expired(cfqd, cfqq, 0);
1321 cfq_schedule_dispatch(cfqd);
1322 }
982 1323
983 cfq_put_cfqd(cfqq->cfqd); 1324 cfq_put_cfqd(cfqq->cfqd);
984 1325
@@ -991,15 +1332,17 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
991} 1332}
992 1333
993static inline struct cfq_queue * 1334static inline struct cfq_queue *
994__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval) 1335__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
1336 const int hashval)
995{ 1337{
996 struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; 1338 struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
997 struct hlist_node *entry, *next; 1339 struct hlist_node *entry, *next;
998 1340
999 hlist_for_each_safe(entry, next, hash_list) { 1341 hlist_for_each_safe(entry, next, hash_list) {
1000 struct cfq_queue *__cfqq = list_entry_qhash(entry); 1342 struct cfq_queue *__cfqq = list_entry_qhash(entry);
1343 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio);
1001 1344
1002 if (__cfqq->key == key) 1345 if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
1003 return __cfqq; 1346 return __cfqq;
1004 } 1347 }
1005 1348
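
The lookup above now matches on the packed ioprio value as well as the key,
with CFQ_KEY_ANY acting as a wildcard so a caller such as cfq_find_rq_rb()
can locate a task's queue without knowing its priority. A sketch of the
predicate; the IOPRIO_PRIO_VALUE packing is assumed from the linux/ioprio.h
of that era:

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT 13           /* assumed from linux/ioprio.h */
#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_CLASS_BE 2
#define CFQ_KEY_ANY 0xffff

static int prio_match(unsigned short qprio, unsigned short wanted)
{
        return qprio == wanted || wanted == CFQ_KEY_ANY;
}

int main(void)
{
        unsigned short be4 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
        unsigned short be5 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 5);

        printf("exact: %d, other prio: %d, wildcard: %d\n",
               prio_match(be4, be4), prio_match(be4, be5),
               prio_match(be4, CFQ_KEY_ANY));   /* 1, 0, 1 */
        return 0;
}
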
@@ -1007,94 +1350,220 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval)
1007} 1350}
1008 1351
1009static struct cfq_queue * 1352static struct cfq_queue *
1010cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key) 1353cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
1011{ 1354{
1012 return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT)); 1355 return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
1013} 1356}
1014 1357
1015static inline void 1358static void cfq_free_io_context(struct cfq_io_context *cic)
1016cfq_rehash_cfqq(struct cfq_data *cfqd, struct cfq_queue **cfqq,
1017 struct cfq_io_context *cic)
1018{ 1359{
1019 unsigned long hashkey = cfq_hash_key(cfqd, current); 1360 struct cfq_io_context *__cic;
1020 unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT); 1361 struct list_head *entry, *next;
1021 struct cfq_queue *__cfqq;
1022 unsigned long flags;
1023
1024 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1025 1362
1026 hlist_del(&(*cfqq)->cfq_hash); 1363 list_for_each_safe(entry, next, &cic->list) {
1027 1364 __cic = list_entry(entry, struct cfq_io_context, list);
1028 __cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval); 1365 kmem_cache_free(cfq_ioc_pool, __cic);
1029 if (!__cfqq || __cfqq == *cfqq) {
1030 __cfqq = *cfqq;
1031 hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1032 __cfqq->key_type = cfqd->key_type;
1033 } else {
1034 atomic_inc(&__cfqq->ref);
1035 cic->cfqq = __cfqq;
1036 cfq_put_queue(*cfqq);
1037 *cfqq = __cfqq;
1038 } 1366 }
1039 1367
1040 cic->cfqq = __cfqq; 1368 kmem_cache_free(cfq_ioc_pool, cic);
1041 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1042} 1369}
1043 1370
1044static void cfq_free_io_context(struct cfq_io_context *cic) 1371/*
1372 * Called with interrupts disabled
1373 */
1374static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1045{ 1375{
1046 kmem_cache_free(cfq_ioc_pool, cic); 1376 struct cfq_data *cfqd = cic->cfqq->cfqd;
1377 request_queue_t *q = cfqd->queue;
1378
1379 WARN_ON(!irqs_disabled());
1380
1381 spin_lock(q->queue_lock);
1382
1383 if (unlikely(cic->cfqq == cfqd->active_queue)) {
1384 __cfq_slice_expired(cfqd, cic->cfqq, 0);
1385 cfq_schedule_dispatch(cfqd);
1386 }
1387
1388 cfq_put_queue(cic->cfqq);
1389 cic->cfqq = NULL;
1390 spin_unlock(q->queue_lock);
1047} 1391}
1048 1392
1049/* 1393/*
1050 * locking hierarchy is: io_context lock -> queue locks 1394 * Another task may update the task cic list, if it is doing a queue lookup
1395 * on its behalf. cfq_cic_lock excludes such concurrent updates
1051 */ 1396 */
1052static void cfq_exit_io_context(struct cfq_io_context *cic) 1397static void cfq_exit_io_context(struct cfq_io_context *cic)
1053{ 1398{
1054 struct cfq_queue *cfqq = cic->cfqq; 1399 struct cfq_io_context *__cic;
1055 struct list_head *entry = &cic->list; 1400 struct list_head *entry;
1056 request_queue_t *q;
1057 unsigned long flags; 1401 unsigned long flags;
1058 1402
1403 local_irq_save(flags);
1404
1059 /* 1405 /*
1060 * put the reference this task is holding to the various queues 1406 * put the reference this task is holding to the various queues
1061 */ 1407 */
1062 spin_lock_irqsave(&cic->ioc->lock, flags); 1408 list_for_each(entry, &cic->list) {
1063 while ((entry = cic->list.next) != &cic->list) {
1064 struct cfq_io_context *__cic;
1065
1066 __cic = list_entry(entry, struct cfq_io_context, list); 1409 __cic = list_entry(entry, struct cfq_io_context, list);
1067 list_del(entry); 1410 cfq_exit_single_io_context(__cic);
1068
1069 q = __cic->cfqq->cfqd->queue;
1070 spin_lock(q->queue_lock);
1071 cfq_put_queue(__cic->cfqq);
1072 spin_unlock(q->queue_lock);
1073 } 1411 }
1074 1412
1075 q = cfqq->cfqd->queue; 1413 cfq_exit_single_io_context(cic);
1076 spin_lock(q->queue_lock); 1414 local_irq_restore(flags);
1077 cfq_put_queue(cfqq);
1078 spin_unlock(q->queue_lock);
1079
1080 cic->cfqq = NULL;
1081 spin_unlock_irqrestore(&cic->ioc->lock, flags);
1082} 1415}
1083 1416
1084static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags) 1417static struct cfq_io_context *
1418cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask)
1085{ 1419{
1086 struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags); 1420 struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
1087 1421
1088 if (cic) { 1422 if (cic) {
1089 cic->dtor = cfq_free_io_context;
1090 cic->exit = cfq_exit_io_context;
1091 INIT_LIST_HEAD(&cic->list); 1423 INIT_LIST_HEAD(&cic->list);
1092 cic->cfqq = NULL; 1424 cic->cfqq = NULL;
1425 cic->key = NULL;
1426 cic->last_end_request = jiffies;
1427 cic->ttime_total = 0;
1428 cic->ttime_samples = 0;
1429 cic->ttime_mean = 0;
1430 cic->dtor = cfq_free_io_context;
1431 cic->exit = cfq_exit_io_context;
1093 } 1432 }
1094 1433
1095 return cic; 1434 return cic;
1096} 1435}
1097 1436
1437static void cfq_init_prio_data(struct cfq_queue *cfqq)
1438{
1439 struct task_struct *tsk = current;
1440 int ioprio_class;
1441
1442 if (!cfq_cfqq_prio_changed(cfqq))
1443 return;
1444
1445 ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
1446 switch (ioprio_class) {
1447 default:
1448 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1449 case IOPRIO_CLASS_NONE:
1450 /*
1451 * no prio set, place us in the middle of the BE classes
1452 */
1453 cfqq->ioprio = task_nice_ioprio(tsk);
1454 cfqq->ioprio_class = IOPRIO_CLASS_BE;
1455 break;
1456 case IOPRIO_CLASS_RT:
1457 cfqq->ioprio = task_ioprio(tsk);
1458 cfqq->ioprio_class = IOPRIO_CLASS_RT;
1459 break;
1460 case IOPRIO_CLASS_BE:
1461 cfqq->ioprio = task_ioprio(tsk);
1462 cfqq->ioprio_class = IOPRIO_CLASS_BE;
1463 break;
1464 case IOPRIO_CLASS_IDLE:
1465 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1466 cfqq->ioprio = 7;
1467 cfq_clear_cfqq_idle_window(cfqq);
1468 break;
1469 }
1470
1471 /*
1472 * keep track of original prio settings in case we have to temporarily
1473 * elevate the priority of this queue
1474 */
1475 cfqq->org_ioprio = cfqq->ioprio;
1476 cfqq->org_ioprio_class = cfqq->ioprio_class;
1477
1478 if (cfq_cfqq_on_rr(cfqq))
1479 cfq_resort_rr_list(cfqq, 0);
1480
1481 cfq_clear_cfqq_prio_changed(cfqq);
1482}
1483
1484static inline void changed_ioprio(struct cfq_queue *cfqq)
1485{
1486 if (cfqq) {
1487 struct cfq_data *cfqd = cfqq->cfqd;
1488
1489 spin_lock(cfqd->queue->queue_lock);
1490 cfq_mark_cfqq_prio_changed(cfqq);
1491 cfq_init_prio_data(cfqq);
1492 spin_unlock(cfqd->queue->queue_lock);
1493 }
1494}
1495
1496/*
1497 * callback from sys_ioprio_set, irqs are disabled
1498 */
1499static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
1500{
1501 struct cfq_io_context *cic = ioc->cic;
1502
1503 changed_ioprio(cic->cfqq);
1504
1505 list_for_each_entry(cic, &cic->list, list)
1506 changed_ioprio(cic->cfqq);
1507
1508 return 0;
1509}
1510
1511static struct cfq_queue *
1512cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
1513 int gfp_mask)
1514{
1515 const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
1516 struct cfq_queue *cfqq, *new_cfqq = NULL;
1517
1518retry:
1519 cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
1520
1521 if (!cfqq) {
1522 if (new_cfqq) {
1523 cfqq = new_cfqq;
1524 new_cfqq = NULL;
1525 } else if (gfp_mask & __GFP_WAIT) {
1526 spin_unlock_irq(cfqd->queue->queue_lock);
1527 new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
1528 spin_lock_irq(cfqd->queue->queue_lock);
1529 goto retry;
1530 } else {
1531 cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
1532 if (!cfqq)
1533 goto out;
1534 }
1535
1536 memset(cfqq, 0, sizeof(*cfqq));
1537
1538 INIT_HLIST_NODE(&cfqq->cfq_hash);
1539 INIT_LIST_HEAD(&cfqq->cfq_list);
1540 RB_CLEAR_ROOT(&cfqq->sort_list);
1541 INIT_LIST_HEAD(&cfqq->fifo);
1542
1543 cfqq->key = key;
1544 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1545 atomic_set(&cfqq->ref, 0);
1546 cfqq->cfqd = cfqd;
1547 atomic_inc(&cfqd->ref);
1548 cfqq->service_last = 0;
1549 /*
1550 * set ->slice_left to allow preemption for a new process
1551 */
1552 cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
1553 cfq_mark_cfqq_idle_window(cfqq);
1554 cfq_mark_cfqq_prio_changed(cfqq);
1555 cfq_init_prio_data(cfqq);
1556 }
1557
1558 if (new_cfqq)
1559 kmem_cache_free(cfq_pool, new_cfqq);
1560
1561 atomic_inc(&cfqq->ref);
1562out:
1563 WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1564 return cfqq;
1565}
1566
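
cfq_get_queue() above uses the classic unlock/allocate/relock pattern: when
the caller may sleep (__GFP_WAIT), the queue lock is dropped around the
allocation and the hash lookup is redone afterwards, since another task may
have created the same cfqq in the meantime; the loser simply frees its spare.
A user-space sketch with a pthread mutex standing in for the queue lock (all
names illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *table_slot;                /* one-slot stand-in for the hash */

static void *get_object(void)
{
        void *obj, *spare = NULL;

        pthread_mutex_lock(&lock);
retry:
        obj = table_slot;               /* lookup under the lock */
        if (!obj) {
                if (spare) {
                        table_slot = obj = spare;   /* we won: install ours */
                        spare = NULL;
                } else {
                        pthread_mutex_unlock(&lock);
                        spare = malloc(64);         /* may block; lock dropped */
                        pthread_mutex_lock(&lock);
                        if (spare)
                                goto retry;         /* re-lookup: we may have raced */
                        /* allocation failed: fall through with obj == NULL */
                }
        }
        pthread_mutex_unlock(&lock);
        free(spare);                    /* lost the race: discard the spare */
        return obj;
}

int main(void)
{
        return get_object() ? 0 : 1;
}
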
1098/* 1567/*
1099 * Setup general io context and cfq io context. There can be several cfq 1568 * Setup general io context and cfq io context. There can be several cfq
1100 * io contexts per general io context, if this process is doing io to more 1569 * io contexts per general io context, if this process is doing io to more
@@ -1102,39 +1571,39 @@ static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags)
1102 * cfqq, so we don't need to worry about it disappearing 1571 * cfqq, so we don't need to worry about it disappearing
1103 */ 1572 */
1104static struct cfq_io_context * 1573static struct cfq_io_context *
1105cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags) 1574cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask)
1106{ 1575{
1107 struct cfq_data *cfqd = (*cfqq)->cfqd; 1576 struct io_context *ioc = NULL;
1108 struct cfq_queue *__cfqq = *cfqq;
1109 struct cfq_io_context *cic; 1577 struct cfq_io_context *cic;
1110 struct io_context *ioc;
1111 1578
1112 might_sleep_if(gfp_flags & __GFP_WAIT); 1579 might_sleep_if(gfp_mask & __GFP_WAIT);
1113 1580
1114 ioc = get_io_context(gfp_flags); 1581 ioc = get_io_context(gfp_mask);
1115 if (!ioc) 1582 if (!ioc)
1116 return NULL; 1583 return NULL;
1117 1584
1118 if ((cic = ioc->cic) == NULL) { 1585 if ((cic = ioc->cic) == NULL) {
1119 cic = cfq_alloc_io_context(gfp_flags); 1586 cic = cfq_alloc_io_context(cfqd, gfp_mask);
1120 1587
1121 if (cic == NULL) 1588 if (cic == NULL)
1122 goto err; 1589 goto err;
1123 1590
1591 /*
1592 * manually increment generic io_context usage count, it
1593 * cannot go away since we are already holding one ref to it
1594 */
1124 ioc->cic = cic; 1595 ioc->cic = cic;
1596 ioc->set_ioprio = cfq_ioc_set_ioprio;
1125 cic->ioc = ioc; 1597 cic->ioc = ioc;
1126 cic->cfqq = __cfqq; 1598 cic->key = cfqd;
1127 atomic_inc(&__cfqq->ref); 1599 atomic_inc(&cfqd->ref);
1128 } else { 1600 } else {
1129 struct cfq_io_context *__cic; 1601 struct cfq_io_context *__cic;
1130 unsigned long flags;
1131 1602
1132 /* 1603 /*
1133 * since the first cic on the list is actually the head 1604 * the first cic on the list is actually the head itself
1134 * itself, need to check this here or we'll duplicate an
1135 * cic per ioc for no reason
1136 */ 1605 */
1137 if (cic->cfqq == __cfqq) 1606 if (cic->key == cfqd)
1138 goto out; 1607 goto out;
1139 1608
1140 /* 1609 /*
@@ -1142,152 +1611,250 @@ cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags)
1142 * should be ok here, the list will usually not be more than 1611 * should be ok here, the list will usually not be more than
1143 * 1 or a few entries long 1612 * 1 or a few entries long
1144 */ 1613 */
1145 spin_lock_irqsave(&ioc->lock, flags);
1146 list_for_each_entry(__cic, &cic->list, list) { 1614 list_for_each_entry(__cic, &cic->list, list) {
1147 /* 1615 /*
1148 * this process is already holding a reference to 1616 * this process is already holding a reference to
1149 * this queue, so no need to get one more 1617 * this queue, so no need to get one more
1150 */ 1618 */
1151 if (__cic->cfqq == __cfqq) { 1619 if (__cic->key == cfqd) {
1152 cic = __cic; 1620 cic = __cic;
1153 spin_unlock_irqrestore(&ioc->lock, flags);
1154 goto out; 1621 goto out;
1155 } 1622 }
1156 } 1623 }
1157 spin_unlock_irqrestore(&ioc->lock, flags);
1158 1624
1159 /* 1625 /*
1160 * nope, process doesn't have a cic associated with this 1626 * nope, process doesn't have a cic associated with this
1161 * cfqq yet. get a new one and add to list 1627 * cfqq yet. get a new one and add to list
1162 */ 1628 */
1163 __cic = cfq_alloc_io_context(gfp_flags); 1629 __cic = cfq_alloc_io_context(cfqd, gfp_mask);
1164 if (__cic == NULL) 1630 if (__cic == NULL)
1165 goto err; 1631 goto err;
1166 1632
1167 __cic->ioc = ioc; 1633 __cic->ioc = ioc;
1168 __cic->cfqq = __cfqq; 1634 __cic->key = cfqd;
1169 atomic_inc(&__cfqq->ref); 1635 atomic_inc(&cfqd->ref);
1170 spin_lock_irqsave(&ioc->lock, flags);
1171 list_add(&__cic->list, &cic->list); 1636 list_add(&__cic->list, &cic->list);
1172 spin_unlock_irqrestore(&ioc->lock, flags);
1173
1174 cic = __cic; 1637 cic = __cic;
1175 *cfqq = __cfqq;
1176 } 1638 }
1177 1639
1178out: 1640out:
1179 /*
1180 * if key_type has been changed on the fly, we lazily rehash
1181 * each queue at lookup time
1182 */
1183 if ((*cfqq)->key_type != cfqd->key_type)
1184 cfq_rehash_cfqq(cfqd, cfqq, cic);
1185
1186 return cic; 1641 return cic;
1187err: 1642err:
1188 put_io_context(ioc); 1643 put_io_context(ioc);
1189 return NULL; 1644 return NULL;
1190} 1645}
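[editor's note] The net effect of the rekeying in this function: a cfq_io_context now ties an io_context to a cfq_data (cic->key == cfqd, one entry per process/device pair) rather than to a single cfq_queue, so the per-process queue can be looked up or rebuilt later without tearing down io-context state, and the linear list walk stays cheap because a process rarely does io to more than a few cfq-managed devices.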
1191 1646
1192static struct cfq_queue * 1647static void
1193__cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask) 1648cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1194{ 1649{
1195 const int hashval = hash_long(key, CFQ_QHASH_SHIFT); 1650 unsigned long elapsed, ttime;
1196 struct cfq_queue *cfqq, *new_cfqq = NULL;
1197
1198retry:
1199 cfqq = __cfq_find_cfq_hash(cfqd, key, hashval);
1200 1651
1201 if (!cfqq) { 1652 /*
1202 if (new_cfqq) { 1653 * if this context already has stuff queued, thinktime is from
1203 cfqq = new_cfqq; 1654 * last queue not last end
1204 new_cfqq = NULL; 1655 */
1205 } else { 1656#if 0
1206 spin_unlock_irq(cfqd->queue->queue_lock); 1657 if (time_after(cic->last_end_request, cic->last_queue))
1207 new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); 1658 elapsed = jiffies - cic->last_end_request;
1208 spin_lock_irq(cfqd->queue->queue_lock); 1659 else
1660 elapsed = jiffies - cic->last_queue;
1661#else
1662 elapsed = jiffies - cic->last_end_request;
1663#endif
1209 1664
1210 if (!new_cfqq && !(gfp_mask & __GFP_WAIT)) 1665 ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1211 goto out;
1212 1666
1213 goto retry; 1667 cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1214 } 1668 cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1669 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1670}
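[editor's note] The three updates above form a fixed-point exponential moving average: every new sample carries 1/8 weight, and everything is scaled by 256 so that integer division keeps precision. ttime_samples converges toward 256 (the fixed point of s = (7s + 256) / 8), so the sample_valid() cutoff of 80 defined below is crossed after roughly three samples. A standalone sketch of the same arithmetic, fed made-up think times:

#include <stdio.h>

static unsigned long ttime_samples, ttime_total, ttime_mean;

static void update_thinktime(unsigned long ttime)
{
        /* 7/8 old weight + 1/8 new sample, scaled by 256 */
        ttime_samples = (7 * ttime_samples + 256) / 8;
        ttime_total = (7 * ttime_total + 256 * ttime) / 8;
        ttime_mean = (ttime_total + 128) / ttime_samples;
}

int main(void)
{
        /* hypothetical think times, in jiffies */
        unsigned long t[] = { 2, 4, 3, 20, 2 };
        int i;

        for (i = 0; i < 5; i++) {
                update_thinktime(t[i]);
                printf("sample=%lu mean=%lu weight=%lu\n",
                       t[i], ttime_mean, ttime_samples);
        }
        return 0;
}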
1215 1671
1216 memset(cfqq, 0, sizeof(*cfqq)); 1672#define sample_valid(samples) ((samples) > 80)
1217 1673
1218 INIT_HLIST_NODE(&cfqq->cfq_hash); 1674/*
1219 INIT_LIST_HEAD(&cfqq->cfq_list); 1675 * Disable idle window if the process thinks too long or seeks so much that
1220 RB_CLEAR_ROOT(&cfqq->sort_list); 1676 * it doesn't matter
1221 INIT_LIST_HEAD(&cfqq->fifo[0]); 1677 */
1222 INIT_LIST_HEAD(&cfqq->fifo[1]); 1678static void
1679cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1680 struct cfq_io_context *cic)
1681{
1682 int enable_idle = cfq_cfqq_idle_window(cfqq);
1223 1683
1224 cfqq->key = key; 1684 if (!cic->ioc->task || !cfqd->cfq_slice_idle)
1225 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); 1685 enable_idle = 0;
1226 atomic_set(&cfqq->ref, 0); 1686 else if (sample_valid(cic->ttime_samples)) {
1227 cfqq->cfqd = cfqd; 1687 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1228 atomic_inc(&cfqd->ref); 1688 enable_idle = 0;
1229 cfqq->key_type = cfqd->key_type; 1689 else
1230 cfqq->service_start = ~0UL; 1690 enable_idle = 1;
1231 } 1691 }
1232 1692
1233 if (new_cfqq) 1693 if (enable_idle)
1234 kmem_cache_free(cfq_pool, new_cfqq); 1694 cfq_mark_cfqq_idle_window(cfqq);
1695 else
1696 cfq_clear_cfqq_idle_window(cfqq);
1697}
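[editor's note] Keeping the idle window enabled is what makes the new scheduler anticipatory: the idle-slice timer added later in this patch waits up to cfq_slice_idle jiffies for the active queue's next sync request instead of switching queues immediately, so a process whose measured mean thinktime exceeds that wait is, by this heuristic, not worth idling for.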
1235 1698
1236 atomic_inc(&cfqq->ref); 1699
1237out: 1700/*
1238 WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); 1701 * Check if new_cfqq should preempt the currently active queue. Return 0 for
1239 return cfqq; 1702 * no (or if we aren't sure); 1 will cause a preempt.
1703 */
1704static int
1705cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
1706 struct cfq_rq *crq)
1707{
1708 struct cfq_queue *cfqq = cfqd->active_queue;
1709
1710 if (cfq_class_idle(new_cfqq))
1711 return 0;
1712
1713 if (!cfqq)
1714 return 1;
1715
1716 if (cfq_class_idle(cfqq))
1717 return 1;
1718 if (!cfq_cfqq_wait_request(new_cfqq))
1719 return 0;
1720 /*
1721 * if it doesn't have slice left, forget it
1722 */
1723 if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
1724 return 0;
1725 if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq))
1726 return 1;
1727
1728 return 0;
1729}
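[editor's note] Read in order, the checks above amount to: an idle-class newcomer never preempts; a missing or idle-class incumbent always yields; beyond that the newcomer must itself be in wait_request state with at least cfq_slice_idle of slice_left remaining, and then only a sync request arriving against an async incumbent actually triggers the preempt.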
1730
1731/*
1732 * cfqq preempts the active queue. if we allow a preempt with no slice left,
1733 * let it have half of its nominal slice.
1734 */
1735static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1736{
1737 struct cfq_queue *__cfqq, *next;
1738
1739 list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
1740 cfq_resort_rr_list(__cfqq, 1);
1741
1742 if (!cfqq->slice_left)
1743 cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
1744
1745 cfqq->slice_end = cfqq->slice_left + jiffies;
1746 __cfq_slice_expired(cfqd, cfqq, 1);
1747 __cfq_set_active_queue(cfqd, cfqq);
1240} 1748}
1241 1749
1242static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq) 1750/*
1751 * should really be an ll_rw_blk.c helper
1752 */
1753static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1754{
1755 request_queue_t *q = cfqd->queue;
1756
1757 if (!blk_queue_plugged(q))
1758 q->request_fn(q);
1759 else
1760 __generic_unplug_device(q);
1761}
1762
1763/*
1764 * Called when a new fs request (crq) is added (to cfqq). Check if there's
1765 * something we should do about it
1766 */
1767static void
1768cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1769 struct cfq_rq *crq)
1243{ 1770{
1244 crq->is_sync = 0; 1771 const int sync = cfq_crq_is_sync(crq);
1245 if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE) 1772
1246 crq->is_sync = 1; 1773 cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
1774
1775 if (sync) {
1776 struct cfq_io_context *cic = crq->io_context;
1777
1778 cfq_update_io_thinktime(cfqd, cic);
1779 cfq_update_idle_window(cfqd, cfqq, cic);
1780
1781 cic->last_queue = jiffies;
1782 }
1783
1784 if (cfqq == cfqd->active_queue) {
1785 /*
1786 * if we are waiting for a request for this queue, let it rip
1787 * immediately and flag that we must not expire this queue
1788 * just now
1789 */
1790 if (cfq_cfqq_wait_request(cfqq)) {
1791 cfq_mark_cfqq_must_dispatch(cfqq);
1792 del_timer(&cfqd->idle_slice_timer);
1793 cfq_start_queueing(cfqd, cfqq);
1794 }
1795 } else if (cfq_should_preempt(cfqd, cfqq, crq)) {
1796 /*
1797 * not the active queue - expire the current slice if it is
1798 * idle and has exceeded its mean thinktime, or if this new queue
1799 * has some old slice time left and is of higher priority
1800 */
1801 cfq_preempt_queue(cfqd, cfqq);
1802 cfq_mark_cfqq_must_dispatch(cfqq);
1803 cfq_start_queueing(cfqd, cfqq);
1804 }
1805}
1806
1807static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
1808{
1809 struct cfq_rq *crq = RQ_DATA(rq);
1810 struct cfq_queue *cfqq = crq->cfq_queue;
1811
1812 cfq_init_prio_data(cfqq);
1247 1813
1248 cfq_add_crq_rb(crq); 1814 cfq_add_crq_rb(crq);
1249 crq->queue_start = jiffies;
1250 1815
1251 list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]); 1816 list_add_tail(&rq->queuelist, &cfqq->fifo);
1817
1818 if (rq_mergeable(rq)) {
1819 cfq_add_crq_hash(cfqd, crq);
1820
1821 if (!cfqd->queue->last_merge)
1822 cfqd->queue->last_merge = rq;
1823 }
1824
1825 cfq_crq_enqueued(cfqd, cfqq, crq);
1252} 1826}
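[editor's note] After this change each fs request is indexed three ways on entry: by sector in the per-cfqq rbtree (cfq_add_crq_rb, which drives dispatch order), by arrival time on the single per-queue fifo (expired via the sync/async cfq_fifo_expire pair), and, when mergeable, in the crq hash that feeds the merge path and the last_merge hint.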
1253 1827
1254static void 1828static void
1255cfq_insert_request(request_queue_t *q, struct request *rq, int where) 1829cfq_insert_request(request_queue_t *q, struct request *rq, int where)
1256{ 1830{
1257 struct cfq_data *cfqd = q->elevator->elevator_data; 1831 struct cfq_data *cfqd = q->elevator->elevator_data;
1258 struct cfq_rq *crq = RQ_DATA(rq);
1259 1832
1260 switch (where) { 1833 switch (where) {
1261 case ELEVATOR_INSERT_BACK: 1834 case ELEVATOR_INSERT_BACK:
1262 while (cfq_dispatch_requests(q, cfqd->cfq_quantum)) 1835 while (cfq_dispatch_requests(q, INT_MAX, 1))
1263 ; 1836 ;
1264 list_add_tail(&rq->queuelist, &q->queue_head); 1837 list_add_tail(&rq->queuelist, &q->queue_head);
1838 /*
1839 * If we were idling with pending requests on
1840 * inactive cfqqs, force dispatching will
1841 * remove the idle timer and the queue won't
1842 * be kicked by __make_request() afterward.
1843 * Kick it here.
1844 */
1845 cfq_schedule_dispatch(cfqd);
1265 break; 1846 break;
1266 case ELEVATOR_INSERT_FRONT: 1847 case ELEVATOR_INSERT_FRONT:
1267 list_add(&rq->queuelist, &q->queue_head); 1848 list_add(&rq->queuelist, &q->queue_head);
1268 break; 1849 break;
1269 case ELEVATOR_INSERT_SORT: 1850 case ELEVATOR_INSERT_SORT:
1270 BUG_ON(!blk_fs_request(rq)); 1851 BUG_ON(!blk_fs_request(rq));
1271 cfq_enqueue(cfqd, crq); 1852 cfq_enqueue(cfqd, rq);
1272 break; 1853 break;
1273 default: 1854 default:
1274 printk("%s: bad insert point %d\n", __FUNCTION__, where); 1855 printk("%s: bad insert point %d\n", __FUNCTION__, where);
1275 return; 1856 return;
1276 } 1857 }
1277
1278 if (rq_mergeable(rq)) {
1279 cfq_add_crq_hash(cfqd, crq);
1280
1281 if (!q->last_merge)
1282 q->last_merge = rq;
1283 }
1284}
1285
1286static int cfq_queue_empty(request_queue_t *q)
1287{
1288 struct cfq_data *cfqd = q->elevator->elevator_data;
1289
1290 return list_empty(&q->queue_head) && list_empty(&cfqd->rr_list);
1291} 1858}
1292 1859
1293static void cfq_completed_request(request_queue_t *q, struct request *rq) 1860static void cfq_completed_request(request_queue_t *q, struct request *rq)
@@ -1300,9 +1867,11 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
1300 1867
1301 cfqq = crq->cfq_queue; 1868 cfqq = crq->cfq_queue;
1302 1869
1303 if (crq->in_flight) { 1870 if (cfq_crq_in_flight(crq)) {
1304 WARN_ON(!cfqq->in_flight); 1871 const int sync = cfq_crq_is_sync(crq);
1305 cfqq->in_flight--; 1872
1873 WARN_ON(!cfqq->on_dispatch[sync]);
1874 cfqq->on_dispatch[sync]--;
1306 } 1875 }
1307 1876
1308 cfq_account_completion(cfqq, crq); 1877 cfq_account_completion(cfqq, crq);
@@ -1332,51 +1901,136 @@ cfq_latter_request(request_queue_t *q, struct request *rq)
1332 return NULL; 1901 return NULL;
1333} 1902}
1334 1903
1335static int cfq_may_queue(request_queue_t *q, int rw) 1904/*
1905 * we temporarily boost lower priority queues if they are holding fs exclusive
1906 * resources. they are boosted to normal prio (CLASS_BE/4)
1907 */
1908static void cfq_prio_boost(struct cfq_queue *cfqq)
1336{ 1909{
1337 struct cfq_data *cfqd = q->elevator->elevator_data; 1910 const int ioprio_class = cfqq->ioprio_class;
1338 struct cfq_queue *cfqq; 1911 const int ioprio = cfqq->ioprio;
1339 int ret = ELV_MQUEUE_MAY;
1340 1912
1341 if (current->flags & PF_MEMALLOC) 1913 if (has_fs_excl()) {
1342 return ELV_MQUEUE_MAY; 1914 /*
1915 * boost idle prio on transactions that would lock out other
1916 * users of the filesystem
1917 */
1918 if (cfq_class_idle(cfqq))
1919 cfqq->ioprio_class = IOPRIO_CLASS_BE;
1920 if (cfqq->ioprio > IOPRIO_NORM)
1921 cfqq->ioprio = IOPRIO_NORM;
1922 } else {
1923 /*
1924 * check if we need to unboost the queue
1925 */
1926 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1927 cfqq->ioprio_class = cfqq->org_ioprio_class;
1928 if (cfqq->ioprio != cfqq->org_ioprio)
1929 cfqq->ioprio = cfqq->org_ioprio;
1930 }
1343 1931
1344 cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current)); 1932 /*
1345 if (cfqq) { 1933 * refile between round-robin lists if we moved the priority class
1346 int limit = cfqd->max_queued; 1934 */
1935 if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
1936 cfq_cfqq_on_rr(cfqq))
1937 cfq_resort_rr_list(cfqq, 0);
1938}
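[editor's note] This is a priority-inversion guard: while a task holds filesystem-exclusive resources (has_fs_excl() nonzero, i.e. a transaction that would lock out other users of the filesystem), serving its io at idle or low priority would stall everyone waiting behind it, so the queue is lifted to best-effort/normal; the saved org_ioprio/org_ioprio_class values restore the original setting once the exclusion is released.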
1347 1939
1348 if (cfqq->allocated[rw] < cfqd->cfq_queued) 1940static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
1349 return ELV_MQUEUE_MUST; 1941{
1942 if (rw == READ || process_sync(task))
1943 return task->pid;
1350 1944
1351 if (cfqd->busy_queues) 1945 return CFQ_KEY_ASYNC;
1352 limit = q->nr_requests / cfqd->busy_queues; 1946}
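[editor's note] Worth spelling out: reads and sync writes get a per-process queue keyed by pid, while all plain buffered writes against a device collapse into one shared queue under CFQ_KEY_ASYNC. That shared async queue is, presumably, what the shorter cfq_slice_async and the cfq_slice_async_rq cap are sized for.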
1353 1947
1354 if (limit < cfqd->cfq_queued) 1948static inline int
1355 limit = cfqd->cfq_queued; 1949__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1356 else if (limit > cfqd->max_queued) 1950 struct task_struct *task, int rw)
1357 limit = cfqd->max_queued; 1951{
1952#if 1
1953 if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
1954 !cfq_cfqq_must_alloc_slice(cfqq)) {
1955 cfq_mark_cfqq_must_alloc_slice(cfqq);
1956 return ELV_MQUEUE_MUST;
1957 }
1358 1958
1359 if (cfqq->allocated[rw] >= limit) { 1959 return ELV_MQUEUE_MAY;
1360 if (limit > cfqq->alloc_limit[rw]) 1960#else
1361 cfqq->alloc_limit[rw] = limit; 1961 if (!cfqq || task->flags & PF_MEMALLOC)
1962 return ELV_MQUEUE_MAY;
1963 if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
1964 if (cfq_cfqq_wait_request(cfqq))
1965 return ELV_MQUEUE_MUST;
1362 1966
1363 ret = ELV_MQUEUE_NO; 1967 /*
1968 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
1969 * can quickly flood the queue with writes from a single task
1970 */
1971 if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
1972 cfq_mark_cfqq_must_alloc_slice(cfqq);
1973 return ELV_MQUEUE_MUST;
1364 } 1974 }
1975
1976 return ELV_MQUEUE_MAY;
1365 } 1977 }
1978 if (cfq_class_idle(cfqq))
1979 return ELV_MQUEUE_NO;
1980 if (cfqq->allocated[rw] >= cfqd->max_queued) {
1981 struct io_context *ioc = get_io_context(GFP_ATOMIC);
1982 int ret = ELV_MQUEUE_NO;
1366 1983
1367 return ret; 1984 if (ioc && ioc->nr_batch_requests)
1985 ret = ELV_MQUEUE_MAY;
1986
1987 put_io_context(ioc);
1988 return ret;
1989 }
1990
1991 return ELV_MQUEUE_MAY;
1992#endif
1993}
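[editor's note] Note which side of the #if is live: the only special case kept is the must-allocate escape hatch, and must_alloc_slice limits ELV_MQUEUE_MUST to one grant per slice, so a queue being waited on can always make forward progress without a single task flooding the request list. The richer #if 0 variant (idle-class refusal, per-queue limits, batching-ioc checks) is carried along disabled, for reference.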
1994
1995static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
1996{
1997 struct cfq_data *cfqd = q->elevator->elevator_data;
1998 struct task_struct *tsk = current;
1999 struct cfq_queue *cfqq;
2000
2001 /*
2002 * don't force setup of a queue from here, as a call to may_queue
2003 * does not necessarily imply that a request actually will be queued.
2004 * so just look up a possibly existing queue, or return 'may queue'
2005 * if that fails
2006 */
2007 cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
2008 if (cfqq) {
2009 cfq_init_prio_data(cfqq);
2010 cfq_prio_boost(cfqq);
2011
2012 return __cfq_may_queue(cfqd, cfqq, tsk, rw);
2013 }
2014
2015 return ELV_MQUEUE_MAY;
1368} 2016}
1369 2017
1370static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq) 2018static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
1371{ 2019{
2020 struct cfq_data *cfqd = q->elevator->elevator_data;
1372 struct request_list *rl = &q->rq; 2021 struct request_list *rl = &q->rq;
1373 const int write = waitqueue_active(&rl->wait[WRITE]);
1374 const int read = waitqueue_active(&rl->wait[READ]);
1375 2022
1376 if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ]) 2023 if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
1377 wake_up(&rl->wait[READ]); 2024 smp_mb();
1378 if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE]) 2025 if (waitqueue_active(&rl->wait[READ]))
1379 wake_up(&rl->wait[WRITE]); 2026 wake_up(&rl->wait[READ]);
2027 }
2028
2029 if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
2030 smp_mb();
2031 if (waitqueue_active(&rl->wait[WRITE]))
2032 wake_up(&rl->wait[WRITE]);
2033 }
1380} 2034}
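[editor's note] The smp_mb() before each waitqueue_active() is the usual guard for checking a waitqueue without taking its lock; schematically, with the sleeper side as in the generic block layer's get_request_wait() (a sketch of the ordering contract, not the literal code):

/*
 *  waker (this function)              sleeper (request allocator)
 *  ---------------------              ---------------------------
 *  cfqq->allocated[rw]--;             prepare_to_wait(&rl->wait[rw], ...);
 *  smp_mb();                          recheck the free-request state;
 *  if (waitqueue_active(&rl->wait))   if (still starved)
 *          wake_up(&rl->wait);                io_schedule();
 */

Without the barrier the waker's unlocked emptiness check could be reordered before its state update, observing an empty waitqueue an instant before the sleeper queues itself and leaving that sleeper stranded.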
1381 2035
1382/* 2036/*
@@ -1389,69 +2043,61 @@ static void cfq_put_request(request_queue_t *q, struct request *rq)
1389 2043
1390 if (crq) { 2044 if (crq) {
1391 struct cfq_queue *cfqq = crq->cfq_queue; 2045 struct cfq_queue *cfqq = crq->cfq_queue;
2046 const int rw = rq_data_dir(rq);
1392 2047
1393 BUG_ON(q->last_merge == rq); 2048 BUG_ON(!cfqq->allocated[rw]);
1394 BUG_ON(!hlist_unhashed(&crq->hash)); 2049 cfqq->allocated[rw]--;
1395 2050
1396 if (crq->io_context) 2051 put_io_context(crq->io_context->ioc);
1397 put_io_context(crq->io_context->ioc);
1398
1399 BUG_ON(!cfqq->allocated[crq->is_write]);
1400 cfqq->allocated[crq->is_write]--;
1401 2052
1402 mempool_free(crq, cfqd->crq_pool); 2053 mempool_free(crq, cfqd->crq_pool);
1403 rq->elevator_private = NULL; 2054 rq->elevator_private = NULL;
1404 2055
1405 smp_mb();
1406 cfq_check_waiters(q, cfqq); 2056 cfq_check_waiters(q, cfqq);
1407 cfq_put_queue(cfqq); 2057 cfq_put_queue(cfqq);
1408 } 2058 }
1409} 2059}
1410 2060
1411/* 2061/*
1412 * Allocate cfq data structures associated with this request. A queue and 2062 * Allocate cfq data structures associated with this request.
1413 */ 2063 */
1414static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask) 2064static int
2065cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
2066 int gfp_mask)
1415{ 2067{
1416 struct cfq_data *cfqd = q->elevator->elevator_data; 2068 struct cfq_data *cfqd = q->elevator->elevator_data;
2069 struct task_struct *tsk = current;
1417 struct cfq_io_context *cic; 2070 struct cfq_io_context *cic;
1418 const int rw = rq_data_dir(rq); 2071 const int rw = rq_data_dir(rq);
1419 struct cfq_queue *cfqq, *saved_cfqq; 2072 pid_t key = cfq_queue_pid(tsk, rw);
2073 struct cfq_queue *cfqq;
1420 struct cfq_rq *crq; 2074 struct cfq_rq *crq;
1421 unsigned long flags; 2075 unsigned long flags;
1422 2076
1423 might_sleep_if(gfp_mask & __GFP_WAIT); 2077 might_sleep_if(gfp_mask & __GFP_WAIT);
1424 2078
2079 cic = cfq_get_io_context(cfqd, key, gfp_mask);
2080
1425 spin_lock_irqsave(q->queue_lock, flags); 2081 spin_lock_irqsave(q->queue_lock, flags);
1426 2082
1427 cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask); 2083 if (!cic)
1428 if (!cfqq) 2084 goto queue_fail;
1429 goto out_lock; 2085
2086 if (!cic->cfqq) {
2087 cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
2088 if (!cfqq)
2089 goto queue_fail;
1430 2090
1431repeat: 2091 cic->cfqq = cfqq;
1432 if (cfqq->allocated[rw] >= cfqd->max_queued) 2092 } else
1433 goto out_lock; 2093 cfqq = cic->cfqq;
1434 2094
1435 cfqq->allocated[rw]++; 2095 cfqq->allocated[rw]++;
2096 cfq_clear_cfqq_must_alloc(cfqq);
2097 cfqd->rq_starved = 0;
2098 atomic_inc(&cfqq->ref);
1436 spin_unlock_irqrestore(q->queue_lock, flags); 2099 spin_unlock_irqrestore(q->queue_lock, flags);
1437 2100
1438 /*
1439 * if hashing type has changed, the cfq_queue might change here.
1440 */
1441 saved_cfqq = cfqq;
1442 cic = cfq_get_io_context(&cfqq, gfp_mask);
1443 if (!cic)
1444 goto err;
1445
1446 /*
1447 * repeat allocation checks on queue change
1448 */
1449 if (unlikely(saved_cfqq != cfqq)) {
1450 spin_lock_irqsave(q->queue_lock, flags);
1451 saved_cfqq->allocated[rw]--;
1452 goto repeat;
1453 }
1454
1455 crq = mempool_alloc(cfqd->crq_pool, gfp_mask); 2101 crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
1456 if (crq) { 2102 if (crq) {
1457 RB_CLEAR(&crq->rb_node); 2103 RB_CLEAR(&crq->rb_node);
@@ -1460,24 +2106,141 @@ repeat:
1460 INIT_HLIST_NODE(&crq->hash); 2106 INIT_HLIST_NODE(&crq->hash);
1461 crq->cfq_queue = cfqq; 2107 crq->cfq_queue = cfqq;
1462 crq->io_context = cic; 2108 crq->io_context = cic;
1463 crq->service_start = crq->queue_start = 0; 2109 cfq_clear_crq_in_flight(crq);
1464 crq->in_flight = crq->accounted = crq->is_sync = 0; 2110 cfq_clear_crq_in_driver(crq);
1465 crq->is_write = rw; 2111 cfq_clear_crq_requeued(crq);
2112
2113 if (rw == READ || process_sync(tsk))
2114 cfq_mark_crq_is_sync(crq);
2115 else
2116 cfq_clear_crq_is_sync(crq);
2117
1466 rq->elevator_private = crq; 2118 rq->elevator_private = crq;
1467 cfqq->alloc_limit[rw] = 0;
1468 return 0; 2119 return 0;
1469 } 2120 }
1470 2121
1471 put_io_context(cic->ioc);
1472err:
1473 spin_lock_irqsave(q->queue_lock, flags); 2122 spin_lock_irqsave(q->queue_lock, flags);
1474 cfqq->allocated[rw]--; 2123 cfqq->allocated[rw]--;
2124 if (!(cfqq->allocated[0] + cfqq->allocated[1]))
2125 cfq_mark_cfqq_must_alloc(cfqq);
1475 cfq_put_queue(cfqq); 2126 cfq_put_queue(cfqq);
1476out_lock: 2127queue_fail:
2128 if (cic)
2129 put_io_context(cic->ioc);
2130 /*
2131 * mark us rq allocation starved. we need to kickstart the process
2132 * ourselves if there are no pending requests that can do it for us.
2133 * that would be an extremely rare OOM situation
2134 */
2135 cfqd->rq_starved = 1;
2136 cfq_schedule_dispatch(cfqd);
1477 spin_unlock_irqrestore(q->queue_lock, flags); 2137 spin_unlock_irqrestore(q->queue_lock, flags);
1478 return 1; 2138 return 1;
1479} 2139}
1480 2140
2141static void cfq_kick_queue(void *data)
2142{
2143 request_queue_t *q = data;
2144 struct cfq_data *cfqd = q->elevator->elevator_data;
2145 unsigned long flags;
2146
2147 spin_lock_irqsave(q->queue_lock, flags);
2148
2149 if (cfqd->rq_starved) {
2150 struct request_list *rl = &q->rq;
2151
2152 /*
2153 * we aren't guaranteed to get a request after this, but we
2154 * have to be opportunistic
2155 */
2156 smp_mb();
2157 if (waitqueue_active(&rl->wait[READ]))
2158 wake_up(&rl->wait[READ]);
2159 if (waitqueue_active(&rl->wait[WRITE]))
2160 wake_up(&rl->wait[WRITE]);
2161 }
2162
2163 blk_remove_plug(q);
2164 q->request_fn(q);
2165 spin_unlock_irqrestore(q->queue_lock, flags);
2166}
2167
2168/*
2169 * Timer running if the active_queue is currently idling inside its time slice
2170 */
2171static void cfq_idle_slice_timer(unsigned long data)
2172{
2173 struct cfq_data *cfqd = (struct cfq_data *) data;
2174 struct cfq_queue *cfqq;
2175 unsigned long flags;
2176
2177 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2178
2179 if ((cfqq = cfqd->active_queue) != NULL) {
2180 unsigned long now = jiffies;
2181
2182 /*
2183 * expired
2184 */
2185 if (time_after(now, cfqq->slice_end))
2186 goto expire;
2187
2188 /*
2189 * only expire and reinvoke request handler, if there are
2190 * other queues with pending requests
2191 */
2192 if (!cfq_pending_requests(cfqd)) {
2193 cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
2194 add_timer(&cfqd->idle_slice_timer);
2195 goto out_cont;
2196 }
2197
2198 /*
2199 * not expired and it has a request pending, let it dispatch
2200 */
2201 if (!RB_EMPTY(&cfqq->sort_list)) {
2202 cfq_mark_cfqq_must_dispatch(cfqq);
2203 goto out_kick;
2204 }
2205 }
2206expire:
2207 cfq_slice_expired(cfqd, 0);
2208out_kick:
2209 cfq_schedule_dispatch(cfqd);
2210out_cont:
2211 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2212}
2213
2214/*
2215 * Timer running if an idle class queue is waiting for service
2216 */
2217static void cfq_idle_class_timer(unsigned long data)
2218{
2219 struct cfq_data *cfqd = (struct cfq_data *) data;
2220 unsigned long flags, end;
2221
2222 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2223
2224 /*
2225 * race with a non-idle queue, reset timer
2226 */
2227 end = cfqd->last_end_request + CFQ_IDLE_GRACE;
2228 if (!time_after_eq(jiffies, end)) {
2229 cfqd->idle_class_timer.expires = end;
2230 add_timer(&cfqd->idle_class_timer);
2231 } else
2232 cfq_schedule_dispatch(cfqd);
2233
2234 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2235}
2236
2237static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2238{
2239 del_timer_sync(&cfqd->idle_slice_timer);
2240 del_timer_sync(&cfqd->idle_class_timer);
2241 blk_sync_queue(cfqd->queue);
2242}
2243
1481static void cfq_put_cfqd(struct cfq_data *cfqd) 2244static void cfq_put_cfqd(struct cfq_data *cfqd)
1482{ 2245{
1483 request_queue_t *q = cfqd->queue; 2246 request_queue_t *q = cfqd->queue;
@@ -1487,6 +2250,9 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
1487 2250
1488 blk_put_queue(q); 2251 blk_put_queue(q);
1489 2252
2253 cfq_shutdown_timer_wq(cfqd);
2254 q->elevator->elevator_data = NULL;
2255
1490 mempool_destroy(cfqd->crq_pool); 2256 mempool_destroy(cfqd->crq_pool);
1491 kfree(cfqd->crq_hash); 2257 kfree(cfqd->crq_hash);
1492 kfree(cfqd->cfq_hash); 2258 kfree(cfqd->cfq_hash);
@@ -1495,7 +2261,10 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
1495 2261
1496static void cfq_exit_queue(elevator_t *e) 2262static void cfq_exit_queue(elevator_t *e)
1497{ 2263{
1498 cfq_put_cfqd(e->elevator_data); 2264 struct cfq_data *cfqd = e->elevator_data;
2265
2266 cfq_shutdown_timer_wq(cfqd);
2267 cfq_put_cfqd(cfqd);
1499} 2268}
1500 2269
1501static int cfq_init_queue(request_queue_t *q, elevator_t *e) 2270static int cfq_init_queue(request_queue_t *q, elevator_t *e)
@@ -1508,7 +2277,13 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
1508 return -ENOMEM; 2277 return -ENOMEM;
1509 2278
1510 memset(cfqd, 0, sizeof(*cfqd)); 2279 memset(cfqd, 0, sizeof(*cfqd));
1511 INIT_LIST_HEAD(&cfqd->rr_list); 2280
2281 for (i = 0; i < CFQ_PRIO_LISTS; i++)
2282 INIT_LIST_HEAD(&cfqd->rr_list[i]);
2283
2284 INIT_LIST_HEAD(&cfqd->busy_rr);
2285 INIT_LIST_HEAD(&cfqd->cur_rr);
2286 INIT_LIST_HEAD(&cfqd->idle_rr);
1512 INIT_LIST_HEAD(&cfqd->empty_list); 2287 INIT_LIST_HEAD(&cfqd->empty_list);
1513 2288
1514 cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); 2289 cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
@@ -1533,24 +2308,32 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
1533 cfqd->queue = q; 2308 cfqd->queue = q;
1534 atomic_inc(&q->refcnt); 2309 atomic_inc(&q->refcnt);
1535 2310
1536 /* 2311 cfqd->max_queued = q->nr_requests / 4;
1537 * just set it to some high value, we want anyone to be able to queue
1538 * some requests. fairness is handled differently
1539 */
1540 q->nr_requests = 1024;
1541 cfqd->max_queued = q->nr_requests / 16;
1542 q->nr_batching = cfq_queued; 2312 q->nr_batching = cfq_queued;
1543 cfqd->key_type = CFQ_KEY_TGID; 2313
1544 cfqd->find_best_crq = 1; 2314 init_timer(&cfqd->idle_slice_timer);
2315 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2316 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2317
2318 init_timer(&cfqd->idle_class_timer);
2319 cfqd->idle_class_timer.function = cfq_idle_class_timer;
2320 cfqd->idle_class_timer.data = (unsigned long) cfqd;
2321
2322 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
2323
1545 atomic_set(&cfqd->ref, 1); 2324 atomic_set(&cfqd->ref, 1);
1546 2325
1547 cfqd->cfq_queued = cfq_queued; 2326 cfqd->cfq_queued = cfq_queued;
1548 cfqd->cfq_quantum = cfq_quantum; 2327 cfqd->cfq_quantum = cfq_quantum;
1549 cfqd->cfq_fifo_expire_r = cfq_fifo_expire_r; 2328 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
1550 cfqd->cfq_fifo_expire_w = cfq_fifo_expire_w; 2329 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
1551 cfqd->cfq_fifo_batch_expire = cfq_fifo_rate;
1552 cfqd->cfq_back_max = cfq_back_max; 2330 cfqd->cfq_back_max = cfq_back_max;
1553 cfqd->cfq_back_penalty = cfq_back_penalty; 2331 cfqd->cfq_back_penalty = cfq_back_penalty;
2332 cfqd->cfq_slice[0] = cfq_slice_async;
2333 cfqd->cfq_slice[1] = cfq_slice_sync;
2334 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2335 cfqd->cfq_slice_idle = cfq_slice_idle;
2336 cfqd->cfq_max_depth = cfq_max_depth;
1554 2337
1555 return 0; 2338 return 0;
1556out_crqpool: 2339out_crqpool:
@@ -1595,7 +2378,6 @@ fail:
1595 return -ENOMEM; 2378 return -ENOMEM;
1596} 2379}
1597 2380
1598
1599/* 2381/*
1600 * sysfs parts below --> 2382 * sysfs parts below -->
1601 */ 2383 */
@@ -1620,45 +2402,6 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
1620 return count; 2402 return count;
1621} 2403}
1622 2404
1623static ssize_t
1624cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count)
1625{
1626 max_elapsed_dispatch = max_elapsed_crq = 0;
1627 return count;
1628}
1629
1630static ssize_t
1631cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count)
1632{
1633 spin_lock_irq(cfqd->queue->queue_lock);
1634 if (!strncmp(page, "pgid", 4))
1635 cfqd->key_type = CFQ_KEY_PGID;
1636 else if (!strncmp(page, "tgid", 4))
1637 cfqd->key_type = CFQ_KEY_TGID;
1638 else if (!strncmp(page, "uid", 3))
1639 cfqd->key_type = CFQ_KEY_UID;
1640 else if (!strncmp(page, "gid", 3))
1641 cfqd->key_type = CFQ_KEY_GID;
1642 spin_unlock_irq(cfqd->queue->queue_lock);
1643 return count;
1644}
1645
1646static ssize_t
1647cfq_read_key_type(struct cfq_data *cfqd, char *page)
1648{
1649 ssize_t len = 0;
1650 int i;
1651
1652 for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) {
1653 if (cfqd->key_type == i)
1654 len += sprintf(page+len, "[%s] ", cfq_key_types[i]);
1655 else
1656 len += sprintf(page+len, "%s ", cfq_key_types[i]);
1657 }
1658 len += sprintf(page+len, "\n");
1659 return len;
1660}
1661
1662#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ 2405#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
1663static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ 2406static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \
1664{ \ 2407{ \
@@ -1669,12 +2412,15 @@ static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \
1669} 2412}
1670SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); 2413SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
1671SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); 2414SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
1672SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1); 2415SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
1673SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1); 2416SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
1674SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1);
1675SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0);
1676SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); 2417SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
1677SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); 2418SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
2419SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2420SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2421SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2422SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2423SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
1678#undef SHOW_FUNCTION 2424#undef SHOW_FUNCTION
1679 2425
1680#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 2426#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -1694,12 +2440,15 @@ static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \
1694} 2440}
1695STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); 2441STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
1696STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); 2442STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
1697STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1); 2443STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
1698STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 1); 2444STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
1699STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1);
1700STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0);
1701STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); 2445STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
1702STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); 2446STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2447STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2448STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2449STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2450STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
2451STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
1703#undef STORE_FUNCTION 2452#undef STORE_FUNCTION
1704 2453
1705static struct cfq_fs_entry cfq_quantum_entry = { 2454static struct cfq_fs_entry cfq_quantum_entry = {
@@ -1712,25 +2461,15 @@ static struct cfq_fs_entry cfq_queued_entry = {
1712 .show = cfq_queued_show, 2461 .show = cfq_queued_show,
1713 .store = cfq_queued_store, 2462 .store = cfq_queued_store,
1714}; 2463};
1715static struct cfq_fs_entry cfq_fifo_expire_r_entry = { 2464static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
1716 .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR }, 2465 .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
1717 .show = cfq_fifo_expire_r_show, 2466 .show = cfq_fifo_expire_sync_show,
1718 .store = cfq_fifo_expire_r_store, 2467 .store = cfq_fifo_expire_sync_store,
1719}; 2468};
1720static struct cfq_fs_entry cfq_fifo_expire_w_entry = { 2469static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
1721 .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR }, 2470 .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
1722 .show = cfq_fifo_expire_w_show, 2471 .show = cfq_fifo_expire_async_show,
1723 .store = cfq_fifo_expire_w_store, 2472 .store = cfq_fifo_expire_async_store,
1724};
1725static struct cfq_fs_entry cfq_fifo_batch_expire_entry = {
1726 .attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR },
1727 .show = cfq_fifo_batch_expire_show,
1728 .store = cfq_fifo_batch_expire_store,
1729};
1730static struct cfq_fs_entry cfq_find_best_entry = {
1731 .attr = {.name = "find_best_crq", .mode = S_IRUGO | S_IWUSR },
1732 .show = cfq_find_best_show,
1733 .store = cfq_find_best_store,
1734}; 2473};
1735static struct cfq_fs_entry cfq_back_max_entry = { 2474static struct cfq_fs_entry cfq_back_max_entry = {
1736 .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR }, 2475 .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
@@ -1742,27 +2481,44 @@ static struct cfq_fs_entry cfq_back_penalty_entry = {
1742 .show = cfq_back_penalty_show, 2481 .show = cfq_back_penalty_show,
1743 .store = cfq_back_penalty_store, 2482 .store = cfq_back_penalty_store,
1744}; 2483};
1745static struct cfq_fs_entry cfq_clear_elapsed_entry = { 2484static struct cfq_fs_entry cfq_slice_sync_entry = {
1746 .attr = {.name = "clear_elapsed", .mode = S_IWUSR }, 2485 .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
1747 .store = cfq_clear_elapsed, 2486 .show = cfq_slice_sync_show,
2487 .store = cfq_slice_sync_store,
2488};
2489static struct cfq_fs_entry cfq_slice_async_entry = {
2490 .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
2491 .show = cfq_slice_async_show,
2492 .store = cfq_slice_async_store,
1748}; 2493};
1749static struct cfq_fs_entry cfq_key_type_entry = { 2494static struct cfq_fs_entry cfq_slice_async_rq_entry = {
1750 .attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR }, 2495 .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
1751 .show = cfq_read_key_type, 2496 .show = cfq_slice_async_rq_show,
1752 .store = cfq_set_key_type, 2497 .store = cfq_slice_async_rq_store,
2498};
2499static struct cfq_fs_entry cfq_slice_idle_entry = {
2500 .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
2501 .show = cfq_slice_idle_show,
2502 .store = cfq_slice_idle_store,
2503};
2504static struct cfq_fs_entry cfq_max_depth_entry = {
2505 .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
2506 .show = cfq_max_depth_show,
2507 .store = cfq_max_depth_store,
1753}; 2508};
1754 2509
1755static struct attribute *default_attrs[] = { 2510static struct attribute *default_attrs[] = {
1756 &cfq_quantum_entry.attr, 2511 &cfq_quantum_entry.attr,
1757 &cfq_queued_entry.attr, 2512 &cfq_queued_entry.attr,
1758 &cfq_fifo_expire_r_entry.attr, 2513 &cfq_fifo_expire_sync_entry.attr,
1759 &cfq_fifo_expire_w_entry.attr, 2514 &cfq_fifo_expire_async_entry.attr,
1760 &cfq_fifo_batch_expire_entry.attr,
1761 &cfq_key_type_entry.attr,
1762 &cfq_find_best_entry.attr,
1763 &cfq_back_max_entry.attr, 2515 &cfq_back_max_entry.attr,
1764 &cfq_back_penalty_entry.attr, 2516 &cfq_back_penalty_entry.attr,
1765 &cfq_clear_elapsed_entry.attr, 2517 &cfq_slice_sync_entry.attr,
2518 &cfq_slice_async_entry.attr,
2519 &cfq_slice_async_rq_entry.attr,
2520 &cfq_slice_idle_entry.attr,
2521 &cfq_max_depth_entry.attr,
1766 NULL, 2522 NULL,
1767}; 2523};
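[editor's note] Once cfq is the active elevator these attributes appear under /sys/block/<dev>/queue/iosched/. A trivial userspace probe of one of the new tunables (the device name here is only an example):

#include <stdio.h>

int main(void)
{
        /* illustrative path; substitute a disk that is using cfq */
        FILE *f = fopen("/sys/block/hda/queue/iosched/slice_idle", "r");
        char buf[64];

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("slice_idle = %s", buf);
        fclose(f);
        return 0;
}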
1768 2524
@@ -1832,21 +2588,46 @@ static int __init cfq_init(void)
1832{ 2588{
1833 int ret; 2589 int ret;
1834 2590
2591 /*
2592 * integer division can truncate these to 0 on low-HZ setups
2593 */
2594 if (!cfq_slice_async)
2595 cfq_slice_async = 1;
2596 if (!cfq_slice_idle)
2597 cfq_slice_idle = 1;
2598
1835 if (cfq_slab_setup()) 2599 if (cfq_slab_setup())
1836 return -ENOMEM; 2600 return -ENOMEM;
1837 2601
1838 ret = elv_register(&iosched_cfq); 2602 ret = elv_register(&iosched_cfq);
1839 if (!ret) { 2603 if (ret)
1840 __module_get(THIS_MODULE); 2604 cfq_slab_kill();
1841 return 0;
1842 }
1843 2605
1844 cfq_slab_kill();
1845 return ret; 2606 return ret;
1846} 2607}
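[editor's note] The fixup above exists because the slice defaults are derived from HZ with integer division, which truncates toward zero on low-tick configurations. Working the numbers for a few common HZ values (same expressions as the module defaults):

#include <stdio.h>

int main(void)
{
        int hz[] = { 100, 250, 1000 };
        int i;

        for (i = 0; i < 3; i++) {
                int HZ = hz[i];

                /* cfq_slice_sync, cfq_slice_async, cfq_slice_idle defaults */
                printf("HZ=%4d: sync=%d async=%d idle=%d\n",
                       HZ, HZ / 10, HZ / 25, HZ / 100);
        }
        return 0;
}

At HZ=100 slice_idle is already down to a single jiffy; the guard only fires when a division truncates all the way to zero.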
1847 2608
1848static void __exit cfq_exit(void) 2609static void __exit cfq_exit(void)
1849{ 2610{
2611 struct task_struct *g, *p;
2612 unsigned long flags;
2613
2614 read_lock_irqsave(&tasklist_lock, flags);
2615
2616 /*
2617 * iterate over each process in the system, removing our io_context
2618 */
2619 do_each_thread(g, p) {
2620 struct io_context *ioc = p->io_context;
2621
2622 if (ioc && ioc->cic) {
2623 ioc->cic->exit(ioc->cic);
2624 cfq_free_io_context(ioc->cic);
2625 ioc->cic = NULL;
2626 }
2627 } while_each_thread(g, p);
2628
2629 read_unlock_irqrestore(&tasklist_lock, flags);
2630
1850 cfq_slab_kill(); 2631 cfq_slab_kill();
1851 elv_unregister(&iosched_cfq); 2632 elv_unregister(&iosched_cfq);
1852} 2633}