Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 2464 |
1 file changed, 2464 insertions, 0 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
new file mode 100644
index 000000000000..2b64f5852bfd
--- /dev/null
+++ b/block/cfq-iosched.c
@@ -0,0 +1,2464 @@
1 | /* | ||
2 | * linux/block/cfq-iosched.c | ||
3 | * | ||
4 | * CFQ, or complete fairness queueing, disk scheduler. | ||
5 | * | ||
6 | * Based on ideas from a previously unfinished io | ||
7 | * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli. | ||
8 | * | ||
9 | * Copyright (C) 2003 Jens Axboe <axboe@suse.de> | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/blkdev.h> | ||
14 | #include <linux/elevator.h> | ||
15 | #include <linux/bio.h> | ||
16 | #include <linux/config.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/compiler.h> | ||
21 | #include <linux/hash.h> | ||
22 | #include <linux/rbtree.h> | ||
23 | #include <linux/mempool.h> | ||
24 | #include <linux/ioprio.h> | ||
25 | #include <linux/writeback.h> | ||
26 | |||
27 | /* | ||
28 | * tunables | ||
29 | */ | ||
30 | static int cfq_quantum = 4; /* max queue in one round of service */ | ||
31 | static int cfq_queued = 8; /* minimum rq allocate limit per-queue */ | ||
32 | static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; | ||
33 | static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ | ||
34 | static int cfq_back_penalty = 2; /* penalty of a backwards seek */ | ||
35 | |||
36 | static int cfq_slice_sync = HZ / 10; | ||
37 | static int cfq_slice_async = HZ / 25; | ||
38 | static int cfq_slice_async_rq = 2; | ||
39 | static int cfq_slice_idle = HZ / 100; | ||
40 | |||
41 | #define CFQ_IDLE_GRACE (HZ / 10) | ||
42 | #define CFQ_SLICE_SCALE (5) | ||
43 | |||
44 | #define CFQ_KEY_ASYNC (0) | ||
45 | #define CFQ_KEY_ANY (0xffff) | ||
46 | |||
47 | /* | ||
48 | * disable queueing at the driver/hardware level | ||
49 | */ | ||
50 | static int cfq_max_depth = 2; | ||
51 | |||
52 | /* | ||
53 | * for the hash of cfqq inside the cfqd | ||
54 | */ | ||
55 | #define CFQ_QHASH_SHIFT 6 | ||
56 | #define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT) | ||
57 | #define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash) | ||
58 | |||
59 | /* | ||
60 | * for the hash of crq inside the cfqq | ||
61 | */ | ||
62 | #define CFQ_MHASH_SHIFT 6 | ||
63 | #define CFQ_MHASH_BLOCK(sec) ((sec) >> 3) | ||
64 | #define CFQ_MHASH_ENTRIES (1 << CFQ_MHASH_SHIFT) | ||
65 | #define CFQ_MHASH_FN(sec) hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT) | ||
66 | #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) | ||
67 | #define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash) | ||
68 | |||
69 | #define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list) | ||
70 | #define list_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) | ||
71 | |||
72 | #define RQ_DATA(rq) (rq)->elevator_private | ||
73 | |||
74 | /* | ||
75 | * rb-tree defines | ||
76 | */ | ||
77 | #define RB_NONE (2) | ||
78 | #define RB_EMPTY(node) ((node)->rb_node == NULL) | ||
79 | #define RB_CLEAR_COLOR(node) (node)->rb_color = RB_NONE | ||
80 | #define RB_CLEAR(node) do { \ | ||
81 | (node)->rb_parent = NULL; \ | ||
82 | RB_CLEAR_COLOR((node)); \ | ||
83 | (node)->rb_right = NULL; \ | ||
84 | (node)->rb_left = NULL; \ | ||
85 | } while (0) | ||
86 | #define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL) | ||
87 | #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) | ||
88 | #define rq_rb_key(rq) (rq)->sector | ||
89 | |||
90 | static kmem_cache_t *crq_pool; | ||
91 | static kmem_cache_t *cfq_pool; | ||
92 | static kmem_cache_t *cfq_ioc_pool; | ||
93 | |||
94 | #define CFQ_PRIO_LISTS IOPRIO_BE_NR | ||
95 | #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) | ||
96 | #define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE) | ||
97 | #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) | ||
98 | |||
99 | #define ASYNC (0) | ||
100 | #define SYNC (1) | ||
101 | |||
102 | #define cfq_cfqq_dispatched(cfqq) \ | ||
103 | ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC]) | ||
104 | |||
105 | #define cfq_cfqq_class_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC) | ||
106 | |||
107 | #define cfq_cfqq_sync(cfqq) \ | ||
108 | (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC]) | ||
109 | |||
110 | /* | ||
111 | * Per block device queue structure | ||
112 | */ | ||
113 | struct cfq_data { | ||
114 | atomic_t ref; | ||
115 | request_queue_t *queue; | ||
116 | |||
117 | /* | ||
118 | * rr list of queues with requests and the count of them | ||
119 | */ | ||
120 | struct list_head rr_list[CFQ_PRIO_LISTS]; | ||
121 | struct list_head busy_rr; | ||
122 | struct list_head cur_rr; | ||
123 | struct list_head idle_rr; | ||
124 | unsigned int busy_queues; | ||
125 | |||
126 | /* | ||
127 | * non-ordered list of empty cfqq's | ||
128 | */ | ||
129 | struct list_head empty_list; | ||
130 | |||
131 | /* | ||
132 | * cfqq lookup hash | ||
133 | */ | ||
134 | struct hlist_head *cfq_hash; | ||
135 | |||
136 | /* | ||
137 | * global crq hash for all queues | ||
138 | */ | ||
139 | struct hlist_head *crq_hash; | ||
140 | |||
141 | unsigned int max_queued; | ||
142 | |||
143 | mempool_t *crq_pool; | ||
144 | |||
145 | int rq_in_driver; | ||
146 | |||
147 | /* | ||
148 | * schedule slice state info | ||
149 | */ | ||
150 | /* | ||
151 | * idle window management | ||
152 | */ | ||
153 | struct timer_list idle_slice_timer; | ||
154 | struct work_struct unplug_work; | ||
155 | |||
156 | struct cfq_queue *active_queue; | ||
157 | struct cfq_io_context *active_cic; | ||
158 | int cur_prio, cur_end_prio; | ||
159 | unsigned int dispatch_slice; | ||
160 | |||
161 | struct timer_list idle_class_timer; | ||
162 | |||
163 | sector_t last_sector; | ||
164 | unsigned long last_end_request; | ||
165 | |||
166 | unsigned int rq_starved; | ||
167 | |||
168 | /* | ||
169 | * tunables, see top of file | ||
170 | */ | ||
171 | unsigned int cfq_quantum; | ||
172 | unsigned int cfq_queued; | ||
173 | unsigned int cfq_fifo_expire[2]; | ||
174 | unsigned int cfq_back_penalty; | ||
175 | unsigned int cfq_back_max; | ||
176 | unsigned int cfq_slice[2]; | ||
177 | unsigned int cfq_slice_async_rq; | ||
178 | unsigned int cfq_slice_idle; | ||
179 | unsigned int cfq_max_depth; | ||
180 | }; | ||
181 | |||
182 | /* | ||
183 | * Per process-grouping structure | ||
184 | */ | ||
185 | struct cfq_queue { | ||
186 | /* reference count */ | ||
187 | atomic_t ref; | ||
188 | /* parent cfq_data */ | ||
189 | struct cfq_data *cfqd; | ||
190 | /* cfqq lookup hash */ | ||
191 | struct hlist_node cfq_hash; | ||
192 | /* hash key */ | ||
193 | unsigned int key; | ||
194 | /* on either rr or empty list of cfqd */ | ||
195 | struct list_head cfq_list; | ||
196 | /* sorted list of pending requests */ | ||
197 | struct rb_root sort_list; | ||
198 | /* if fifo isn't expired, next request to serve */ | ||
199 | struct cfq_rq *next_crq; | ||
200 | /* requests queued in sort_list */ | ||
201 | int queued[2]; | ||
202 | /* currently allocated requests */ | ||
203 | int allocated[2]; | ||
204 | /* fifo list of requests in sort_list */ | ||
205 | struct list_head fifo; | ||
206 | |||
207 | unsigned long slice_start; | ||
208 | unsigned long slice_end; | ||
209 | unsigned long slice_left; | ||
210 | unsigned long service_last; | ||
211 | |||
212 | /* number of requests that are on the dispatch list */ | ||
213 | int on_dispatch[2]; | ||
214 | |||
215 | /* io prio of this group */ | ||
216 | unsigned short ioprio, org_ioprio; | ||
217 | unsigned short ioprio_class, org_ioprio_class; | ||
218 | |||
219 | /* various state flags, see below */ | ||
220 | unsigned int flags; | ||
221 | }; | ||
222 | |||
223 | struct cfq_rq { | ||
224 | struct rb_node rb_node; | ||
225 | sector_t rb_key; | ||
226 | struct request *request; | ||
227 | struct hlist_node hash; | ||
228 | |||
229 | struct cfq_queue *cfq_queue; | ||
230 | struct cfq_io_context *io_context; | ||
231 | |||
232 | unsigned int crq_flags; | ||
233 | }; | ||
234 | |||
235 | enum cfqq_state_flags { | ||
236 | CFQ_CFQQ_FLAG_on_rr = 0, | ||
237 | CFQ_CFQQ_FLAG_wait_request, | ||
238 | CFQ_CFQQ_FLAG_must_alloc, | ||
239 | CFQ_CFQQ_FLAG_must_alloc_slice, | ||
240 | CFQ_CFQQ_FLAG_must_dispatch, | ||
241 | CFQ_CFQQ_FLAG_fifo_expire, | ||
242 | CFQ_CFQQ_FLAG_idle_window, | ||
243 | CFQ_CFQQ_FLAG_prio_changed, | ||
244 | CFQ_CFQQ_FLAG_expired, | ||
245 | }; | ||
246 | |||
247 | #define CFQ_CFQQ_FNS(name) \ | ||
248 | static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ | ||
249 | { \ | ||
250 | cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ | ||
251 | } \ | ||
252 | static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ | ||
253 | { \ | ||
254 | cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ | ||
255 | } \ | ||
256 | static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ | ||
257 | { \ | ||
258 | return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ | ||
259 | } | ||
260 | |||
261 | CFQ_CFQQ_FNS(on_rr); | ||
262 | CFQ_CFQQ_FNS(wait_request); | ||
263 | CFQ_CFQQ_FNS(must_alloc); | ||
264 | CFQ_CFQQ_FNS(must_alloc_slice); | ||
265 | CFQ_CFQQ_FNS(must_dispatch); | ||
266 | CFQ_CFQQ_FNS(fifo_expire); | ||
267 | CFQ_CFQQ_FNS(idle_window); | ||
268 | CFQ_CFQQ_FNS(prio_changed); | ||
269 | CFQ_CFQQ_FNS(expired); | ||
270 | #undef CFQ_CFQQ_FNS | ||
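| /* | ||
|  * For instance, CFQ_CFQQ_FNS(on_rr) above expands to the three helpers | ||
|  * cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), | ||
|  * which set, clear and test bit CFQ_CFQQ_FLAG_on_rr in cfqq->flags. | ||
|  */ | ||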
271 | |||
272 | enum cfq_rq_state_flags { | ||
273 | CFQ_CRQ_FLAG_is_sync = 0, | ||
274 | }; | ||
275 | |||
276 | #define CFQ_CRQ_FNS(name) \ | ||
277 | static inline void cfq_mark_crq_##name(struct cfq_rq *crq) \ | ||
278 | { \ | ||
279 | crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name); \ | ||
280 | } \ | ||
281 | static inline void cfq_clear_crq_##name(struct cfq_rq *crq) \ | ||
282 | { \ | ||
283 | crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name); \ | ||
284 | } \ | ||
285 | static inline int cfq_crq_##name(const struct cfq_rq *crq) \ | ||
286 | { \ | ||
287 | return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0; \ | ||
288 | } | ||
289 | |||
290 | CFQ_CRQ_FNS(is_sync); | ||
291 | #undef CFQ_CRQ_FNS | ||
292 | |||
293 | static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); | ||
294 | static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); | ||
295 | static void cfq_put_cfqd(struct cfq_data *cfqd); | ||
296 | |||
297 | #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) | ||
298 | |||
299 | /* | ||
300 | * lots of deadline iosched dupes, can be abstracted later... | ||
301 | */ | ||
302 | static inline void cfq_del_crq_hash(struct cfq_rq *crq) | ||
303 | { | ||
304 | hlist_del_init(&crq->hash); | ||
305 | } | ||
306 | |||
307 | static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq) | ||
308 | { | ||
309 | const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request)); | ||
310 | |||
311 | hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]); | ||
312 | } | ||
313 | |||
314 | static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset) | ||
315 | { | ||
316 | struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)]; | ||
317 | struct hlist_node *entry, *next; | ||
318 | |||
319 | hlist_for_each_safe(entry, next, hash_list) { | ||
320 | struct cfq_rq *crq = list_entry_hash(entry); | ||
321 | struct request *__rq = crq->request; | ||
322 | |||
323 | if (!rq_mergeable(__rq)) { | ||
324 | cfq_del_crq_hash(crq); | ||
325 | continue; | ||
326 | } | ||
327 | |||
328 | if (rq_hash_key(__rq) == offset) | ||
329 | return __rq; | ||
330 | } | ||
331 | |||
332 | return NULL; | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * scheduler run of queue, if there are requests pending and no one in the | ||
337 | * driver that will restart queueing | ||
338 | */ | ||
339 | static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) | ||
340 | { | ||
341 | if (!cfqd->rq_in_driver && cfqd->busy_queues) | ||
342 | kblockd_schedule_work(&cfqd->unplug_work); | ||
343 | } | ||
344 | |||
345 | static int cfq_queue_empty(request_queue_t *q) | ||
346 | { | ||
347 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
348 | |||
349 | return !cfqd->busy_queues; | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * Lifted from AS - choose which of crq1 and crq2 that is best served now. | ||
354 | * We choose the request that is closest to the head right now. Distance | ||
355 | * behind the head is penalized and only allowed to a certain extent. | ||
356 | */ | ||
357 | static struct cfq_rq * | ||
358 | cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2) | ||
359 | { | ||
360 | sector_t last, s1, s2, d1 = 0, d2 = 0; | ||
361 | int r1_wrap = 0, r2_wrap = 0; /* requests are behind the disk head */ | ||
362 | unsigned long back_max; | ||
363 | |||
364 | if (crq1 == NULL || crq1 == crq2) | ||
365 | return crq2; | ||
366 | if (crq2 == NULL) | ||
367 | return crq1; | ||
368 | |||
369 | if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2)) | ||
370 | return crq1; | ||
371 | else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1)) | ||
372 | return crq2; | ||
373 | |||
374 | s1 = crq1->request->sector; | ||
375 | s2 = crq2->request->sector; | ||
376 | |||
377 | last = cfqd->last_sector; | ||
378 | |||
379 | /* | ||
380 | * by definition, 1KiB is 2 sectors | ||
381 | */ | ||
382 | back_max = cfqd->cfq_back_max * 2; | ||
383 | |||
384 | /* | ||
385 | * Strict one way elevator _except_ in the case where we allow | ||
386 | * short backward seeks which are biased as twice the cost of a | ||
387 | * similar forward seek. | ||
388 | */ | ||
389 | if (s1 >= last) | ||
390 | d1 = s1 - last; | ||
391 | else if (s1 + back_max >= last) | ||
392 | d1 = (last - s1) * cfqd->cfq_back_penalty; | ||
393 | else | ||
394 | r1_wrap = 1; | ||
395 | |||
396 | if (s2 >= last) | ||
397 | d2 = s2 - last; | ||
398 | else if (s2 + back_max >= last) | ||
399 | d2 = (last - s2) * cfqd->cfq_back_penalty; | ||
400 | else | ||
401 | r2_wrap = 1; | ||
402 | |||
403 | /* Found required data */ | ||
404 | if (!r1_wrap && r2_wrap) | ||
405 | return crq1; | ||
406 | else if (!r2_wrap && r1_wrap) | ||
407 | return crq2; | ||
408 | else if (r1_wrap && r2_wrap) { | ||
409 | /* both behind the head */ | ||
410 | if (s1 <= s2) | ||
411 | return crq1; | ||
412 | else | ||
413 | return crq2; | ||
414 | } | ||
415 | |||
416 | /* Both requests in front of the head */ | ||
417 | if (d1 < d2) | ||
418 | return crq1; | ||
419 | else if (d2 < d1) | ||
420 | return crq2; | ||
421 | else { | ||
422 | if (s1 >= s2) | ||
423 | return crq1; | ||
424 | else | ||
425 | return crq2; | ||
426 | } | ||
427 | } | ||
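| |||
| /* | ||
|  * Illustration with the default tunables: cfq_back_max = 16384 KiB gives a | ||
|  * backward window of 32768 sectors. With the head at sector 1000, a request | ||
|  * at 1100 gets d = 100 while one at 900 falls inside the window and gets | ||
|  * d = (1000 - 900) * 2 = 200 after the back_penalty, so the forward request | ||
|  * wins. Requests more than 32768 sectors behind the head count as wrapped | ||
|  * and only win if the alternative wrapped as well. | ||
|  */ | ||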
428 | |||
429 | /* | ||
430 | * would be nice to take fifo expire time into account as well | ||
431 | */ | ||
432 | static struct cfq_rq * | ||
433 | cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
434 | struct cfq_rq *last) | ||
435 | { | ||
436 | struct cfq_rq *crq_next = NULL, *crq_prev = NULL; | ||
437 | struct rb_node *rbnext, *rbprev; | ||
438 | |||
439 | if (!(rbnext = rb_next(&last->rb_node))) { | ||
440 | rbnext = rb_first(&cfqq->sort_list); | ||
441 | if (rbnext == &last->rb_node) | ||
442 | rbnext = NULL; | ||
443 | } | ||
444 | |||
445 | rbprev = rb_prev(&last->rb_node); | ||
446 | |||
447 | if (rbprev) | ||
448 | crq_prev = rb_entry_crq(rbprev); | ||
449 | if (rbnext) | ||
450 | crq_next = rb_entry_crq(rbnext); | ||
451 | |||
452 | return cfq_choose_req(cfqd, crq_next, crq_prev); | ||
453 | } | ||
454 | |||
455 | static void cfq_update_next_crq(struct cfq_rq *crq) | ||
456 | { | ||
457 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
458 | |||
459 | if (cfqq->next_crq == crq) | ||
460 | cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq); | ||
461 | } | ||
462 | |||
463 | static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted) | ||
464 | { | ||
465 | struct cfq_data *cfqd = cfqq->cfqd; | ||
466 | struct list_head *list, *entry; | ||
467 | |||
468 | BUG_ON(!cfq_cfqq_on_rr(cfqq)); | ||
469 | |||
470 | list_del(&cfqq->cfq_list); | ||
471 | |||
472 | if (cfq_class_rt(cfqq)) | ||
473 | list = &cfqd->cur_rr; | ||
474 | else if (cfq_class_idle(cfqq)) | ||
475 | list = &cfqd->idle_rr; | ||
476 | else { | ||
477 | /* | ||
478 | * if cfqq has requests in flight, don't allow it to be | ||
479 | * found in cfq_set_active_queue before it has finished them. | ||
480 | * this is done to increase fairness between a process that | ||
481 | * has lots of io pending vs one that only generates one | ||
482 | * sporadically or synchronously | ||
483 | */ | ||
484 | if (cfq_cfqq_dispatched(cfqq)) | ||
485 | list = &cfqd->busy_rr; | ||
486 | else | ||
487 | list = &cfqd->rr_list[cfqq->ioprio]; | ||
488 | } | ||
489 | |||
490 | /* | ||
491 | * if queue was preempted, just add to front to be fair. busy_rr | ||
492 | * isn't sorted. | ||
493 | */ | ||
494 | if (preempted || list == &cfqd->busy_rr) { | ||
495 | list_add(&cfqq->cfq_list, list); | ||
496 | return; | ||
497 | } | ||
498 | |||
499 | /* | ||
500 | * sort by when queue was last serviced | ||
501 | */ | ||
502 | entry = list; | ||
503 | while ((entry = entry->prev) != list) { | ||
504 | struct cfq_queue *__cfqq = list_entry_cfqq(entry); | ||
505 | |||
506 | if (!__cfqq->service_last) | ||
507 | break; | ||
508 | if (time_before(__cfqq->service_last, cfqq->service_last)) | ||
509 | break; | ||
510 | } | ||
511 | |||
512 | list_add(&cfqq->cfq_list, entry); | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * add to busy list of queues for service, trying to be fair in ordering | ||
517 | * the pending list according to last request service | ||
518 | */ | ||
519 | static inline void | ||
520 | cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
521 | { | ||
522 | BUG_ON(cfq_cfqq_on_rr(cfqq)); | ||
523 | cfq_mark_cfqq_on_rr(cfqq); | ||
524 | cfqd->busy_queues++; | ||
525 | |||
526 | cfq_resort_rr_list(cfqq, 0); | ||
527 | } | ||
528 | |||
529 | static inline void | ||
530 | cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
531 | { | ||
532 | BUG_ON(!cfq_cfqq_on_rr(cfqq)); | ||
533 | cfq_clear_cfqq_on_rr(cfqq); | ||
534 | list_move(&cfqq->cfq_list, &cfqd->empty_list); | ||
535 | |||
536 | BUG_ON(!cfqd->busy_queues); | ||
537 | cfqd->busy_queues--; | ||
538 | } | ||
539 | |||
540 | /* | ||
541 | * rb tree support functions | ||
542 | */ | ||
543 | static inline void cfq_del_crq_rb(struct cfq_rq *crq) | ||
544 | { | ||
545 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
546 | struct cfq_data *cfqd = cfqq->cfqd; | ||
547 | const int sync = cfq_crq_is_sync(crq); | ||
548 | |||
549 | BUG_ON(!cfqq->queued[sync]); | ||
550 | cfqq->queued[sync]--; | ||
551 | |||
552 | cfq_update_next_crq(crq); | ||
553 | |||
554 | rb_erase(&crq->rb_node, &cfqq->sort_list); | ||
555 | RB_CLEAR_COLOR(&crq->rb_node); | ||
556 | |||
557 | if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list)) | ||
558 | cfq_del_cfqq_rr(cfqd, cfqq); | ||
559 | } | ||
560 | |||
561 | static struct cfq_rq * | ||
562 | __cfq_add_crq_rb(struct cfq_rq *crq) | ||
563 | { | ||
564 | struct rb_node **p = &crq->cfq_queue->sort_list.rb_node; | ||
565 | struct rb_node *parent = NULL; | ||
566 | struct cfq_rq *__crq; | ||
567 | |||
568 | while (*p) { | ||
569 | parent = *p; | ||
570 | __crq = rb_entry_crq(parent); | ||
571 | |||
572 | if (crq->rb_key < __crq->rb_key) | ||
573 | p = &(*p)->rb_left; | ||
574 | else if (crq->rb_key > __crq->rb_key) | ||
575 | p = &(*p)->rb_right; | ||
576 | else | ||
577 | return __crq; | ||
578 | } | ||
579 | |||
580 | rb_link_node(&crq->rb_node, parent, p); | ||
581 | return NULL; | ||
582 | } | ||
583 | |||
584 | static void cfq_add_crq_rb(struct cfq_rq *crq) | ||
585 | { | ||
586 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
587 | struct cfq_data *cfqd = cfqq->cfqd; | ||
588 | struct request *rq = crq->request; | ||
589 | struct cfq_rq *__alias; | ||
590 | |||
591 | crq->rb_key = rq_rb_key(rq); | ||
592 | cfqq->queued[cfq_crq_is_sync(crq)]++; | ||
593 | |||
594 | /* | ||
595 | * looks a little odd, but the first insert might return an alias. | ||
596 | * if that happens, put the alias on the dispatch list | ||
597 | */ | ||
598 | while ((__alias = __cfq_add_crq_rb(crq)) != NULL) | ||
599 | cfq_dispatch_insert(cfqd->queue, __alias); | ||
600 | |||
601 | rb_insert_color(&crq->rb_node, &cfqq->sort_list); | ||
602 | |||
603 | if (!cfq_cfqq_on_rr(cfqq)) | ||
604 | cfq_add_cfqq_rr(cfqd, cfqq); | ||
605 | |||
606 | /* | ||
607 | * check if this request is a better next-serve candidate | ||
608 | */ | ||
609 | cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); | ||
610 | } | ||
611 | |||
612 | static inline void | ||
613 | cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq) | ||
614 | { | ||
615 | rb_erase(&crq->rb_node, &cfqq->sort_list); | ||
616 | cfqq->queued[cfq_crq_is_sync(crq)]--; | ||
617 | |||
618 | cfq_add_crq_rb(crq); | ||
619 | } | ||
620 | |||
621 | static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) | ||
622 | |||
623 | { | ||
624 | struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY); | ||
625 | struct rb_node *n; | ||
626 | |||
627 | if (!cfqq) | ||
628 | goto out; | ||
629 | |||
630 | n = cfqq->sort_list.rb_node; | ||
631 | while (n) { | ||
632 | struct cfq_rq *crq = rb_entry_crq(n); | ||
633 | |||
634 | if (sector < crq->rb_key) | ||
635 | n = n->rb_left; | ||
636 | else if (sector > crq->rb_key) | ||
637 | n = n->rb_right; | ||
638 | else | ||
639 | return crq->request; | ||
640 | } | ||
641 | |||
642 | out: | ||
643 | return NULL; | ||
644 | } | ||
645 | |||
646 | static void cfq_activate_request(request_queue_t *q, struct request *rq) | ||
647 | { | ||
648 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
649 | |||
650 | cfqd->rq_in_driver++; | ||
651 | } | ||
652 | |||
653 | static void cfq_deactivate_request(request_queue_t *q, struct request *rq) | ||
654 | { | ||
655 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
656 | |||
657 | WARN_ON(!cfqd->rq_in_driver); | ||
658 | cfqd->rq_in_driver--; | ||
659 | } | ||
660 | |||
661 | static void cfq_remove_request(struct request *rq) | ||
662 | { | ||
663 | struct cfq_rq *crq = RQ_DATA(rq); | ||
664 | |||
665 | list_del_init(&rq->queuelist); | ||
666 | cfq_del_crq_rb(crq); | ||
667 | cfq_del_crq_hash(crq); | ||
668 | } | ||
669 | |||
670 | static int | ||
671 | cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) | ||
672 | { | ||
673 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
674 | struct request *__rq; | ||
675 | int ret; | ||
676 | |||
677 | __rq = cfq_find_rq_hash(cfqd, bio->bi_sector); | ||
678 | if (__rq && elv_rq_merge_ok(__rq, bio)) { | ||
679 | ret = ELEVATOR_BACK_MERGE; | ||
680 | goto out; | ||
681 | } | ||
682 | |||
683 | __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio)); | ||
684 | if (__rq && elv_rq_merge_ok(__rq, bio)) { | ||
685 | ret = ELEVATOR_FRONT_MERGE; | ||
686 | goto out; | ||
687 | } | ||
688 | |||
689 | return ELEVATOR_NO_MERGE; | ||
690 | out: | ||
691 | *req = __rq; | ||
692 | return ret; | ||
693 | } | ||
694 | |||
695 | static void cfq_merged_request(request_queue_t *q, struct request *req) | ||
696 | { | ||
697 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
698 | struct cfq_rq *crq = RQ_DATA(req); | ||
699 | |||
700 | cfq_del_crq_hash(crq); | ||
701 | cfq_add_crq_hash(cfqd, crq); | ||
702 | |||
703 | if (rq_rb_key(req) != crq->rb_key) { | ||
704 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
705 | |||
706 | cfq_update_next_crq(crq); | ||
707 | cfq_reposition_crq_rb(cfqq, crq); | ||
708 | } | ||
709 | } | ||
710 | |||
711 | static void | ||
712 | cfq_merged_requests(request_queue_t *q, struct request *rq, | ||
713 | struct request *next) | ||
714 | { | ||
715 | cfq_merged_request(q, rq); | ||
716 | |||
717 | /* | ||
718 | * reposition in fifo if next is older than rq | ||
719 | */ | ||
720 | if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && | ||
721 | time_before(next->start_time, rq->start_time)) | ||
722 | list_move(&rq->queuelist, &next->queuelist); | ||
723 | |||
724 | cfq_remove_request(next); | ||
725 | } | ||
726 | |||
727 | static inline void | ||
728 | __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
729 | { | ||
730 | if (cfqq) { | ||
731 | /* | ||
732 | * stop potential idle class queues waiting service | ||
733 | */ | ||
734 | del_timer(&cfqd->idle_class_timer); | ||
735 | |||
736 | cfqq->slice_start = jiffies; | ||
737 | cfqq->slice_end = 0; | ||
738 | cfqq->slice_left = 0; | ||
739 | cfq_clear_cfqq_must_alloc_slice(cfqq); | ||
740 | cfq_clear_cfqq_fifo_expire(cfqq); | ||
741 | cfq_clear_cfqq_expired(cfqq); | ||
742 | } | ||
743 | |||
744 | cfqd->active_queue = cfqq; | ||
745 | } | ||
746 | |||
747 | /* | ||
748 | * 0 | ||
749 | * 0,1 | ||
750 | * 0,1,2 | ||
751 | * 0,1,2,3 | ||
752 | * 0,1,2,3,4 | ||
753 | * 0,1,2,3,4,5 | ||
754 | * 0,1,2,3,4,5,6 | ||
755 | * 0,1,2,3,4,5,6,7 | ||
756 | */ | ||
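| /* | ||
|  * (Reading the table above: each call splices one prio level into cur_rr, | ||
|  * and once a pass reaches cur_end_prio the window grows by one level, so | ||
|  * prio 0 is serviced on every pass while prio 7 is only reached on the | ||
|  * last one, giving higher priorities proportionally more service rounds.) | ||
|  */ | ||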
757 | static int cfq_get_next_prio_level(struct cfq_data *cfqd) | ||
758 | { | ||
759 | int prio, wrap; | ||
760 | |||
761 | prio = -1; | ||
762 | wrap = 0; | ||
763 | do { | ||
764 | int p; | ||
765 | |||
766 | for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) { | ||
767 | if (!list_empty(&cfqd->rr_list[p])) { | ||
768 | prio = p; | ||
769 | break; | ||
770 | } | ||
771 | } | ||
772 | |||
773 | if (prio != -1) | ||
774 | break; | ||
775 | cfqd->cur_prio = 0; | ||
776 | if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) { | ||
777 | cfqd->cur_end_prio = 0; | ||
778 | if (wrap) | ||
779 | break; | ||
780 | wrap = 1; | ||
781 | } | ||
782 | } while (1); | ||
783 | |||
784 | if (unlikely(prio == -1)) | ||
785 | return -1; | ||
786 | |||
787 | BUG_ON(prio >= CFQ_PRIO_LISTS); | ||
788 | |||
789 | list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr); | ||
790 | |||
791 | cfqd->cur_prio = prio + 1; | ||
792 | if (cfqd->cur_prio > cfqd->cur_end_prio) { | ||
793 | cfqd->cur_end_prio = cfqd->cur_prio; | ||
794 | cfqd->cur_prio = 0; | ||
795 | } | ||
796 | if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) { | ||
797 | cfqd->cur_prio = 0; | ||
798 | cfqd->cur_end_prio = 0; | ||
799 | } | ||
800 | |||
801 | return prio; | ||
802 | } | ||
803 | |||
804 | static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) | ||
805 | { | ||
806 | struct cfq_queue *cfqq; | ||
807 | |||
808 | /* | ||
809 | * if current queue is expired but not done with its requests yet, | ||
810 | * wait for that to happen | ||
811 | */ | ||
812 | if ((cfqq = cfqd->active_queue) != NULL) { | ||
813 | if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq)) | ||
814 | return NULL; | ||
815 | } | ||
816 | |||
817 | /* | ||
818 | * if current list is non-empty, grab first entry. if it is empty, | ||
819 | * get the next prio level and grab the first entry from it, if any were spliced | ||
820 | */ | ||
821 | if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) | ||
822 | cfqq = list_entry_cfqq(cfqd->cur_rr.next); | ||
823 | |||
824 | /* | ||
825 | * if we have idle queues and no rt or be queues had pending | ||
826 | * requests, either allow immediate service if the grace period | ||
827 | * has passed or arm the idle grace timer | ||
828 | */ | ||
829 | if (!cfqq && !list_empty(&cfqd->idle_rr)) { | ||
830 | unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE; | ||
831 | |||
832 | if (time_after_eq(jiffies, end)) | ||
833 | cfqq = list_entry_cfqq(cfqd->idle_rr.next); | ||
834 | else | ||
835 | mod_timer(&cfqd->idle_class_timer, end); | ||
836 | } | ||
837 | |||
838 | __cfq_set_active_queue(cfqd, cfqq); | ||
839 | return cfqq; | ||
840 | } | ||
841 | |||
842 | /* | ||
843 | * current cfqq expired its slice (or was too idle), select new one | ||
844 | */ | ||
845 | static void | ||
846 | __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
847 | int preempted) | ||
848 | { | ||
849 | unsigned long now = jiffies; | ||
850 | |||
851 | if (cfq_cfqq_wait_request(cfqq)) | ||
852 | del_timer(&cfqd->idle_slice_timer); | ||
853 | |||
854 | if (!preempted && !cfq_cfqq_dispatched(cfqq)) | ||
855 | cfqq->service_last = now; | ||
856 | |||
857 | cfq_clear_cfqq_must_dispatch(cfqq); | ||
858 | cfq_clear_cfqq_wait_request(cfqq); | ||
859 | |||
860 | /* | ||
861 | * store what was left of this slice, if the queue idled out | ||
862 | * or was preempted | ||
863 | */ | ||
864 | if (time_after(cfqq->slice_end, now)) | ||
865 | cfqq->slice_left = cfqq->slice_end - now; | ||
866 | else | ||
867 | cfqq->slice_left = 0; | ||
868 | |||
869 | if (cfq_cfqq_on_rr(cfqq)) | ||
870 | cfq_resort_rr_list(cfqq, preempted); | ||
871 | |||
872 | if (cfqq == cfqd->active_queue) | ||
873 | cfqd->active_queue = NULL; | ||
874 | |||
875 | if (cfqd->active_cic) { | ||
876 | put_io_context(cfqd->active_cic->ioc); | ||
877 | cfqd->active_cic = NULL; | ||
878 | } | ||
879 | |||
880 | cfqd->dispatch_slice = 0; | ||
881 | } | ||
882 | |||
883 | static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted) | ||
884 | { | ||
885 | struct cfq_queue *cfqq = cfqd->active_queue; | ||
886 | |||
887 | if (cfqq) { | ||
888 | /* | ||
889 | * use deferred expiry, if there are requests in progress as | ||
890 | * not to disturb the slice of the next queue | ||
891 | */ | ||
892 | if (cfq_cfqq_dispatched(cfqq)) | ||
893 | cfq_mark_cfqq_expired(cfqq); | ||
894 | else | ||
895 | __cfq_slice_expired(cfqd, cfqq, preempted); | ||
896 | } | ||
897 | } | ||
898 | |||
899 | static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
900 | |||
901 | { | ||
902 | WARN_ON(!RB_EMPTY(&cfqq->sort_list)); | ||
903 | WARN_ON(cfqq != cfqd->active_queue); | ||
904 | |||
905 | /* | ||
906 | * idle is disabled, either manually or by past process history | ||
907 | */ | ||
908 | if (!cfqd->cfq_slice_idle) | ||
909 | return 0; | ||
910 | if (!cfq_cfqq_idle_window(cfqq)) | ||
911 | return 0; | ||
912 | /* | ||
913 | * task has exited, don't wait | ||
914 | */ | ||
915 | if (cfqd->active_cic && !cfqd->active_cic->ioc->task) | ||
916 | return 0; | ||
917 | |||
918 | cfq_mark_cfqq_must_dispatch(cfqq); | ||
919 | cfq_mark_cfqq_wait_request(cfqq); | ||
920 | |||
921 | if (!timer_pending(&cfqd->idle_slice_timer)) { | ||
922 | unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle); | ||
923 | |||
924 | cfqd->idle_slice_timer.expires = jiffies + slice_left; | ||
925 | add_timer(&cfqd->idle_slice_timer); | ||
926 | } | ||
927 | |||
928 | return 1; | ||
929 | } | ||
930 | |||
931 | static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq) | ||
932 | { | ||
933 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
934 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
935 | |||
936 | cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq); | ||
937 | cfq_remove_request(crq->request); | ||
938 | cfqq->on_dispatch[cfq_crq_is_sync(crq)]++; | ||
939 | elv_dispatch_sort(q, crq->request); | ||
940 | } | ||
941 | |||
942 | /* | ||
943 | * return expired entry, or NULL to just start from scratch in rbtree | ||
944 | */ | ||
945 | static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq) | ||
946 | { | ||
947 | struct cfq_data *cfqd = cfqq->cfqd; | ||
948 | struct request *rq; | ||
949 | struct cfq_rq *crq; | ||
950 | |||
951 | if (cfq_cfqq_fifo_expire(cfqq)) | ||
952 | return NULL; | ||
953 | |||
954 | if (!list_empty(&cfqq->fifo)) { | ||
955 | int fifo = cfq_cfqq_class_sync(cfqq); | ||
956 | |||
957 | crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next)); | ||
958 | rq = crq->request; | ||
959 | if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) { | ||
960 | cfq_mark_cfqq_fifo_expire(cfqq); | ||
961 | return crq; | ||
962 | } | ||
963 | } | ||
964 | |||
965 | return NULL; | ||
966 | } | ||
967 | |||
968 | /* | ||
969 | * Scale schedule slice based on io priority. Use the sync time slice only | ||
970 | * if a queue is marked sync and has sync io queued. A sync queue with async | ||
971 | * io only should not get the full sync slice length. | ||
972 | */ | ||
973 | static inline int | ||
974 | cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
975 | { | ||
976 | const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)]; | ||
977 | |||
978 | WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); | ||
979 | |||
980 | return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio)); | ||
981 | } | ||
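| |||
| /* | ||
|  * Worked example, assuming HZ=1000 (so cfq_slice_sync = 100 jiffies): the | ||
|  * per-prio step is 100 / CFQ_SLICE_SCALE = 20 jiffies, so a sync queue at | ||
|  * ioprio 0 gets 100 + 20 * 4 = 180 jiffies, ioprio 4 gets 100 and ioprio 7 | ||
|  * gets 100 - 20 * 3 = 40 jiffies. | ||
|  */ | ||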
982 | |||
983 | static inline void | ||
984 | cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
985 | { | ||
986 | cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; | ||
987 | } | ||
988 | |||
989 | static inline int | ||
990 | cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
991 | { | ||
992 | const int base_rq = cfqd->cfq_slice_async_rq; | ||
993 | |||
994 | WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); | ||
995 | |||
996 | return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); | ||
997 | } | ||
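| |||
| /* | ||
|  * Worked example with the default cfq_slice_async_rq = 2 and | ||
|  * CFQ_PRIO_LISTS = 8: an async queue at ioprio 0 may dispatch up to | ||
|  * 2 * (2 + 2 * 7) = 32 requests in one slice, ioprio 4 up to 16 and | ||
|  * ioprio 7 up to 4. | ||
|  */ | ||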
998 | |||
999 | /* | ||
1000 | * get next queue for service | ||
1001 | */ | ||
1002 | static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | ||
1003 | { | ||
1004 | unsigned long now = jiffies; | ||
1005 | struct cfq_queue *cfqq; | ||
1006 | |||
1007 | cfqq = cfqd->active_queue; | ||
1008 | if (!cfqq) | ||
1009 | goto new_queue; | ||
1010 | |||
1011 | if (cfq_cfqq_expired(cfqq)) | ||
1012 | goto new_queue; | ||
1013 | |||
1014 | /* | ||
1015 | * slice has expired | ||
1016 | */ | ||
1017 | if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end)) | ||
1018 | goto expire; | ||
1019 | |||
1020 | /* | ||
1021 | * if queue has requests, dispatch one. if not, check if | ||
1022 | * enough slice is left to wait for one | ||
1023 | */ | ||
1024 | if (!RB_EMPTY(&cfqq->sort_list)) | ||
1025 | goto keep_queue; | ||
1026 | else if (cfq_cfqq_class_sync(cfqq) && | ||
1027 | time_before(now, cfqq->slice_end)) { | ||
1028 | if (cfq_arm_slice_timer(cfqd, cfqq)) | ||
1029 | return NULL; | ||
1030 | } | ||
1031 | |||
1032 | expire: | ||
1033 | cfq_slice_expired(cfqd, 0); | ||
1034 | new_queue: | ||
1035 | cfqq = cfq_set_active_queue(cfqd); | ||
1036 | keep_queue: | ||
1037 | return cfqq; | ||
1038 | } | ||
1039 | |||
1040 | static int | ||
1041 | __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
1042 | int max_dispatch) | ||
1043 | { | ||
1044 | int dispatched = 0; | ||
1045 | |||
1046 | BUG_ON(RB_EMPTY(&cfqq->sort_list)); | ||
1047 | |||
1048 | do { | ||
1049 | struct cfq_rq *crq; | ||
1050 | |||
1051 | /* | ||
1052 | * follow expired path, else get first next available | ||
1053 | */ | ||
1054 | if ((crq = cfq_check_fifo(cfqq)) == NULL) | ||
1055 | crq = cfqq->next_crq; | ||
1056 | |||
1057 | /* | ||
1058 | * finally, insert request into driver dispatch list | ||
1059 | */ | ||
1060 | cfq_dispatch_insert(cfqd->queue, crq); | ||
1061 | |||
1062 | cfqd->dispatch_slice++; | ||
1063 | dispatched++; | ||
1064 | |||
1065 | if (!cfqd->active_cic) { | ||
1066 | atomic_inc(&crq->io_context->ioc->refcount); | ||
1067 | cfqd->active_cic = crq->io_context; | ||
1068 | } | ||
1069 | |||
1070 | if (RB_EMPTY(&cfqq->sort_list)) | ||
1071 | break; | ||
1072 | |||
1073 | } while (dispatched < max_dispatch); | ||
1074 | |||
1075 | /* | ||
1076 | * if slice end isn't set yet, set it. if at least one request was | ||
1077 | * sync, use the sync time slice value | ||
1078 | */ | ||
1079 | if (!cfqq->slice_end) | ||
1080 | cfq_set_prio_slice(cfqd, cfqq); | ||
1081 | |||
1082 | /* | ||
1083 | * expire an async queue immediately if it has used up its slice. idle | ||
1084 | * queues always expire after 1 dispatch round. | ||
1085 | */ | ||
1086 | if ((!cfq_cfqq_sync(cfqq) && | ||
1087 | cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) || | ||
1088 | cfq_class_idle(cfqq)) | ||
1089 | cfq_slice_expired(cfqd, 0); | ||
1090 | |||
1091 | return dispatched; | ||
1092 | } | ||
1093 | |||
1094 | static int | ||
1095 | cfq_forced_dispatch_cfqqs(struct list_head *list) | ||
1096 | { | ||
1097 | int dispatched = 0; | ||
1098 | struct cfq_queue *cfqq, *next; | ||
1099 | struct cfq_rq *crq; | ||
1100 | |||
1101 | list_for_each_entry_safe(cfqq, next, list, cfq_list) { | ||
1102 | while ((crq = cfqq->next_crq)) { | ||
1103 | cfq_dispatch_insert(cfqq->cfqd->queue, crq); | ||
1104 | dispatched++; | ||
1105 | } | ||
1106 | BUG_ON(!list_empty(&cfqq->fifo)); | ||
1107 | } | ||
1108 | return dispatched; | ||
1109 | } | ||
1110 | |||
1111 | static int | ||
1112 | cfq_forced_dispatch(struct cfq_data *cfqd) | ||
1113 | { | ||
1114 | int i, dispatched = 0; | ||
1115 | |||
1116 | for (i = 0; i < CFQ_PRIO_LISTS; i++) | ||
1117 | dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]); | ||
1118 | |||
1119 | dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr); | ||
1120 | dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr); | ||
1121 | dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr); | ||
1122 | |||
1123 | cfq_slice_expired(cfqd, 0); | ||
1124 | |||
1125 | BUG_ON(cfqd->busy_queues); | ||
1126 | |||
1127 | return dispatched; | ||
1128 | } | ||
1129 | |||
1130 | static int | ||
1131 | cfq_dispatch_requests(request_queue_t *q, int force) | ||
1132 | { | ||
1133 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1134 | struct cfq_queue *cfqq; | ||
1135 | |||
1136 | if (!cfqd->busy_queues) | ||
1137 | return 0; | ||
1138 | |||
1139 | if (unlikely(force)) | ||
1140 | return cfq_forced_dispatch(cfqd); | ||
1141 | |||
1142 | cfqq = cfq_select_queue(cfqd); | ||
1143 | if (cfqq) { | ||
1144 | int max_dispatch; | ||
1145 | |||
1146 | /* | ||
1147 | * if idle window is disabled, allow queue buildup | ||
1148 | */ | ||
1149 | if (!cfq_cfqq_idle_window(cfqq) && | ||
1150 | cfqd->rq_in_driver >= cfqd->cfq_max_depth) | ||
1151 | return 0; | ||
1152 | |||
1153 | cfq_clear_cfqq_must_dispatch(cfqq); | ||
1154 | cfq_clear_cfqq_wait_request(cfqq); | ||
1155 | del_timer(&cfqd->idle_slice_timer); | ||
1156 | |||
1157 | max_dispatch = cfqd->cfq_quantum; | ||
1158 | if (cfq_class_idle(cfqq)) | ||
1159 | max_dispatch = 1; | ||
1160 | |||
1161 | return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); | ||
1162 | } | ||
1163 | |||
1164 | return 0; | ||
1165 | } | ||
1166 | |||
1167 | /* | ||
1168 | * task holds one reference to the queue, dropped when task exits. each crq | ||
1169 | * in-flight on this queue also holds a reference, dropped when crq is freed. | ||
1170 | * | ||
1171 | * queue lock must be held here. | ||
1172 | */ | ||
1173 | static void cfq_put_queue(struct cfq_queue *cfqq) | ||
1174 | { | ||
1175 | struct cfq_data *cfqd = cfqq->cfqd; | ||
1176 | |||
1177 | BUG_ON(atomic_read(&cfqq->ref) <= 0); | ||
1178 | |||
1179 | if (!atomic_dec_and_test(&cfqq->ref)) | ||
1180 | return; | ||
1181 | |||
1182 | BUG_ON(rb_first(&cfqq->sort_list)); | ||
1183 | BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); | ||
1184 | BUG_ON(cfq_cfqq_on_rr(cfqq)); | ||
1185 | |||
1186 | if (unlikely(cfqd->active_queue == cfqq)) { | ||
1187 | __cfq_slice_expired(cfqd, cfqq, 0); | ||
1188 | cfq_schedule_dispatch(cfqd); | ||
1189 | } | ||
1190 | |||
1191 | cfq_put_cfqd(cfqq->cfqd); | ||
1192 | |||
1193 | /* | ||
1194 | * it's on the empty list and still hashed | ||
1195 | */ | ||
1196 | list_del(&cfqq->cfq_list); | ||
1197 | hlist_del(&cfqq->cfq_hash); | ||
1198 | kmem_cache_free(cfq_pool, cfqq); | ||
1199 | } | ||
1200 | |||
1201 | static inline struct cfq_queue * | ||
1202 | __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio, | ||
1203 | const int hashval) | ||
1204 | { | ||
1205 | struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; | ||
1206 | struct hlist_node *entry, *next; | ||
1207 | |||
1208 | hlist_for_each_safe(entry, next, hash_list) { | ||
1209 | struct cfq_queue *__cfqq = list_entry_qhash(entry); | ||
1210 | const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio); | ||
1211 | |||
1212 | if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) | ||
1213 | return __cfqq; | ||
1214 | } | ||
1215 | |||
1216 | return NULL; | ||
1217 | } | ||
1218 | |||
1219 | static struct cfq_queue * | ||
1220 | cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio) | ||
1221 | { | ||
1222 | return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT)); | ||
1223 | } | ||
1224 | |||
1225 | static void cfq_free_io_context(struct cfq_io_context *cic) | ||
1226 | { | ||
1227 | struct cfq_io_context *__cic; | ||
1228 | struct list_head *entry, *next; | ||
1229 | |||
1230 | list_for_each_safe(entry, next, &cic->list) { | ||
1231 | __cic = list_entry(entry, struct cfq_io_context, list); | ||
1232 | kmem_cache_free(cfq_ioc_pool, __cic); | ||
1233 | } | ||
1234 | |||
1235 | kmem_cache_free(cfq_ioc_pool, cic); | ||
1236 | } | ||
1237 | |||
1238 | /* | ||
1239 | * Called with interrupts disabled | ||
1240 | */ | ||
1241 | static void cfq_exit_single_io_context(struct cfq_io_context *cic) | ||
1242 | { | ||
1243 | struct cfq_data *cfqd = cic->cfqq->cfqd; | ||
1244 | request_queue_t *q = cfqd->queue; | ||
1245 | |||
1246 | WARN_ON(!irqs_disabled()); | ||
1247 | |||
1248 | spin_lock(q->queue_lock); | ||
1249 | |||
1250 | if (unlikely(cic->cfqq == cfqd->active_queue)) { | ||
1251 | __cfq_slice_expired(cfqd, cic->cfqq, 0); | ||
1252 | cfq_schedule_dispatch(cfqd); | ||
1253 | } | ||
1254 | |||
1255 | cfq_put_queue(cic->cfqq); | ||
1256 | cic->cfqq = NULL; | ||
1257 | spin_unlock(q->queue_lock); | ||
1258 | } | ||
1259 | |||
1260 | /* | ||
1261 | * Another task may update the task cic list, if it is doing a queue lookup | ||
1262 | * on its behalf. cfq_cic_lock excludes such concurrent updates | ||
1263 | */ | ||
1264 | static void cfq_exit_io_context(struct cfq_io_context *cic) | ||
1265 | { | ||
1266 | struct cfq_io_context *__cic; | ||
1267 | struct list_head *entry; | ||
1268 | unsigned long flags; | ||
1269 | |||
1270 | local_irq_save(flags); | ||
1271 | |||
1272 | /* | ||
1273 | * put the reference this task is holding to the various queues | ||
1274 | */ | ||
1275 | list_for_each(entry, &cic->list) { | ||
1276 | __cic = list_entry(entry, struct cfq_io_context, list); | ||
1277 | cfq_exit_single_io_context(__cic); | ||
1278 | } | ||
1279 | |||
1280 | cfq_exit_single_io_context(cic); | ||
1281 | local_irq_restore(flags); | ||
1282 | } | ||
1283 | |||
1284 | static struct cfq_io_context * | ||
1285 | cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | ||
1286 | { | ||
1287 | struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); | ||
1288 | |||
1289 | if (cic) { | ||
1290 | INIT_LIST_HEAD(&cic->list); | ||
1291 | cic->cfqq = NULL; | ||
1292 | cic->key = NULL; | ||
1293 | cic->last_end_request = jiffies; | ||
1294 | cic->ttime_total = 0; | ||
1295 | cic->ttime_samples = 0; | ||
1296 | cic->ttime_mean = 0; | ||
1297 | cic->dtor = cfq_free_io_context; | ||
1298 | cic->exit = cfq_exit_io_context; | ||
1299 | } | ||
1300 | |||
1301 | return cic; | ||
1302 | } | ||
1303 | |||
1304 | static void cfq_init_prio_data(struct cfq_queue *cfqq) | ||
1305 | { | ||
1306 | struct task_struct *tsk = current; | ||
1307 | int ioprio_class; | ||
1308 | |||
1309 | if (!cfq_cfqq_prio_changed(cfqq)) | ||
1310 | return; | ||
1311 | |||
1312 | ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio); | ||
1313 | switch (ioprio_class) { | ||
1314 | default: | ||
1315 | printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); | ||
1316 | case IOPRIO_CLASS_NONE: | ||
1317 | /* | ||
1318 | * no prio set, place us in the middle of the BE classes | ||
1319 | */ | ||
1320 | cfqq->ioprio = task_nice_ioprio(tsk); | ||
1321 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | ||
1322 | break; | ||
1323 | case IOPRIO_CLASS_RT: | ||
1324 | cfqq->ioprio = task_ioprio(tsk); | ||
1325 | cfqq->ioprio_class = IOPRIO_CLASS_RT; | ||
1326 | break; | ||
1327 | case IOPRIO_CLASS_BE: | ||
1328 | cfqq->ioprio = task_ioprio(tsk); | ||
1329 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | ||
1330 | break; | ||
1331 | case IOPRIO_CLASS_IDLE: | ||
1332 | cfqq->ioprio_class = IOPRIO_CLASS_IDLE; | ||
1333 | cfqq->ioprio = 7; | ||
1334 | cfq_clear_cfqq_idle_window(cfqq); | ||
1335 | break; | ||
1336 | } | ||
1337 | |||
1338 | /* | ||
1339 | * keep track of original prio settings in case we have to temporarily | ||
1340 | * elevate the priority of this queue | ||
1341 | */ | ||
1342 | cfqq->org_ioprio = cfqq->ioprio; | ||
1343 | cfqq->org_ioprio_class = cfqq->ioprio_class; | ||
1344 | |||
1345 | if (cfq_cfqq_on_rr(cfqq)) | ||
1346 | cfq_resort_rr_list(cfqq, 0); | ||
1347 | |||
1348 | cfq_clear_cfqq_prio_changed(cfqq); | ||
1349 | } | ||
1350 | |||
1351 | static inline void changed_ioprio(struct cfq_queue *cfqq) | ||
1352 | { | ||
1353 | if (cfqq) { | ||
1354 | struct cfq_data *cfqd = cfqq->cfqd; | ||
1355 | |||
1356 | spin_lock(cfqd->queue->queue_lock); | ||
1357 | cfq_mark_cfqq_prio_changed(cfqq); | ||
1358 | cfq_init_prio_data(cfqq); | ||
1359 | spin_unlock(cfqd->queue->queue_lock); | ||
1360 | } | ||
1361 | } | ||
1362 | |||
1363 | /* | ||
1364 | * callback from sys_ioprio_set, irqs are disabled | ||
1365 | */ | ||
1366 | static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) | ||
1367 | { | ||
1368 | struct cfq_io_context *cic = ioc->cic; | ||
1369 | |||
1370 | changed_ioprio(cic->cfqq); | ||
1371 | |||
1372 | list_for_each_entry(cic, &cic->list, list) | ||
1373 | changed_ioprio(cic->cfqq); | ||
1374 | |||
1375 | return 0; | ||
1376 | } | ||
1377 | |||
1378 | static struct cfq_queue * | ||
1379 | cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio, | ||
1380 | gfp_t gfp_mask) | ||
1381 | { | ||
1382 | const int hashval = hash_long(key, CFQ_QHASH_SHIFT); | ||
1383 | struct cfq_queue *cfqq, *new_cfqq = NULL; | ||
1384 | |||
1385 | retry: | ||
1386 | cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval); | ||
1387 | |||
1388 | if (!cfqq) { | ||
1389 | if (new_cfqq) { | ||
1390 | cfqq = new_cfqq; | ||
1391 | new_cfqq = NULL; | ||
1392 | } else if (gfp_mask & __GFP_WAIT) { | ||
1393 | spin_unlock_irq(cfqd->queue->queue_lock); | ||
1394 | new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); | ||
1395 | spin_lock_irq(cfqd->queue->queue_lock); | ||
1396 | goto retry; | ||
1397 | } else { | ||
1398 | cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); | ||
1399 | if (!cfqq) | ||
1400 | goto out; | ||
1401 | } | ||
1402 | |||
1403 | memset(cfqq, 0, sizeof(*cfqq)); | ||
1404 | |||
1405 | INIT_HLIST_NODE(&cfqq->cfq_hash); | ||
1406 | INIT_LIST_HEAD(&cfqq->cfq_list); | ||
1407 | RB_CLEAR_ROOT(&cfqq->sort_list); | ||
1408 | INIT_LIST_HEAD(&cfqq->fifo); | ||
1409 | |||
1410 | cfqq->key = key; | ||
1411 | hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); | ||
1412 | atomic_set(&cfqq->ref, 0); | ||
1413 | cfqq->cfqd = cfqd; | ||
1414 | atomic_inc(&cfqd->ref); | ||
1415 | cfqq->service_last = 0; | ||
1416 | /* | ||
1417 | * set ->slice_left to allow preemption for a new process | ||
1418 | */ | ||
1419 | cfqq->slice_left = 2 * cfqd->cfq_slice_idle; | ||
1420 | cfq_mark_cfqq_idle_window(cfqq); | ||
1421 | cfq_mark_cfqq_prio_changed(cfqq); | ||
1422 | cfq_init_prio_data(cfqq); | ||
1423 | } | ||
1424 | |||
1425 | if (new_cfqq) | ||
1426 | kmem_cache_free(cfq_pool, new_cfqq); | ||
1427 | |||
1428 | atomic_inc(&cfqq->ref); | ||
1429 | out: | ||
1430 | WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); | ||
1431 | return cfqq; | ||
1432 | } | ||
1433 | |||
1434 | /* | ||
1435 | * Setup general io context and cfq io context. There can be several cfq | ||
1436 | * io contexts per general io context, if this process is doing io to more | ||
1437 | * than one device managed by cfq. Note that caller is holding a reference to | ||
1438 | * cfqq, so we don't need to worry about it disappearing | ||
1439 | */ | ||
1440 | static struct cfq_io_context * | ||
1441 | cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) | ||
1442 | { | ||
1443 | struct io_context *ioc = NULL; | ||
1444 | struct cfq_io_context *cic; | ||
1445 | |||
1446 | might_sleep_if(gfp_mask & __GFP_WAIT); | ||
1447 | |||
1448 | ioc = get_io_context(gfp_mask); | ||
1449 | if (!ioc) | ||
1450 | return NULL; | ||
1451 | |||
1452 | if ((cic = ioc->cic) == NULL) { | ||
1453 | cic = cfq_alloc_io_context(cfqd, gfp_mask); | ||
1454 | |||
1455 | if (cic == NULL) | ||
1456 | goto err; | ||
1457 | |||
1458 | /* | ||
1459 | * manually increment generic io_context usage count, it | ||
1460 | * cannot go away since we are already holding one ref to it | ||
1461 | */ | ||
1462 | ioc->cic = cic; | ||
1463 | ioc->set_ioprio = cfq_ioc_set_ioprio; | ||
1464 | cic->ioc = ioc; | ||
1465 | cic->key = cfqd; | ||
1466 | atomic_inc(&cfqd->ref); | ||
1467 | } else { | ||
1468 | struct cfq_io_context *__cic; | ||
1469 | |||
1470 | /* | ||
1471 | * the first cic on the list is actually the head itself | ||
1472 | */ | ||
1473 | if (cic->key == cfqd) | ||
1474 | goto out; | ||
1475 | |||
1476 | /* | ||
1477 | * cic exists, check if we already are there. linear search | ||
1478 | * should be ok here, the list will usually not be more than | ||
1479 | * 1 or a few entries long | ||
1480 | */ | ||
1481 | list_for_each_entry(__cic, &cic->list, list) { | ||
1482 | /* | ||
1483 | * this process is already holding a reference to | ||
1484 | * this queue, so no need to get one more | ||
1485 | */ | ||
1486 | if (__cic->key == cfqd) { | ||
1487 | cic = __cic; | ||
1488 | goto out; | ||
1489 | } | ||
1490 | } | ||
1491 | |||
1492 | /* | ||
1493 | * nope, process doesn't have a cic associated with this | ||
1494 | * cfqq yet. get a new one and add to list | ||
1495 | */ | ||
1496 | __cic = cfq_alloc_io_context(cfqd, gfp_mask); | ||
1497 | if (__cic == NULL) | ||
1498 | goto err; | ||
1499 | |||
1500 | __cic->ioc = ioc; | ||
1501 | __cic->key = cfqd; | ||
1502 | atomic_inc(&cfqd->ref); | ||
1503 | list_add(&__cic->list, &cic->list); | ||
1504 | cic = __cic; | ||
1505 | } | ||
1506 | |||
1507 | out: | ||
1508 | return cic; | ||
1509 | err: | ||
1510 | put_io_context(ioc); | ||
1511 | return NULL; | ||
1512 | } | ||
1513 | |||
1514 | static void | ||
1515 | cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) | ||
1516 | { | ||
1517 | unsigned long elapsed, ttime; | ||
1518 | |||
1519 | /* | ||
1520 | * if this context already has stuff queued, thinktime is from | ||
1521 | * last queue not last end | ||
1522 | */ | ||
1523 | #if 0 | ||
1524 | if (time_after(cic->last_end_request, cic->last_queue)) | ||
1525 | elapsed = jiffies - cic->last_end_request; | ||
1526 | else | ||
1527 | elapsed = jiffies - cic->last_queue; | ||
1528 | #else | ||
1529 | elapsed = jiffies - cic->last_end_request; | ||
1530 | #endif | ||
1531 | |||
1532 | ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle); | ||
1533 | |||
1534 | cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; | ||
1535 | cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; | ||
1536 | cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; | ||
1537 | } | ||
1538 | |||
1539 | #define sample_valid(samples) ((samples) > 80) | ||
1540 | |||
1541 | /* | ||
1542 | * Disable idle window if the process thinks too long or seeks so much that | ||
1543 | * it doesn't matter | ||
1544 | */ | ||
1545 | static void | ||
1546 | cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
1547 | struct cfq_io_context *cic) | ||
1548 | { | ||
1549 | int enable_idle = cfq_cfqq_idle_window(cfqq); | ||
1550 | |||
1551 | if (!cic->ioc->task || !cfqd->cfq_slice_idle) | ||
1552 | enable_idle = 0; | ||
1553 | else if (sample_valid(cic->ttime_samples)) { | ||
1554 | if (cic->ttime_mean > cfqd->cfq_slice_idle) | ||
1555 | enable_idle = 0; | ||
1556 | else | ||
1557 | enable_idle = 1; | ||
1558 | } | ||
1559 | |||
1560 | if (enable_idle) | ||
1561 | cfq_mark_cfqq_idle_window(cfqq); | ||
1562 | else | ||
1563 | cfq_clear_cfqq_idle_window(cfqq); | ||
1564 | } | ||
1565 | |||
1566 | |||
1567 | /* | ||
1568 | * Check if new_cfqq should preempt the currently active queue. Return 0 for | ||
1569 | * no or if we aren't sure; a 1 will cause a preempt. | ||
1570 | */ | ||
1571 | static int | ||
1572 | cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, | ||
1573 | struct cfq_rq *crq) | ||
1574 | { | ||
1575 | struct cfq_queue *cfqq = cfqd->active_queue; | ||
1576 | |||
1577 | if (cfq_class_idle(new_cfqq)) | ||
1578 | return 0; | ||
1579 | |||
1580 | if (!cfqq) | ||
1581 | return 1; | ||
1582 | |||
1583 | if (cfq_class_idle(cfqq)) | ||
1584 | return 1; | ||
1585 | if (!cfq_cfqq_wait_request(new_cfqq)) | ||
1586 | return 0; | ||
1587 | /* | ||
1588 | * if it doesn't have slice left, forget it | ||
1589 | */ | ||
1590 | if (new_cfqq->slice_left < cfqd->cfq_slice_idle) | ||
1591 | return 0; | ||
1592 | if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq)) | ||
1593 | return 1; | ||
1594 | |||
1595 | return 0; | ||
1596 | } | ||
1597 | |||
1598 | /* | ||
1599 | * cfqq preempts the active queue. if we allowed preempt with no slice left, | ||
1600 | * let it have half of its nominal slice. | ||
1601 | */ | ||
1602 | static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
1603 | { | ||
1604 | struct cfq_queue *__cfqq, *next; | ||
1605 | |||
1606 | list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list) | ||
1607 | cfq_resort_rr_list(__cfqq, 1); | ||
1608 | |||
1609 | if (!cfqq->slice_left) | ||
1610 | cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2; | ||
1611 | |||
1612 | cfqq->slice_end = cfqq->slice_left + jiffies; | ||
1613 | __cfq_slice_expired(cfqd, cfqq, 1); | ||
1614 | __cfq_set_active_queue(cfqd, cfqq); | ||
1615 | } | ||
1616 | |||
1617 | /* | ||
1618 | * should really be a ll_rw_blk.c helper | ||
1619 | */ | ||
1620 | static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
1621 | { | ||
1622 | request_queue_t *q = cfqd->queue; | ||
1623 | |||
1624 | if (!blk_queue_plugged(q)) | ||
1625 | q->request_fn(q); | ||
1626 | else | ||
1627 | __generic_unplug_device(q); | ||
1628 | } | ||
1629 | |||
1630 | /* | ||
1631 | * Called when a new fs request (crq) is added (to cfqq). Check if there's | ||
1632 | * something we should do about it | ||
1633 | */ | ||
1634 | static void | ||
1635 | cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
1636 | struct cfq_rq *crq) | ||
1637 | { | ||
1638 | struct cfq_io_context *cic; | ||
1639 | |||
1640 | cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); | ||
1641 | |||
1642 | /* | ||
1643 | * we never wait for an async request and we don't allow preemption | ||
1644 | * of an async request. so just return early | ||
1645 | */ | ||
1646 | if (!cfq_crq_is_sync(crq)) | ||
1647 | return; | ||
1648 | |||
1649 | cic = crq->io_context; | ||
1650 | |||
1651 | cfq_update_io_thinktime(cfqd, cic); | ||
1652 | cfq_update_idle_window(cfqd, cfqq, cic); | ||
1653 | |||
1654 | cic->last_queue = jiffies; | ||
1655 | |||
1656 | if (cfqq == cfqd->active_queue) { | ||
1657 | /* | ||
1658 | * if we are waiting for a request for this queue, let it rip | ||
1659 | * immediately and flag that we must not expire this queue | ||
1660 | * just now | ||
1661 | */ | ||
1662 | if (cfq_cfqq_wait_request(cfqq)) { | ||
1663 | cfq_mark_cfqq_must_dispatch(cfqq); | ||
1664 | del_timer(&cfqd->idle_slice_timer); | ||
1665 | cfq_start_queueing(cfqd, cfqq); | ||
1666 | } | ||
1667 | } else if (cfq_should_preempt(cfqd, cfqq, crq)) { | ||
1668 | /* | ||
1669 | * not the active queue - expire current slice if it is | ||
1670 | * idle and has expired its mean thinktime or this new queue | ||
1671 | * has some old slice time left and is of higher priority | ||
1672 | */ | ||
1673 | cfq_preempt_queue(cfqd, cfqq); | ||
1674 | cfq_mark_cfqq_must_dispatch(cfqq); | ||
1675 | cfq_start_queueing(cfqd, cfqq); | ||
1676 | } | ||
1677 | } | ||
1678 | |||
1679 | static void cfq_insert_request(request_queue_t *q, struct request *rq) | ||
1680 | { | ||
1681 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1682 | struct cfq_rq *crq = RQ_DATA(rq); | ||
1683 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
1684 | |||
1685 | cfq_init_prio_data(cfqq); | ||
1686 | |||
1687 | cfq_add_crq_rb(crq); | ||
1688 | |||
1689 | list_add_tail(&rq->queuelist, &cfqq->fifo); | ||
1690 | |||
1691 | if (rq_mergeable(rq)) | ||
1692 | cfq_add_crq_hash(cfqd, crq); | ||
1693 | |||
1694 | cfq_crq_enqueued(cfqd, cfqq, crq); | ||
1695 | } | ||
1696 | |||
1697 | static void cfq_completed_request(request_queue_t *q, struct request *rq) | ||
1698 | { | ||
1699 | struct cfq_rq *crq = RQ_DATA(rq); | ||
1700 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
1701 | struct cfq_data *cfqd = cfqq->cfqd; | ||
1702 | const int sync = cfq_crq_is_sync(crq); | ||
1703 | unsigned long now; | ||
1704 | |||
1705 | now = jiffies; | ||
1706 | |||
1707 | WARN_ON(!cfqd->rq_in_driver); | ||
1708 | WARN_ON(!cfqq->on_dispatch[sync]); | ||
1709 | cfqd->rq_in_driver--; | ||
1710 | cfqq->on_dispatch[sync]--; | ||
1711 | |||
1712 | if (!cfq_class_idle(cfqq)) | ||
1713 | cfqd->last_end_request = now; | ||
1714 | |||
1715 | if (!cfq_cfqq_dispatched(cfqq)) { | ||
1716 | if (cfq_cfqq_on_rr(cfqq)) { | ||
1717 | cfqq->service_last = now; | ||
1718 | cfq_resort_rr_list(cfqq, 0); | ||
1719 | } | ||
1720 | if (cfq_cfqq_expired(cfqq)) { | ||
1721 | __cfq_slice_expired(cfqd, cfqq, 0); | ||
1722 | cfq_schedule_dispatch(cfqd); | ||
1723 | } | ||
1724 | } | ||
1725 | |||
1726 | if (cfq_crq_is_sync(crq)) | ||
1727 | crq->io_context->last_end_request = now; | ||
1728 | } | ||
1729 | |||
1730 | static struct request * | ||
1731 | cfq_former_request(request_queue_t *q, struct request *rq) | ||
1732 | { | ||
1733 | struct cfq_rq *crq = RQ_DATA(rq); | ||
1734 | struct rb_node *rbprev = rb_prev(&crq->rb_node); | ||
1735 | |||
1736 | if (rbprev) | ||
1737 | return rb_entry_crq(rbprev)->request; | ||
1738 | |||
1739 | return NULL; | ||
1740 | } | ||
1741 | |||
1742 | static struct request * | ||
1743 | cfq_latter_request(request_queue_t *q, struct request *rq) | ||
1744 | { | ||
1745 | struct cfq_rq *crq = RQ_DATA(rq); | ||
1746 | struct rb_node *rbnext = rb_next(&crq->rb_node); | ||
1747 | |||
1748 | if (rbnext) | ||
1749 | return rb_entry_crq(rbnext)->request; | ||
1750 | |||
1751 | return NULL; | ||
1752 | } | ||
1753 | |||
1754 | /* | ||
1755 | * we temporarily boost lower priority queues if they are holding fs exclusive | ||
1756 | * resources. they are boosted to normal prio (CLASS_BE/4) | ||
1757 | */ | ||
1758 | static void cfq_prio_boost(struct cfq_queue *cfqq) | ||
1759 | { | ||
1760 | const int ioprio_class = cfqq->ioprio_class; | ||
1761 | const int ioprio = cfqq->ioprio; | ||
1762 | |||
1763 | if (has_fs_excl()) { | ||
1764 | /* | ||
1765 | * boost idle prio on transactions that would lock out other | ||
1766 | * users of the filesystem | ||
1767 | */ | ||
1768 | if (cfq_class_idle(cfqq)) | ||
1769 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | ||
1770 | if (cfqq->ioprio > IOPRIO_NORM) | ||
1771 | cfqq->ioprio = IOPRIO_NORM; | ||
1772 | } else { | ||
1773 | /* | ||
1774 | * check if we need to unboost the queue | ||
1775 | */ | ||
1776 | if (cfqq->ioprio_class != cfqq->org_ioprio_class) | ||
1777 | cfqq->ioprio_class = cfqq->org_ioprio_class; | ||
1778 | if (cfqq->ioprio != cfqq->org_ioprio) | ||
1779 | cfqq->ioprio = cfqq->org_ioprio; | ||
1780 | } | ||
1781 | |||
1782 | /* | ||
1783 | * refile between round-robin lists if we moved the priority class | ||
1784 | */ | ||
1785 | if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) && | ||
1786 | cfq_cfqq_on_rr(cfqq)) | ||
1787 | cfq_resort_rr_list(cfqq, 0); | ||
1788 | } | ||
1789 | |||
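/*
 * sync requests (reads, or writes from a task doing synchronous io) are
 * keyed by the issuing task's pid; all other (async) requests share the
 * CFQ_KEY_ASYNC key
 */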
1790 | static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) | ||
1791 | { | ||
1792 | if (rw == READ || process_sync(task)) | ||
1793 | return task->pid; | ||
1794 | |||
1795 | return CFQ_KEY_ASYNC; | ||
1796 | } | ||
1797 | |||
1798 | static inline int | ||
1799 | __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, | ||
1800 | struct task_struct *task, int rw) | ||
1801 | { | ||
1802 | #if 1 | ||
1803 | if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) && | ||
1804 | !cfq_cfqq_must_alloc_slice(cfqq)) { | ||
1805 | cfq_mark_cfqq_must_alloc_slice(cfqq); | ||
1806 | return ELV_MQUEUE_MUST; | ||
1807 | } | ||
1808 | |||
1809 | return ELV_MQUEUE_MAY; | ||
1810 | #else | ||
1811 | if (!cfqq || task->flags & PF_MEMALLOC) | ||
1812 | return ELV_MQUEUE_MAY; | ||
1813 | if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) { | ||
1814 | if (cfq_cfqq_wait_request(cfqq)) | ||
1815 | return ELV_MQUEUE_MUST; | ||
1816 | |||
1817 | /* | ||
1818 | * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we | ||
1819 | * can quickly flood the queue with writes from a single task | ||
1820 | */ | ||
1821 | if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) { | ||
1822 | cfq_mark_cfqq_must_alloc_slice(cfqq); | ||
1823 | return ELV_MQUEUE_MUST; | ||
1824 | } | ||
1825 | |||
1826 | return ELV_MQUEUE_MAY; | ||
1827 | } | ||
1828 | if (cfq_class_idle(cfqq)) | ||
1829 | return ELV_MQUEUE_NO; | ||
1830 | if (cfqq->allocated[rw] >= cfqd->max_queued) { | ||
1831 | struct io_context *ioc = get_io_context(GFP_ATOMIC); | ||
1832 | int ret = ELV_MQUEUE_NO; | ||
1833 | |||
1834 | if (ioc && ioc->nr_batch_requests) | ||
1835 | ret = ELV_MQUEUE_MAY; | ||
1836 | |||
1837 | put_io_context(ioc); | ||
1838 | return ret; | ||
1839 | } | ||
1840 | |||
1841 | return ELV_MQUEUE_MAY; | ||
1842 | #endif | ||
1843 | } | ||
1844 | |||
1845 | static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio) | ||
1846 | { | ||
1847 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1848 | struct task_struct *tsk = current; | ||
1849 | struct cfq_queue *cfqq; | ||
1850 | |||
1851 | /* | ||
1852 | * don't force setup of a queue from here, as a call to may_queue | ||
1853 | * does not necessarily imply that a request actually will be queued. | ||
1854 | * so just look up a possibly existing queue, or return 'may queue' | ||
1855 | * if that fails | ||
1856 | */ | ||
1857 | cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio); | ||
1858 | if (cfqq) { | ||
1859 | cfq_init_prio_data(cfqq); | ||
1860 | cfq_prio_boost(cfqq); | ||
1861 | |||
1862 | return __cfq_may_queue(cfqd, cfqq, tsk, rw); | ||
1863 | } | ||
1864 | |||
1865 | return ELV_MQUEUE_MAY; | ||
1866 | } | ||
1867 | |||
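/*
 * a request was freed up; wake any sleepers on the request list if this
 * queue is back under its allowed limit (or if we are rq starved)
 */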
1868 | static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq) | ||
1869 | { | ||
1870 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1871 | struct request_list *rl = &q->rq; | ||
1872 | |||
1873 | if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) { | ||
1874 | smp_mb(); | ||
1875 | if (waitqueue_active(&rl->wait[READ])) | ||
1876 | wake_up(&rl->wait[READ]); | ||
1877 | } | ||
1878 | |||
1879 | if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) { | ||
1880 | smp_mb(); | ||
1881 | if (waitqueue_active(&rl->wait[WRITE])) | ||
1882 | wake_up(&rl->wait[WRITE]); | ||
1883 | } | ||
1884 | } | ||
1885 | |||
1886 | /* | ||
1887 | * queue lock held here | ||
1888 | */ | ||
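/*
 * undoes the work done in cfq_set_request(): drops the allocated[] count,
 * releases the io_context and queue references and returns the crq to
 * the mempool
 */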
1889 | static void cfq_put_request(request_queue_t *q, struct request *rq) | ||
1890 | { | ||
1891 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1892 | struct cfq_rq *crq = RQ_DATA(rq); | ||
1893 | |||
1894 | if (crq) { | ||
1895 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
1896 | const int rw = rq_data_dir(rq); | ||
1897 | |||
1898 | BUG_ON(!cfqq->allocated[rw]); | ||
1899 | cfqq->allocated[rw]--; | ||
1900 | |||
1901 | put_io_context(crq->io_context->ioc); | ||
1902 | |||
1903 | mempool_free(crq, cfqd->crq_pool); | ||
1904 | rq->elevator_private = NULL; | ||
1905 | |||
1906 | cfq_check_waiters(q, cfqq); | ||
1907 | cfq_put_queue(cfqq); | ||
1908 | } | ||
1909 | } | ||
1910 | |||
1911 | /* | ||
1912 | * Allocate cfq data structures associated with this request. | ||
1913 | */ | ||
1914 | static int | ||
1915 | cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | ||
1916 | gfp_t gfp_mask) | ||
1917 | { | ||
1918 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1919 | struct task_struct *tsk = current; | ||
1920 | struct cfq_io_context *cic; | ||
1921 | const int rw = rq_data_dir(rq); | ||
1922 | pid_t key = cfq_queue_pid(tsk, rw); | ||
1923 | struct cfq_queue *cfqq; | ||
1924 | struct cfq_rq *crq; | ||
1925 | unsigned long flags; | ||
1926 | |||
1927 | might_sleep_if(gfp_mask & __GFP_WAIT); | ||
1928 | |||
1929 | cic = cfq_get_io_context(cfqd, key, gfp_mask); | ||
1930 | |||
1931 | spin_lock_irqsave(q->queue_lock, flags); | ||
1932 | |||
1933 | if (!cic) | ||
1934 | goto queue_fail; | ||
1935 | |||
1936 | if (!cic->cfqq) { | ||
1937 | cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask); | ||
1938 | if (!cfqq) | ||
1939 | goto queue_fail; | ||
1940 | |||
1941 | cic->cfqq = cfqq; | ||
1942 | } else | ||
1943 | cfqq = cic->cfqq; | ||
1944 | |||
1945 | cfqq->allocated[rw]++; | ||
1946 | cfq_clear_cfqq_must_alloc(cfqq); | ||
1947 | cfqd->rq_starved = 0; | ||
1948 | atomic_inc(&cfqq->ref); | ||
1949 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
1950 | |||
1951 | crq = mempool_alloc(cfqd->crq_pool, gfp_mask); | ||
1952 | if (crq) { | ||
1953 | RB_CLEAR(&crq->rb_node); | ||
1954 | crq->rb_key = 0; | ||
1955 | crq->request = rq; | ||
1956 | INIT_HLIST_NODE(&crq->hash); | ||
1957 | crq->cfq_queue = cfqq; | ||
1958 | crq->io_context = cic; | ||
1959 | |||
1960 | if (rw == READ || process_sync(tsk)) | ||
1961 | cfq_mark_crq_is_sync(crq); | ||
1962 | else | ||
1963 | cfq_clear_crq_is_sync(crq); | ||
1964 | |||
1965 | rq->elevator_private = crq; | ||
1966 | return 0; | ||
1967 | } | ||
1968 | |||
1969 | spin_lock_irqsave(q->queue_lock, flags); | ||
1970 | cfqq->allocated[rw]--; | ||
1971 | if (!(cfqq->allocated[0] + cfqq->allocated[1])) | ||
1972 | cfq_mark_cfqq_must_alloc(cfqq); | ||
1973 | cfq_put_queue(cfqq); | ||
1974 | queue_fail: | ||
1975 | if (cic) | ||
1976 | put_io_context(cic->ioc); | ||
1977 | /* | ||
1978 | * mark us as rq allocation starved. we need to kickstart the process | ||
1979 | * ourselves if there are no pending requests that can do it for us. | ||
1980 | * that would be an extremely rare OOM situation | ||
1981 | */ | ||
1982 | cfqd->rq_starved = 1; | ||
1983 | cfq_schedule_dispatch(cfqd); | ||
1984 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
1985 | return 1; | ||
1986 | } | ||
1987 | |||
1988 | static void cfq_kick_queue(void *data) | ||
1989 | { | ||
1990 | request_queue_t *q = data; | ||
1991 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1992 | unsigned long flags; | ||
1993 | |||
1994 | spin_lock_irqsave(q->queue_lock, flags); | ||
1995 | |||
1996 | if (cfqd->rq_starved) { | ||
1997 | struct request_list *rl = &q->rq; | ||
1998 | |||
1999 | /* | ||
2000 | * we aren't guaranteed to get a request after this, but we | ||
2001 | * have to be opportunistic | ||
2002 | */ | ||
2003 | smp_mb(); | ||
2004 | if (waitqueue_active(&rl->wait[READ])) | ||
2005 | wake_up(&rl->wait[READ]); | ||
2006 | if (waitqueue_active(&rl->wait[WRITE])) | ||
2007 | wake_up(&rl->wait[WRITE]); | ||
2008 | } | ||
2009 | |||
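/*
 * clear any plug on the queue and run the request_fn to dispatch
 * pending requests
 */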
2010 | blk_remove_plug(q); | ||
2011 | q->request_fn(q); | ||
2012 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2013 | } | ||
2014 | |||
2015 | /* | ||
2016 | * Timer running if the active_queue is currently idling inside its time slice | ||
2017 | */ | ||
2018 | static void cfq_idle_slice_timer(unsigned long data) | ||
2019 | { | ||
2020 | struct cfq_data *cfqd = (struct cfq_data *) data; | ||
2021 | struct cfq_queue *cfqq; | ||
2022 | unsigned long flags; | ||
2023 | |||
2024 | spin_lock_irqsave(cfqd->queue->queue_lock, flags); | ||
2025 | |||
2026 | if ((cfqq = cfqd->active_queue) != NULL) { | ||
2027 | unsigned long now = jiffies; | ||
2028 | |||
2029 | /* | ||
2030 | * expired | ||
2031 | */ | ||
2032 | if (time_after(now, cfqq->slice_end)) | ||
2033 | goto expire; | ||
2034 | |||
2035 | /* | ||
2036 | * only expire and reinvoke the request handler if there are | ||
2037 | * other queues with pending requests | ||
2038 | */ | ||
2039 | if (!cfqd->busy_queues) { | ||
2040 | cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end); | ||
2041 | add_timer(&cfqd->idle_slice_timer); | ||
2042 | goto out_cont; | ||
2043 | } | ||
2044 | |||
2045 | /* | ||
2046 | * not expired and it has a request pending, let it dispatch | ||
2047 | */ | ||
2048 | if (!RB_EMPTY(&cfqq->sort_list)) { | ||
2049 | cfq_mark_cfqq_must_dispatch(cfqq); | ||
2050 | goto out_kick; | ||
2051 | } | ||
2052 | } | ||
2053 | expire: | ||
2054 | cfq_slice_expired(cfqd, 0); | ||
2055 | out_kick: | ||
2056 | cfq_schedule_dispatch(cfqd); | ||
2057 | out_cont: | ||
2058 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | ||
2059 | } | ||
2060 | |||
2061 | /* | ||
2062 | * Timer running if an idle class queue is waiting for service | ||
2063 | */ | ||
2064 | static void cfq_idle_class_timer(unsigned long data) | ||
2065 | { | ||
2066 | struct cfq_data *cfqd = (struct cfq_data *) data; | ||
2067 | unsigned long flags, end; | ||
2068 | |||
2069 | spin_lock_irqsave(cfqd->queue->queue_lock, flags); | ||
2070 | |||
2071 | /* | ||
2072 | * race with a non-idle queue, reset timer | ||
2073 | */ | ||
2074 | end = cfqd->last_end_request + CFQ_IDLE_GRACE; | ||
2075 | if (!time_after_eq(jiffies, end)) { | ||
2076 | cfqd->idle_class_timer.expires = end; | ||
2077 | add_timer(&cfqd->idle_class_timer); | ||
2078 | } else | ||
2079 | cfq_schedule_dispatch(cfqd); | ||
2080 | |||
2081 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | ||
2082 | } | ||
2083 | |||
2084 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) | ||
2085 | { | ||
2086 | del_timer_sync(&cfqd->idle_slice_timer); | ||
2087 | del_timer_sync(&cfqd->idle_class_timer); | ||
2088 | blk_sync_queue(cfqd->queue); | ||
2089 | } | ||
2090 | |||
2091 | static void cfq_put_cfqd(struct cfq_data *cfqd) | ||
2092 | { | ||
2093 | request_queue_t *q = cfqd->queue; | ||
2094 | |||
2095 | if (!atomic_dec_and_test(&cfqd->ref)) | ||
2096 | return; | ||
2097 | |||
2098 | cfq_shutdown_timer_wq(cfqd); | ||
2099 | blk_put_queue(q); | ||
2100 | |||
2101 | mempool_destroy(cfqd->crq_pool); | ||
2102 | kfree(cfqd->crq_hash); | ||
2103 | kfree(cfqd->cfq_hash); | ||
2104 | kfree(cfqd); | ||
2105 | } | ||
2106 | |||
2107 | static void cfq_exit_queue(elevator_t *e) | ||
2108 | { | ||
2109 | struct cfq_data *cfqd = e->elevator_data; | ||
2110 | |||
2111 | cfq_shutdown_timer_wq(cfqd); | ||
2112 | cfq_put_cfqd(cfqd); | ||
2113 | } | ||
2114 | |||
2115 | static int cfq_init_queue(request_queue_t *q, elevator_t *e) | ||
2116 | { | ||
2117 | struct cfq_data *cfqd; | ||
2118 | int i; | ||
2119 | |||
2120 | cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL); | ||
2121 | if (!cfqd) | ||
2122 | return -ENOMEM; | ||
2123 | |||
2124 | memset(cfqd, 0, sizeof(*cfqd)); | ||
2125 | |||
2126 | for (i = 0; i < CFQ_PRIO_LISTS; i++) | ||
2127 | INIT_LIST_HEAD(&cfqd->rr_list[i]); | ||
2128 | |||
2129 | INIT_LIST_HEAD(&cfqd->busy_rr); | ||
2130 | INIT_LIST_HEAD(&cfqd->cur_rr); | ||
2131 | INIT_LIST_HEAD(&cfqd->idle_rr); | ||
2132 | INIT_LIST_HEAD(&cfqd->empty_list); | ||
2133 | |||
2134 | cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); | ||
2135 | if (!cfqd->crq_hash) | ||
2136 | goto out_crqhash; | ||
2137 | |||
2138 | cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL); | ||
2139 | if (!cfqd->cfq_hash) | ||
2140 | goto out_cfqhash; | ||
2141 | |||
2142 | cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool); | ||
2143 | if (!cfqd->crq_pool) | ||
2144 | goto out_crqpool; | ||
2145 | |||
2146 | for (i = 0; i < CFQ_MHASH_ENTRIES; i++) | ||
2147 | INIT_HLIST_HEAD(&cfqd->crq_hash[i]); | ||
2148 | for (i = 0; i < CFQ_QHASH_ENTRIES; i++) | ||
2149 | INIT_HLIST_HEAD(&cfqd->cfq_hash[i]); | ||
2150 | |||
2151 | e->elevator_data = cfqd; | ||
2152 | |||
2153 | cfqd->queue = q; | ||
2154 | atomic_inc(&q->refcnt); | ||
2155 | |||
2156 | cfqd->max_queued = q->nr_requests / 4; | ||
2157 | q->nr_batching = cfq_queued; | ||
2158 | |||
2159 | init_timer(&cfqd->idle_slice_timer); | ||
2160 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; | ||
2161 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; | ||
2162 | |||
2163 | init_timer(&cfqd->idle_class_timer); | ||
2164 | cfqd->idle_class_timer.function = cfq_idle_class_timer; | ||
2165 | cfqd->idle_class_timer.data = (unsigned long) cfqd; | ||
2166 | |||
2167 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); | ||
2168 | |||
2169 | atomic_set(&cfqd->ref, 1); | ||
2170 | |||
2171 | cfqd->cfq_queued = cfq_queued; | ||
2172 | cfqd->cfq_quantum = cfq_quantum; | ||
2173 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; | ||
2174 | cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; | ||
2175 | cfqd->cfq_back_max = cfq_back_max; | ||
2176 | cfqd->cfq_back_penalty = cfq_back_penalty; | ||
2177 | cfqd->cfq_slice[0] = cfq_slice_async; | ||
2178 | cfqd->cfq_slice[1] = cfq_slice_sync; | ||
2179 | cfqd->cfq_slice_async_rq = cfq_slice_async_rq; | ||
2180 | cfqd->cfq_slice_idle = cfq_slice_idle; | ||
2181 | cfqd->cfq_max_depth = cfq_max_depth; | ||
2182 | |||
2183 | return 0; | ||
2184 | out_crqpool: | ||
2185 | kfree(cfqd->cfq_hash); | ||
2186 | out_cfqhash: | ||
2187 | kfree(cfqd->crq_hash); | ||
2188 | out_crqhash: | ||
2189 | kfree(cfqd); | ||
2190 | return -ENOMEM; | ||
2191 | } | ||
2192 | |||
2193 | static void cfq_slab_kill(void) | ||
2194 | { | ||
2195 | if (crq_pool) | ||
2196 | kmem_cache_destroy(crq_pool); | ||
2197 | if (cfq_pool) | ||
2198 | kmem_cache_destroy(cfq_pool); | ||
2199 | if (cfq_ioc_pool) | ||
2200 | kmem_cache_destroy(cfq_ioc_pool); | ||
2201 | } | ||
2202 | |||
2203 | static int __init cfq_slab_setup(void) | ||
2204 | { | ||
2205 | crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0, | ||
2206 | NULL, NULL); | ||
2207 | if (!crq_pool) | ||
2208 | goto fail; | ||
2209 | |||
2210 | cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0, | ||
2211 | NULL, NULL); | ||
2212 | if (!cfq_pool) | ||
2213 | goto fail; | ||
2214 | |||
2215 | cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool", | ||
2216 | sizeof(struct cfq_io_context), 0, 0, NULL, NULL); | ||
2217 | if (!cfq_ioc_pool) | ||
2218 | goto fail; | ||
2219 | |||
2220 | return 0; | ||
2221 | fail: | ||
2222 | cfq_slab_kill(); | ||
2223 | return -ENOMEM; | ||
2224 | } | ||
2225 | |||
2226 | /* | ||
2227 | * sysfs parts below --> | ||
2228 | */ | ||
2229 | struct cfq_fs_entry { | ||
2230 | struct attribute attr; | ||
2231 | ssize_t (*show)(struct cfq_data *, char *); | ||
2232 | ssize_t (*store)(struct cfq_data *, const char *, size_t); | ||
2233 | }; | ||
2234 | |||
2235 | static ssize_t | ||
2236 | cfq_var_show(unsigned int var, char *page) | ||
2237 | { | ||
2238 | return sprintf(page, "%d\n", var); | ||
2239 | } | ||
2240 | |||
2241 | static ssize_t | ||
2242 | cfq_var_store(unsigned int *var, const char *page, size_t count) | ||
2243 | { | ||
2244 | char *p = (char *) page; | ||
2245 | |||
2246 | *var = simple_strtoul(p, &p, 10); | ||
2247 | return count; | ||
2248 | } | ||
2249 | |||
2250 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ | ||
2251 | static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ | ||
2252 | { \ | ||
2253 | unsigned int __data = __VAR; \ | ||
2254 | if (__CONV) \ | ||
2255 | __data = jiffies_to_msecs(__data); \ | ||
2256 | return cfq_var_show(__data, (page)); \ | ||
2257 | } | ||
2258 | SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); | ||
2259 | SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); | ||
2260 | SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); | ||
2261 | SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); | ||
2262 | SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); | ||
2263 | SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); | ||
2264 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); | ||
2265 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); | ||
2266 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); | ||
2267 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); | ||
2268 | SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0); | ||
2269 | #undef SHOW_FUNCTION | ||
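/*
 * as an example, SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0)
 * above expands to roughly:
 *
 *	static ssize_t cfq_quantum_show(struct cfq_data *cfqd, char *page)
 *	{
 *		return cfq_var_show(cfqd->cfq_quantum, page);
 *	}
 *
 * entries generated with __CONV set report their jiffies-based value
 * converted to milliseconds
 */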
2270 | |||
2271 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | ||
2272 | static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \ | ||
2273 | { \ | ||
2274 | unsigned int __data; \ | ||
2275 | int ret = cfq_var_store(&__data, (page), count); \ | ||
2276 | if (__data < (MIN)) \ | ||
2277 | __data = (MIN); \ | ||
2278 | else if (__data > (MAX)) \ | ||
2279 | __data = (MAX); \ | ||
2280 | if (__CONV) \ | ||
2281 | *(__PTR) = msecs_to_jiffies(__data); \ | ||
2282 | else \ | ||
2283 | *(__PTR) = __data; \ | ||
2284 | return ret; \ | ||
2285 | } | ||
2286 | STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); | ||
2287 | STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); | ||
2288 | STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); | ||
2289 | STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); | ||
2290 | STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); | ||
2291 | STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); | ||
2292 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); | ||
2293 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); | ||
2294 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); | ||
2295 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); | ||
2296 | STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); | ||
2297 | #undef STORE_FUNCTION | ||
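/*
 * the store variants parse the sysfs input with cfq_var_store(), clamp it
 * to [MIN, MAX] and write it through __PTR; tunables generated with __CONV
 * set accept milliseconds and store the msecs_to_jiffies() converted value
 */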
2298 | |||
2299 | static struct cfq_fs_entry cfq_quantum_entry = { | ||
2300 | .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR }, | ||
2301 | .show = cfq_quantum_show, | ||
2302 | .store = cfq_quantum_store, | ||
2303 | }; | ||
2304 | static struct cfq_fs_entry cfq_queued_entry = { | ||
2305 | .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR }, | ||
2306 | .show = cfq_queued_show, | ||
2307 | .store = cfq_queued_store, | ||
2308 | }; | ||
2309 | static struct cfq_fs_entry cfq_fifo_expire_sync_entry = { | ||
2310 | .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR }, | ||
2311 | .show = cfq_fifo_expire_sync_show, | ||
2312 | .store = cfq_fifo_expire_sync_store, | ||
2313 | }; | ||
2314 | static struct cfq_fs_entry cfq_fifo_expire_async_entry = { | ||
2315 | .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR }, | ||
2316 | .show = cfq_fifo_expire_async_show, | ||
2317 | .store = cfq_fifo_expire_async_store, | ||
2318 | }; | ||
2319 | static struct cfq_fs_entry cfq_back_max_entry = { | ||
2320 | .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR }, | ||
2321 | .show = cfq_back_max_show, | ||
2322 | .store = cfq_back_max_store, | ||
2323 | }; | ||
2324 | static struct cfq_fs_entry cfq_back_penalty_entry = { | ||
2325 | .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR }, | ||
2326 | .show = cfq_back_penalty_show, | ||
2327 | .store = cfq_back_penalty_store, | ||
2328 | }; | ||
2329 | static struct cfq_fs_entry cfq_slice_sync_entry = { | ||
2330 | .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR }, | ||
2331 | .show = cfq_slice_sync_show, | ||
2332 | .store = cfq_slice_sync_store, | ||
2333 | }; | ||
2334 | static struct cfq_fs_entry cfq_slice_async_entry = { | ||
2335 | .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR }, | ||
2336 | .show = cfq_slice_async_show, | ||
2337 | .store = cfq_slice_async_store, | ||
2338 | }; | ||
2339 | static struct cfq_fs_entry cfq_slice_async_rq_entry = { | ||
2340 | .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR }, | ||
2341 | .show = cfq_slice_async_rq_show, | ||
2342 | .store = cfq_slice_async_rq_store, | ||
2343 | }; | ||
2344 | static struct cfq_fs_entry cfq_slice_idle_entry = { | ||
2345 | .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR }, | ||
2346 | .show = cfq_slice_idle_show, | ||
2347 | .store = cfq_slice_idle_store, | ||
2348 | }; | ||
2349 | static struct cfq_fs_entry cfq_max_depth_entry = { | ||
2350 | .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR }, | ||
2351 | .show = cfq_max_depth_show, | ||
2352 | .store = cfq_max_depth_store, | ||
2353 | }; | ||
2354 | |||
2355 | static struct attribute *default_attrs[] = { | ||
2356 | &cfq_quantum_entry.attr, | ||
2357 | &cfq_queued_entry.attr, | ||
2358 | &cfq_fifo_expire_sync_entry.attr, | ||
2359 | &cfq_fifo_expire_async_entry.attr, | ||
2360 | &cfq_back_max_entry.attr, | ||
2361 | &cfq_back_penalty_entry.attr, | ||
2362 | &cfq_slice_sync_entry.attr, | ||
2363 | &cfq_slice_async_entry.attr, | ||
2364 | &cfq_slice_async_rq_entry.attr, | ||
2365 | &cfq_slice_idle_entry.attr, | ||
2366 | &cfq_max_depth_entry.attr, | ||
2367 | NULL, | ||
2368 | }; | ||
2369 | |||
2370 | #define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr) | ||
2371 | |||
2372 | static ssize_t | ||
2373 | cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
2374 | { | ||
2375 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
2376 | struct cfq_fs_entry *entry = to_cfq(attr); | ||
2377 | |||
2378 | if (!entry->show) | ||
2379 | return -EIO; | ||
2380 | |||
2381 | return entry->show(e->elevator_data, page); | ||
2382 | } | ||
2383 | |||
2384 | static ssize_t | ||
2385 | cfq_attr_store(struct kobject *kobj, struct attribute *attr, | ||
2386 | const char *page, size_t length) | ||
2387 | { | ||
2388 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
2389 | struct cfq_fs_entry *entry = to_cfq(attr); | ||
2390 | |||
2391 | if (!entry->store) | ||
2392 | return -EIO; | ||
2393 | |||
2394 | return entry->store(e->elevator_data, page, length); | ||
2395 | } | ||
2396 | |||
2397 | static struct sysfs_ops cfq_sysfs_ops = { | ||
2398 | .show = cfq_attr_show, | ||
2399 | .store = cfq_attr_store, | ||
2400 | }; | ||
2401 | |||
2402 | static struct kobj_type cfq_ktype = { | ||
2403 | .sysfs_ops = &cfq_sysfs_ops, | ||
2404 | .default_attrs = default_attrs, | ||
2405 | }; | ||
2406 | |||
2407 | static struct elevator_type iosched_cfq = { | ||
2408 | .ops = { | ||
2409 | .elevator_merge_fn = cfq_merge, | ||
2410 | .elevator_merged_fn = cfq_merged_request, | ||
2411 | .elevator_merge_req_fn = cfq_merged_requests, | ||
2412 | .elevator_dispatch_fn = cfq_dispatch_requests, | ||
2413 | .elevator_add_req_fn = cfq_insert_request, | ||
2414 | .elevator_activate_req_fn = cfq_activate_request, | ||
2415 | .elevator_deactivate_req_fn = cfq_deactivate_request, | ||
2416 | .elevator_queue_empty_fn = cfq_queue_empty, | ||
2417 | .elevator_completed_req_fn = cfq_completed_request, | ||
2418 | .elevator_former_req_fn = cfq_former_request, | ||
2419 | .elevator_latter_req_fn = cfq_latter_request, | ||
2420 | .elevator_set_req_fn = cfq_set_request, | ||
2421 | .elevator_put_req_fn = cfq_put_request, | ||
2422 | .elevator_may_queue_fn = cfq_may_queue, | ||
2423 | .elevator_init_fn = cfq_init_queue, | ||
2424 | .elevator_exit_fn = cfq_exit_queue, | ||
2425 | }, | ||
2426 | .elevator_ktype = &cfq_ktype, | ||
2427 | .elevator_name = "cfq", | ||
2428 | .elevator_owner = THIS_MODULE, | ||
2429 | }; | ||
2430 | |||
2431 | static int __init cfq_init(void) | ||
2432 | { | ||
2433 | int ret; | ||
2434 | |||
2435 | /* | ||
2436 | * could be 0 on HZ < 1000 setups | ||
2437 | */ | ||
2438 | if (!cfq_slice_async) | ||
2439 | cfq_slice_async = 1; | ||
2440 | if (!cfq_slice_idle) | ||
2441 | cfq_slice_idle = 1; | ||
2442 | |||
2443 | if (cfq_slab_setup()) | ||
2444 | return -ENOMEM; | ||
2445 | |||
2446 | ret = elv_register(&iosched_cfq); | ||
2447 | if (ret) | ||
2448 | cfq_slab_kill(); | ||
2449 | |||
2450 | return ret; | ||
2451 | } | ||
2452 | |||
2453 | static void __exit cfq_exit(void) | ||
2454 | { | ||
2455 | elv_unregister(&iosched_cfq); | ||
2456 | cfq_slab_kill(); | ||
2457 | } | ||
2458 | |||
2459 | module_init(cfq_init); | ||
2460 | module_exit(cfq_exit); | ||
2461 | |||
2462 | MODULE_AUTHOR("Jens Axboe"); | ||
2463 | MODULE_LICENSE("GPL"); | ||
2464 | MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler"); | ||