author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/block/deadline-iosched.c
tags		Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/block/deadline-iosched.c')
-rw-r--r--	drivers/block/deadline-iosched.c	967
1 files changed, 967 insertions, 0 deletions
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
new file mode 100644
index 000000000000..d63d34c671f7
--- /dev/null
+++ b/drivers/block/deadline-iosched.c
@@ -0,0 +1,967 @@
/*
 *  linux/drivers/block/deadline-iosched.c
 *
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static int read_expire = HZ / 2;  /* max time before a read is submitted. */
static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static int writes_starved = 2;    /* max times reads can starve a write */
static int fifo_batch = 16;       /* # of sequential requests treated as one
                                     by the above parameters. For throughput. */

static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)	((sec) >> 3)
#define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	list_entry((ptr), struct deadline_rq, hash)
#define ON_HASH(drq)		(drq)->on_hash
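
/*
 * note: the merge hash is keyed on a request's end sector (rq_hash_key), so
 * an incoming bio's start sector can cheaply locate a back merge candidate;
 * end sectors are grouped into 8-sector blocks and spread over
 * DL_HASH_ENTRIES chains
 */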
37 | |||
38 | struct deadline_data { | ||
39 | /* | ||
40 | * run time data | ||
41 | */ | ||
42 | |||
43 | /* | ||
44 | * requests (deadline_rq s) are present on both sort_list and fifo_list | ||
45 | */ | ||
46 | struct rb_root sort_list[2]; | ||
47 | struct list_head fifo_list[2]; | ||
48 | |||
49 | /* | ||
50 | * next in sort order. read, write or both are NULL | ||
51 | */ | ||
52 | struct deadline_rq *next_drq[2]; | ||
53 | struct list_head *dispatch; /* driver dispatch queue */ | ||
54 | struct list_head *hash; /* request hash */ | ||
55 | unsigned int batching; /* number of sequential requests made */ | ||
56 | sector_t last_sector; /* head position */ | ||
57 | unsigned int starved; /* times reads have starved writes */ | ||
58 | |||
59 | /* | ||
60 | * settings that change how the i/o scheduler behaves | ||
61 | */ | ||
62 | int fifo_expire[2]; | ||
63 | int fifo_batch; | ||
64 | int writes_starved; | ||
65 | int front_merges; | ||
66 | |||
67 | mempool_t *drq_pool; | ||
68 | }; | ||
69 | |||
70 | /* | ||
71 | * pre-request data. | ||
72 | */ | ||
73 | struct deadline_rq { | ||
74 | /* | ||
75 | * rbtree index, key is the starting offset | ||
76 | */ | ||
77 | struct rb_node rb_node; | ||
78 | sector_t rb_key; | ||
79 | |||
80 | struct request *request; | ||
81 | |||
82 | /* | ||
83 | * request hash, key is the ending offset (for back merge lookup) | ||
84 | */ | ||
85 | struct list_head hash; | ||
86 | char on_hash; | ||
87 | |||
88 | /* | ||
89 | * expire fifo | ||
90 | */ | ||
91 | struct list_head fifo; | ||
92 | unsigned long expires; | ||
93 | }; | ||
94 | |||
95 | static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq); | ||
96 | |||
97 | static kmem_cache_t *drq_pool; | ||
98 | |||
99 | #define RQ_DATA(rq) ((struct deadline_rq *) (rq)->elevator_private) | ||
100 | |||
101 | /* | ||
102 | * the back merge hash support functions | ||
103 | */ | ||
104 | static inline void __deadline_del_drq_hash(struct deadline_rq *drq) | ||
105 | { | ||
106 | drq->on_hash = 0; | ||
107 | list_del_init(&drq->hash); | ||
108 | } | ||
109 | |||
110 | static inline void deadline_del_drq_hash(struct deadline_rq *drq) | ||
111 | { | ||
112 | if (ON_HASH(drq)) | ||
113 | __deadline_del_drq_hash(drq); | ||
114 | } | ||
115 | |||
116 | static void | ||
117 | deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq) | ||
118 | { | ||
119 | deadline_del_drq_hash(drq); | ||
120 | |||
121 | if (q->last_merge == drq->request) | ||
122 | q->last_merge = NULL; | ||
123 | } | ||
124 | |||
125 | static inline void | ||
126 | deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) | ||
127 | { | ||
128 | struct request *rq = drq->request; | ||
129 | |||
130 | BUG_ON(ON_HASH(drq)); | ||
131 | |||
132 | drq->on_hash = 1; | ||
133 | list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]); | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * move hot entry to front of chain | ||
138 | */ | ||
139 | static inline void | ||
140 | deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) | ||
141 | { | ||
142 | struct request *rq = drq->request; | ||
143 | struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))]; | ||
144 | |||
145 | if (ON_HASH(drq) && drq->hash.prev != head) { | ||
146 | list_del(&drq->hash); | ||
147 | list_add(&drq->hash, head); | ||
148 | } | ||
149 | } | ||
150 | |||
151 | static struct request * | ||
152 | deadline_find_drq_hash(struct deadline_data *dd, sector_t offset) | ||
153 | { | ||
154 | struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)]; | ||
155 | struct list_head *entry, *next = hash_list->next; | ||
156 | |||
157 | while ((entry = next) != hash_list) { | ||
158 | struct deadline_rq *drq = list_entry_hash(entry); | ||
159 | struct request *__rq = drq->request; | ||
160 | |||
161 | next = entry->next; | ||
162 | |||
163 | BUG_ON(!ON_HASH(drq)); | ||
164 | |||
165 | if (!rq_mergeable(__rq)) { | ||
166 | __deadline_del_drq_hash(drq); | ||
167 | continue; | ||
168 | } | ||
169 | |||
170 | if (rq_hash_key(__rq) == offset) | ||
171 | return __rq; | ||
172 | } | ||
173 | |||
174 | return NULL; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * rb tree support functions | ||
179 | */ | ||
180 | #define RB_NONE (2) | ||
181 | #define RB_EMPTY(root) ((root)->rb_node == NULL) | ||
182 | #define ON_RB(node) ((node)->rb_color != RB_NONE) | ||
183 | #define RB_CLEAR(node) ((node)->rb_color = RB_NONE) | ||
184 | #define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node) | ||
185 | #define DRQ_RB_ROOT(dd, drq) (&(dd)->sort_list[rq_data_dir((drq)->request)]) | ||
186 | #define rq_rb_key(rq) (rq)->sector | ||
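
/*
 * note: ON_RB/RB_CLEAR reuse the node's rb_color field as an "on tree"
 * marker; RB_NONE is neither red nor black, so a cleared node is known
 * not to be linked into a sort_list
 */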
187 | |||
188 | static struct deadline_rq * | ||
189 | __deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq) | ||
190 | { | ||
191 | struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node; | ||
192 | struct rb_node *parent = NULL; | ||
193 | struct deadline_rq *__drq; | ||
194 | |||
195 | while (*p) { | ||
196 | parent = *p; | ||
197 | __drq = rb_entry_drq(parent); | ||
198 | |||
199 | if (drq->rb_key < __drq->rb_key) | ||
200 | p = &(*p)->rb_left; | ||
201 | else if (drq->rb_key > __drq->rb_key) | ||
202 | p = &(*p)->rb_right; | ||
203 | else | ||
204 | return __drq; | ||
205 | } | ||
206 | |||
207 | rb_link_node(&drq->rb_node, parent, p); | ||
208 | return NULL; | ||
209 | } | ||
210 | |||
211 | static void | ||
212 | deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq) | ||
213 | { | ||
214 | struct deadline_rq *__alias; | ||
215 | |||
216 | drq->rb_key = rq_rb_key(drq->request); | ||
217 | |||
218 | retry: | ||
219 | __alias = __deadline_add_drq_rb(dd, drq); | ||
220 | if (!__alias) { | ||
221 | rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq)); | ||
222 | return; | ||
223 | } | ||
224 | |||
        deadline_move_request(dd, __alias);
        goto retry;
}

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);

        if (dd->next_drq[data_dir] == drq) {
                struct rb_node *rbnext = rb_next(&drq->rb_node);

                dd->next_drq[data_dir] = NULL;
                if (rbnext)
                        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
        }

        if (ON_RB(&drq->rb_node)) {
                rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
                RB_CLEAR(&drq->rb_node);
        }
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;
        struct deadline_rq *drq;

        while (n) {
                drq = rb_entry_drq(n);

                if (sector < drq->rb_key)
                        n = n->rb_left;
                else if (sector > drq->rb_key)
                        n = n->rb_right;
                else
                        return drq->request;
        }

        return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;

        for (;;) {
                if (n->rb_left == NULL)
                        return rb_entry_drq(n);

                n = n->rb_left;
        }
}

/*
 * add drq to rbtree and fifo
 */
static inline void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        const int data_dir = rq_data_dir(drq->request);

        deadline_add_drq_rb(dd, drq);
        /*
         * set expire time (only used for reads) and add to fifo list
         */
        drq->expires = jiffies + dd->fifo_expire[data_dir];
        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

        if (rq_mergeable(rq)) {
                deadline_add_drq_hash(dd, drq);

                if (!q->last_merge)
                        q->last_merge = rq;
        }
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);

        if (drq) {
                struct deadline_data *dd = q->elevator->elevator_data;

                list_del_init(&drq->fifo);
                deadline_remove_merge_hints(q, drq);
                deadline_del_drq_rb(dd, drq);
        }
}

static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * try last_merge to avoid going to hash
         */
        ret = elv_try_last_merge(q, bio);
        if (ret != ELEVATOR_NO_MERGE) {
                __rq = q->last_merge;
                goto out_insert;
        }

        /*
         * see if the merge hash can satisfy a back merge
         */
        __rq = deadline_find_drq_hash(dd, bio->bi_sector);
        if (__rq) {
                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

                if (elv_rq_merge_ok(__rq, bio)) {
                        ret = ELEVATOR_BACK_MERGE;
                        goto out;
                }
        }

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t rb_key = bio->bi_sector + bio_sectors(bio);

                __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
                if (__rq) {
                        BUG_ON(rb_key != rq_rb_key(__rq));

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        q->last_merge = __rq;
out_insert:
        if (ret)
                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
        *req = __rq;
        return ret;
}

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);

        /*
         * hash always needs to be repositioned, key is end sector
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }

        q->last_merge = req;
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
                         struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);
        struct deadline_rq *dnext = RQ_DATA(next);

        BUG_ON(!drq);
        BUG_ON(!dnext);

        /*
         * reposition drq (this is the merged request) in hash, and in rbtree
         * in case of a front merge
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }

        /*
         * if dnext expires before drq, assign its expire time to drq
         * and move into dnext position (dnext will be deleted) in fifo
         */
        if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
                if (time_before(dnext->expires, drq->expires)) {
                        list_move(&drq->fifo, &dnext->fifo);
                        drq->expires = dnext->expires;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
        request_queue_t *q = drq->request->q;

        deadline_remove_request(q, drq->request);
        list_add_tail(&drq->request->queuelist, dd->dispatch);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        dd->next_drq[READ] = NULL;
        dd->next_drq[WRITE] = NULL;

        if (rbnext)
                dd->next_drq[data_dir] = rb_entry_drq(rbnext);

        dd->last_sector = drq->request->sector + drq->request->nr_sectors;

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired reads on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * drq is expired!
         */
        if (time_after(jiffies, drq->expires))
                return 1;

        return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct deadline_data *dd)
{
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
        int data_dir, other_dir;

        /*
         * batches are currently reads XOR writes
         */
        drq = NULL;

        if (dd->next_drq[READ])
                drq = dd->next_drq[READ];

        if (dd->next_drq[WRITE])
                drq = dd->next_drq[WRITE];

        if (drq) {
                /* we have a "next request" */

                if (dd->last_sector != drq->request->sector)
                        /* end the batch on a non sequential request */
                        dd->batching += dd->fifo_batch;

                if (dd->batching < dd->fifo_batch)
                        /* we are still entitled to batch */
                        goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;
                other_dir = WRITE;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;
                other_dir = READ;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir)) {
                /* An expired request exists - satisfy it */
                dd->batching = 0;
                drq = list_entry_fifo(dd->fifo_list[data_dir].next);

        } else if (dd->next_drq[data_dir]) {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                drq = dd->next_drq[data_dir];
        } else {
                /*
                 * The last req was the other direction or we have run out of
                 * higher-sectored requests. Go back to the lowest sectored
                 * request (1 way elevator) and start a new batch.
                 */
                dd->batching = 0;
                drq = deadline_find_first_drq(dd, data_dir);
        }

dispatch_request:
        /*
         * drq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, drq);

        return 1;
}

static struct request *deadline_next_request(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *rq;

        /*
         * if there are still requests on the dispatch queue, grab the first one
         */
        if (!list_empty(dd->dispatch)) {
dispatch:
                rq = list_entry_rq(dd->dispatch->next);
                return rq;
        }

        if (deadline_dispatch_requests(dd))
                goto dispatch;

        return NULL;
}

static void
deadline_insert_request(request_queue_t *q, struct request *rq, int where)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /* barriers must flush the reorder queue */
        if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
                     && where == ELEVATOR_INSERT_SORT))
                where = ELEVATOR_INSERT_BACK;

        switch (where) {
                case ELEVATOR_INSERT_BACK:
                        while (deadline_dispatch_requests(dd))
                                ;
                        list_add_tail(&rq->queuelist, dd->dispatch);
                        break;
                case ELEVATOR_INSERT_FRONT:
                        list_add(&rq->queuelist, dd->dispatch);
                        break;
                case ELEVATOR_INSERT_SORT:
                        BUG_ON(!blk_fs_request(rq));
                        deadline_add_request(q, rq);
                        break;
                default:
                        printk("%s: bad insert point %d\n", __FUNCTION__, where);
                        return;
        }
}

static int deadline_queue_empty(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        if (!list_empty(&dd->fifo_list[WRITE])
            || !list_empty(&dd->fifo_list[READ])
            || !list_empty(dd->dispatch))
                return 0;

        return 1;
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbprev = rb_prev(&drq->rb_node);

        if (rbprev)
                return rb_entry_drq(rbprev)->request;

        return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        if (rbnext)
                return rb_entry_drq(rbnext)->request;

        return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        mempool_destroy(dd->drq_pool);
        kfree(dd->hash);
        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static int deadline_init_queue(request_queue_t *q, elevator_t *e)
{
        struct deadline_data *dd;
        int i;

        if (!drq_pool)
                return -ENOMEM;

        dd = kmalloc(sizeof(*dd), GFP_KERNEL);
        if (!dd)
                return -ENOMEM;
        memset(dd, 0, sizeof(*dd));

        dd->hash = kmalloc(sizeof(struct list_head)*DL_HASH_ENTRIES, GFP_KERNEL);
        if (!dd->hash) {
                kfree(dd);
                return -ENOMEM;
        }

        dd->drq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, drq_pool);
        if (!dd->drq_pool) {
                kfree(dd->hash);
                kfree(dd);
                return -ENOMEM;
        }

        for (i = 0; i < DL_HASH_ENTRIES; i++)
                INIT_LIST_HEAD(&dd->hash[i]);

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
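        /* dispatched requests go straight onto the queue's own request list */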
        dd->dispatch = &q->queue_head;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        e->elevator_data = dd;
        return 0;
}

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        if (drq) {
                mempool_free(drq, dd->drq_pool);
                rq->elevator_private = NULL;
        }
}

static int
deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq;

        drq = mempool_alloc(dd->drq_pool, gfp_mask);
        if (drq) {
                memset(drq, 0, sizeof(*drq));
                RB_CLEAR(&drq->rb_node);
                drq->request = rq;

                INIT_LIST_HEAD(&drq->hash);
                drq->on_hash = 0;

                INIT_LIST_HEAD(&drq->fifo);

                rq->elevator_private = drq;
                return 0;
        }

        return 1;
}

/*
 * sysfs parts below
 */
struct deadline_fs_entry {
        struct attribute attr;
        ssize_t (*show)(struct deadline_data *, char *);
        ssize_t (*store)(struct deadline_data *, const char *, size_t);
};

static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct deadline_data *dd, char *page)		\
{									\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \
{									\
	int __data;							\
	int ret = deadline_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

static struct deadline_fs_entry deadline_readexpire_entry = {
        .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_readexpire_show,
        .store = deadline_readexpire_store,
};
static struct deadline_fs_entry deadline_writeexpire_entry = {
        .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_writeexpire_show,
        .store = deadline_writeexpire_store,
};
static struct deadline_fs_entry deadline_writesstarved_entry = {
        .attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_writesstarved_show,
        .store = deadline_writesstarved_store,
};
static struct deadline_fs_entry deadline_frontmerges_entry = {
        .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_frontmerges_show,
        .store = deadline_frontmerges_store,
};
static struct deadline_fs_entry deadline_fifobatch_entry = {
        .attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_fifobatch_show,
        .store = deadline_fifobatch_store,
};

static struct attribute *default_attrs[] = {
        &deadline_readexpire_entry.attr,
        &deadline_writeexpire_entry.attr,
        &deadline_writesstarved_entry.attr,
        &deadline_frontmerges_entry.attr,
        &deadline_fifobatch_entry.attr,
        NULL,
};

#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)

static ssize_t
deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct deadline_fs_entry *entry = to_deadline(attr);

        if (!entry->show)
                return 0;

        return entry->show(e->elevator_data, page);
}

static ssize_t
deadline_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct deadline_fs_entry *entry = to_deadline(attr);

        if (!entry->store)
                return -EINVAL;

        return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops deadline_sysfs_ops = {
        .show = deadline_attr_show,
        .store = deadline_attr_store,
};

static struct kobj_type deadline_ktype = {
        .sysfs_ops = &deadline_sysfs_ops,
        .default_attrs = default_attrs,
};

static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn = deadline_merge,
                .elevator_merged_fn = deadline_merged_request,
                .elevator_merge_req_fn = deadline_merged_requests,
                .elevator_next_req_fn = deadline_next_request,
                .elevator_add_req_fn = deadline_insert_request,
                .elevator_remove_req_fn = deadline_remove_request,
                .elevator_queue_empty_fn = deadline_queue_empty,
                .elevator_former_req_fn = deadline_former_request,
                .elevator_latter_req_fn = deadline_latter_request,
                .elevator_set_req_fn = deadline_set_request,
                .elevator_put_req_fn = deadline_put_request,
                .elevator_init_fn = deadline_init_queue,
                .elevator_exit_fn = deadline_exit_queue,
        },

        .elevator_ktype = &deadline_ktype,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        int ret;

        drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
                                     0, 0, NULL, NULL);

        if (!drq_pool)
                return -ENOMEM;

        ret = elv_register(&iosched_deadline);
        if (ret)
                kmem_cache_destroy(drq_pool);

        return ret;
}

static void __exit deadline_exit(void)
{
        kmem_cache_destroy(drq_pool);
        elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");