author:    Steven Whitehouse <swhiteho@redhat.com>  2006-10-02 08:45:08 -0400
committer: Steven Whitehouse <swhiteho@redhat.com>  2006-10-02 08:45:08 -0400
commit:    59458f40e25915a355d8b1d701425fe9f4f9ea23
tree:      f1c9a2934df686e36d75f759ab7313b6f0e0e5f9  /block/deadline-iosched.c
parent:    825f9075d74028d11d7f5932f04e1b5db3022b51
parent:    d834c16516d1ebec4766fc58c059bf01311e6045

Merge branch 'master' into gfs2
Diffstat (limited to 'block/deadline-iosched.c')

 -rw-r--r--  block/deadline-iosched.c | 464
 1 file changed, 74 insertions(+), 390 deletions(-)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c7ca9f0b6498..b7c5b34cb7b4 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -1,7 +1,7 @@
 /*
  * Deadline i/o scheduler.
  *
- * Copyright (C) 2002 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
  */
 #include <linux/kernel.h>
 #include <linux/fs.h>
@@ -12,7 +12,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
-#include <linux/hash.h>
 #include <linux/rbtree.h>
 
 /*
@@ -24,13 +23,6 @@ static const int writes_starved = 2;    /* max times reads can starve a write */
 static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                            by the above parameters. For throughput. */
 
-static const int deadline_hash_shift = 5;
-#define DL_HASH_BLOCK(sec)      ((sec) >> 3)
-#define DL_HASH_FN(sec)         (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
-#define DL_HASH_ENTRIES         (1 << deadline_hash_shift)
-#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
-#define ON_HASH(drq)            (!hlist_unhashed(&(drq)->hash))
-
 struct deadline_data {
         /*
          * run time data
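Note: the macros deleted above implemented deadline's private back-merge hash, keyed on a request's *end* sector. As of this kernel the generic elevator core maintains that hash for every scheduler, which is why the lookup code further down disappears as well. For reference, the condition the hash accelerated, written as a hypothetical helper (not a kernel symbol):

    /* sketch: a bio can be back-merged into rq when it starts exactly
     * where rq ends -- this is the relation rq_hash_key() indexed */
    static inline int deadline_bio_backmerges(struct request *rq,
                                              struct bio *bio)
    {
            return rq->sector + rq->nr_sectors == bio->bi_sector;
    }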
@@ -45,8 +37,7 @@ struct deadline_data {
         /*
          * next in sort order. read, write or both are NULL
          */
-        struct deadline_rq *next_drq[2];
-        struct hlist_head *hash;        /* request hash */
+        struct request *next_rq[2];
         unsigned int batching;          /* number of sequential requests made */
         sector_t last_sector;           /* head position */
         unsigned int starved;           /* times reads have starved writes */
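With the hash pointer gone and next_drq retyped, all of the scheduler's runtime state now points at struct request directly. Assembled from the hunks of this patch for reference (a sketch, comments abridged):

    struct deadline_data {
            struct rb_root sort_list[2];    /* sector-sorted tree per direction */
            struct list_head fifo_list[2];  /* expiry FIFO per direction */
            struct request *next_rq[2];     /* cached next request in sort order */
            unsigned int batching;          /* number of sequential requests made */
            sector_t last_sector;           /* head position */
            unsigned int starved;           /* times reads have starved writes */
            int fifo_expire[2];
            int fifo_batch;
            int writes_starved;
            int front_merges;
    };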
@@ -58,240 +49,69 @@ struct deadline_data {
         int fifo_batch;
         int writes_starved;
         int front_merges;
-
-        mempool_t *drq_pool;
 };
 
-/*
- * pre-request data.
- */
-struct deadline_rq {
-        /*
-         * rbtree index, key is the starting offset
-         */
-        struct rb_node rb_node;
-        sector_t rb_key;
-
-        struct request *request;
-
-        /*
-         * request hash, key is the ending offset (for back merge lookup)
-         */
-        struct hlist_node hash;
-
-        /*
-         * expire fifo
-         */
-        struct list_head fifo;
-        unsigned long expires;
-};
-
-static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);
-
-static kmem_cache_t *drq_pool;
-
-#define RQ_DATA(rq)     ((struct deadline_rq *) (rq)->elevator_private)
+static void deadline_move_request(struct deadline_data *, struct request *);
 
-/*
- * the back merge hash support functions
- */
-static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
-{
-        hlist_del_init(&drq->hash);
-}
-
-static inline void deadline_del_drq_hash(struct deadline_rq *drq)
-{
-        if (ON_HASH(drq))
-                __deadline_del_drq_hash(drq);
-}
-
-static inline void
-deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
-        struct request *rq = drq->request;
-
-        BUG_ON(ON_HASH(drq));
-
-        hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void
-deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
-        struct request *rq = drq->request;
-        struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
-
-        if (ON_HASH(drq) && &drq->hash != head->first) {
-                hlist_del(&drq->hash);
-                hlist_add_head(&drq->hash, head);
-        }
-}
-
-static struct request *
-deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
-{
-        struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
-        struct hlist_node *entry, *next;
-        struct deadline_rq *drq;
-
-        hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
-                struct request *__rq = drq->request;
-
-                BUG_ON(!ON_HASH(drq));
-
-                if (!rq_mergeable(__rq)) {
-                        __deadline_del_drq_hash(drq);
-                        continue;
-                }
-
-                if (rq_hash_key(__rq) == offset)
-                        return __rq;
-        }
-
-        return NULL;
-}
-
-/*
- * rb tree support functions
- */
-#define rb_entry_drq(node)      rb_entry((node), struct deadline_rq, rb_node)
-#define DRQ_RB_ROOT(dd, drq)    (&(dd)->sort_list[rq_data_dir((drq)->request)])
-#define rq_rb_key(rq)           (rq)->sector
-
-static struct deadline_rq *
-__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
-{
-        struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
-        struct rb_node *parent = NULL;
-        struct deadline_rq *__drq;
-
-        while (*p) {
-                parent = *p;
-                __drq = rb_entry_drq(parent);
-
-                if (drq->rb_key < __drq->rb_key)
-                        p = &(*p)->rb_left;
-                else if (drq->rb_key > __drq->rb_key)
-                        p = &(*p)->rb_right;
-                else
-                        return __drq;
-        }
-
-        rb_link_node(&drq->rb_node, parent, p);
-        return NULL;
-}
+#define RQ_RB_ROOT(dd, rq)      (&(dd)->sort_list[rq_data_dir((rq))])
 
 static void
-deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
 {
-        struct deadline_rq *__alias;
-
-        drq->rb_key = rq_rb_key(drq->request);
+        struct rb_root *root = RQ_RB_ROOT(dd, rq);
+        struct request *__alias;
 
 retry:
-        __alias = __deadline_add_drq_rb(dd, drq);
-        if (!__alias) {
-                rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-                return;
+        __alias = elv_rb_add(root, rq);
+        if (unlikely(__alias)) {
+                deadline_move_request(dd, __alias);
+                goto retry;
         }
-
-        deadline_move_request(dd, __alias);
-        goto retry;
 }
 
 static inline void
-deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
 {
-        const int data_dir = rq_data_dir(drq->request);
+        const int data_dir = rq_data_dir(rq);
 
-        if (dd->next_drq[data_dir] == drq) {
-                struct rb_node *rbnext = rb_next(&drq->rb_node);
+        if (dd->next_rq[data_dir] == rq) {
+                struct rb_node *rbnext = rb_next(&rq->rb_node);
 
-                dd->next_drq[data_dir] = NULL;
+                dd->next_rq[data_dir] = NULL;
                 if (rbnext)
-                        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
-        }
-
-        BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
-        rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-        RB_CLEAR_NODE(&drq->rb_node);
-}
-
-static struct request *
-deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
-{
-        struct rb_node *n = dd->sort_list[data_dir].rb_node;
-        struct deadline_rq *drq;
-
-        while (n) {
-                drq = rb_entry_drq(n);
-
-                if (sector < drq->rb_key)
-                        n = n->rb_left;
-                else if (sector > drq->rb_key)
-                        n = n->rb_right;
-                else
-                        return drq->request;
+                        dd->next_rq[data_dir] = rb_entry_rq(rbnext);
         }
 
-        return NULL;
+        elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
 }
 
 /*
- * deadline_find_first_drq finds the first (lowest sector numbered) request
- * for the specified data_dir. Used to sweep back to the start of the disk
- * (1-way elevator) after we process the last (highest sector) request.
- */
-static struct deadline_rq *
-deadline_find_first_drq(struct deadline_data *dd, int data_dir)
-{
-        struct rb_node *n = dd->sort_list[data_dir].rb_node;
-
-        for (;;) {
-                if (n->rb_left == NULL)
-                        return rb_entry_drq(n);
-
-                n = n->rb_left;
-        }
-}
-
-/*
- * add drq to rbtree and fifo
+ * add rq to rbtree and fifo
  */
 static void
 deadline_add_request(struct request_queue *q, struct request *rq)
 {
         struct deadline_data *dd = q->elevator->elevator_data;
-        struct deadline_rq *drq = RQ_DATA(rq);
+        const int data_dir = rq_data_dir(rq);
 
-        const int data_dir = rq_data_dir(drq->request);
+        deadline_add_rq_rb(dd, rq);
 
-        deadline_add_drq_rb(dd, drq);
         /*
          * set expire time (only used for reads) and add to fifo list
         */
-        drq->expires = jiffies + dd->fifo_expire[data_dir];
-        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
-
-        if (rq_mergeable(rq))
-                deadline_add_drq_hash(dd, drq);
+        rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+        list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
 }
 
 /*
- * remove rq from rbtree, fifo, and hash
+ * remove rq from rbtree and fifo.
  */
 static void deadline_remove_request(request_queue_t *q, struct request *rq)
 {
-        struct deadline_rq *drq = RQ_DATA(rq);
         struct deadline_data *dd = q->elevator->elevator_data;
 
-        list_del_init(&drq->fifo);
-        deadline_del_drq_rb(dd, drq);
-        deadline_del_drq_hash(drq);
+        rq_fifo_clear(rq);
+        deadline_del_rq_rb(dd, rq);
 }
 
 static int
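This hunk is the heart of the conversion: struct deadline_rq is deleted because the rbtree node, the sort key (rq->sector), and the FIFO timestamp are all carried by struct request and managed through the elevator core's elv_rb_add()/elv_rb_del(). Note the alias handling in deadline_add_rq_rb(): elv_rb_add() returns an already-queued request with the same sector key, and the retry loop dispatches that request instead of losing one of the two. The rq_fifo_* accessors are assumed to be defined roughly as below; the exact field reuse (parking the expiry in a list pointer that is idle while the request waits on the scheduler FIFO) is my reading of the 2.6.18-era <linux/blkdev.h>, so treat it as illustrative:

    /* illustrative sketch of the assumed <linux/blkdev.h> helpers */
    #define rq_entry_fifo(ptr)       list_entry((ptr), struct request, queuelist)
    #define rq_fifo_time(rq)         ((unsigned long) (rq)->donelist.next)
    #define rq_set_fifo_time(rq,exp) ((rq)->donelist.next = (void *) (exp))
    #define rq_fifo_clear(rq)        do {                           \
                    list_del_init(&(rq)->queuelist);                \
                    INIT_LIST_HEAD(&(rq)->donelist);                \
            } while (0)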
@@ -302,27 +122,14 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
         int ret;
 
         /*
-         * see if the merge hash can satisfy a back merge
-         */
-        __rq = deadline_find_drq_hash(dd, bio->bi_sector);
-        if (__rq) {
-                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
-                if (elv_rq_merge_ok(__rq, bio)) {
-                        ret = ELEVATOR_BACK_MERGE;
-                        goto out;
-                }
-        }
-
-        /*
          * check for front merge
          */
         if (dd->front_merges) {
-                sector_t rb_key = bio->bi_sector + bio_sectors(bio);
+                sector_t sector = bio->bi_sector + bio_sectors(bio);
 
-                __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
+                __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
                 if (__rq) {
-                        BUG_ON(rb_key != rq_rb_key(__rq));
+                        BUG_ON(sector != __rq->sector);
 
                         if (elv_rq_merge_ok(__rq, bio)) {
                                 ret = ELEVATOR_FRONT_MERGE;
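The ELEVATOR_BACK_MERGE path is gone because back-merge lookup now happens centrally, before the scheduler's merge hook is consulted; deadline only probes its sort tree for a front merge. A minimal sketch of what elv_rb_find() does, mirroring the binary search that the removed deadline_find_drq_rb() performed (illustrative of the elevator core helper, not copied from it):

    struct request *elv_rb_find(struct rb_root *root, sector_t sector)
    {
            struct rb_node *n = root->rb_node;
            struct request *rq;

            /* descend the sector-sorted tree looking for an exact match */
            while (n) {
                    rq = rb_entry(n, struct request, rb_node);

                    if (sector < rq->sector)
                            n = n->rb_left;
                    else if (sector > rq->sector)
                            n = n->rb_right;
                    else
                            return rq;
            }

            return NULL;
    }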
@@ -333,29 +140,21 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
 
         return ELEVATOR_NO_MERGE;
 out:
-        if (ret)
-                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
         *req = __rq;
         return ret;
 }
 
-static void deadline_merged_request(request_queue_t *q, struct request *req)
+static void deadline_merged_request(request_queue_t *q, struct request *req,
+                                    int type)
 {
         struct deadline_data *dd = q->elevator->elevator_data;
-        struct deadline_rq *drq = RQ_DATA(req);
-
-        /*
-         * hash always needs to be repositioned, key is end sector
-         */
-        deadline_del_drq_hash(drq);
-        deadline_add_drq_hash(dd, drq);
 
         /*
          * if the merge was a front merge, we need to reposition request
          */
-        if (rq_rb_key(req) != drq->rb_key) {
-                deadline_del_drq_rb(dd, drq);
-                deadline_add_drq_rb(dd, drq);
+        if (type == ELEVATOR_FRONT_MERGE) {
+                elv_rb_del(RQ_RB_ROOT(dd, req), req);
+                deadline_add_rq_rb(dd, req);
         }
 }
 
@@ -363,33 +162,14 @@ static void
 deadline_merged_requests(request_queue_t *q, struct request *req,
                          struct request *next)
 {
-        struct deadline_data *dd = q->elevator->elevator_data;
-        struct deadline_rq *drq = RQ_DATA(req);
-        struct deadline_rq *dnext = RQ_DATA(next);
-
-        BUG_ON(!drq);
-        BUG_ON(!dnext);
-
         /*
-         * reposition drq (this is the merged request) in hash, and in rbtree
-         * in case of a front merge
+         * if next expires before rq, assign its expire time to rq
+         * and move into next position (next will be deleted) in fifo
          */
-        deadline_del_drq_hash(drq);
-        deadline_add_drq_hash(dd, drq);
-
-        if (rq_rb_key(req) != drq->rb_key) {
-                deadline_del_drq_rb(dd, drq);
-                deadline_add_drq_rb(dd, drq);
-        }
-
-        /*
-         * if dnext expires before drq, assign its expire time to drq
-         * and move into dnext position (dnext will be deleted) in fifo
-         */
-        if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
-                if (time_before(dnext->expires, drq->expires)) {
-                        list_move(&drq->fifo, &dnext->fifo);
-                        drq->expires = dnext->expires;
+        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+                if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+                        list_move(&req->queuelist, &next->queuelist);
+                        rq_set_fifo_time(req, rq_fifo_time(next));
                 }
         }
 
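Only the deadline-specific policy survives here: the FIFO is ordered by expiry, so when next is about to be deleted by the merge, req inherits next's timestamp and FIFO slot if next would have expired first, guaranteeing that a merge can never postpone an expiry. The comparison is wraparound-safe because the jiffies comparators reduce to signed subtraction (illustrative of their <linux/jiffies.h> definitions, typecheck machinery omitted):

    #define time_after(a, b)        ((long)(b) - (long)(a) < 0)
    #define time_before(a, b)       time_after(b, a)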
@@ -403,52 +183,50 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
  * move request from sort list to dispatch queue.
  */
 static inline void
-deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
 {
-        request_queue_t *q = drq->request->q;
+        request_queue_t *q = rq->q;
 
-        deadline_remove_request(q, drq->request);
-        elv_dispatch_add_tail(q, drq->request);
+        deadline_remove_request(q, rq);
+        elv_dispatch_add_tail(q, rq);
 }
 
 /*
  * move an entry to dispatch queue
  */
 static void
-deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_move_request(struct deadline_data *dd, struct request *rq)
 {
-        const int data_dir = rq_data_dir(drq->request);
-        struct rb_node *rbnext = rb_next(&drq->rb_node);
+        const int data_dir = rq_data_dir(rq);
+        struct rb_node *rbnext = rb_next(&rq->rb_node);
 
-        dd->next_drq[READ] = NULL;
-        dd->next_drq[WRITE] = NULL;
+        dd->next_rq[READ] = NULL;
+        dd->next_rq[WRITE] = NULL;
 
         if (rbnext)
-                dd->next_drq[data_dir] = rb_entry_drq(rbnext);
+                dd->next_rq[data_dir] = rb_entry_rq(rbnext);
 
-        dd->last_sector = drq->request->sector + drq->request->nr_sectors;
+        dd->last_sector = rq->sector + rq->nr_sectors;
 
         /*
          * take it off the sort and fifo list, move
          * to dispatch queue
          */
-        deadline_move_to_dispatch(dd, drq);
+        deadline_move_to_dispatch(dd, rq);
 }
 
-#define list_entry_fifo(ptr)    list_entry((ptr), struct deadline_rq, fifo)
-
 /*
  * deadline_check_fifo returns 0 if there are no expired reads on the fifo,
  * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
  */
 static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
 {
-        struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);
+        struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
 
         /*
-         * drq is expired!
+         * rq is expired!
          */
-        if (time_after(jiffies, drq->expires))
+        if (time_after(jiffies, rq_fifo_time(rq)))
                 return 1;
 
         return 0;
@@ -463,21 +241,21 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
         struct deadline_data *dd = q->elevator->elevator_data;
         const int reads = !list_empty(&dd->fifo_list[READ]);
         const int writes = !list_empty(&dd->fifo_list[WRITE]);
-        struct deadline_rq *drq;
+        struct request *rq;
         int data_dir;
 
         /*
          * batches are currently reads XOR writes
          */
-        if (dd->next_drq[WRITE])
-                drq = dd->next_drq[WRITE];
+        if (dd->next_rq[WRITE])
+                rq = dd->next_rq[WRITE];
         else
-                drq = dd->next_drq[READ];
+                rq = dd->next_rq[READ];
 
-        if (drq) {
+        if (rq) {
                 /* we have a "next request" */
 
-                if (dd->last_sector != drq->request->sector)
+                if (dd->last_sector != rq->sector)
                         /* end the batch on a non sequential request */
                         dd->batching += dd->fifo_batch;
 
@@ -526,30 +304,33 @@ dispatch_find_request:
         if (deadline_check_fifo(dd, data_dir)) {
                 /* An expired request exists - satisfy it */
                 dd->batching = 0;
-                drq = list_entry_fifo(dd->fifo_list[data_dir].next);
+                rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
 
-        } else if (dd->next_drq[data_dir]) {
+        } else if (dd->next_rq[data_dir]) {
                 /*
                  * The last req was the same dir and we have a next request in
                  * sort order. No expired requests so continue on from here.
                  */
-                drq = dd->next_drq[data_dir];
+                rq = dd->next_rq[data_dir];
         } else {
+                struct rb_node *node;
                 /*
                  * The last req was the other direction or we have run out of
                  * higher-sectored requests. Go back to the lowest sectored
                  * request (1 way elevator) and start a new batch.
                  */
                 dd->batching = 0;
-                drq = deadline_find_first_drq(dd, data_dir);
+                node = rb_first(&dd->sort_list[data_dir]);
+                if (node)
+                        rq = rb_entry_rq(node);
         }
 
 dispatch_request:
         /*
-         * drq is the selected appropriate request.
+         * rq is the selected appropriate request.
          */
         dd->batching++;
-        deadline_move_request(dd, drq);
+        deadline_move_request(dd, rq);
 
         return 1;
 }
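One nuance in this hunk: the removed deadline_find_first_drq() assumed a non-empty tree, while the replacement guards rb_first() with if (node). The guard is belt-and-braces; this branch is reached only when the chosen direction's FIFO is non-empty, and every queued request sits on both the FIFO and the sort tree, so rq is always assigned before dispatch_request uses it. rb_entry_rq() is the usual container_of wrapper:

    /* from <linux/blkdev.h> */
    #define rb_entry_rq(node)       rb_entry((node), struct request, rb_node)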
@@ -562,30 +343,6 @@ static int deadline_queue_empty(request_queue_t *q)
                 && list_empty(&dd->fifo_list[READ]);
 }
 
-static struct request *
-deadline_former_request(request_queue_t *q, struct request *rq)
-{
-        struct deadline_rq *drq = RQ_DATA(rq);
-        struct rb_node *rbprev = rb_prev(&drq->rb_node);
-
-        if (rbprev)
-                return rb_entry_drq(rbprev)->request;
-
-        return NULL;
-}
-
-static struct request *
-deadline_latter_request(request_queue_t *q, struct request *rq)
-{
-        struct deadline_rq *drq = RQ_DATA(rq);
-        struct rb_node *rbnext = rb_next(&drq->rb_node);
-
-        if (rbnext)
-                return rb_entry_drq(rbnext)->request;
-
-        return NULL;
-}
-
 static void deadline_exit_queue(elevator_t *e)
 {
         struct deadline_data *dd = e->elevator_data;
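Both removed helpers were pure tree walks, and with rb_node embedded in struct request they no longer depend on any deadline state, so the ops table below switches to the elevator core's shared versions. A minimal sketch of the shape of those generic helpers (illustrative of block/elevator.c):

    struct request *elv_rb_latter_request(request_queue_t *q,
                                          struct request *rq)
    {
            struct rb_node *rbnext = rb_next(&rq->rb_node);

            /* next request in ascending sector order, if any */
            if (rbnext)
                    return rb_entry_rq(rbnext);

            return NULL;
    }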
@@ -593,46 +350,21 @@ static void deadline_exit_queue(elevator_t *e)
         BUG_ON(!list_empty(&dd->fifo_list[READ]));
         BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
 
-        mempool_destroy(dd->drq_pool);
-        kfree(dd->hash);
         kfree(dd);
 }
 
 /*
- * initialize elevator private data (deadline_data), and alloc a drq for
- * each request on the free lists
+ * initialize elevator private data (deadline_data).
  */
 static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 {
         struct deadline_data *dd;
-        int i;
-
-        if (!drq_pool)
-                return NULL;
 
         dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
         if (!dd)
                 return NULL;
         memset(dd, 0, sizeof(*dd));
 
-        dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
-                                GFP_KERNEL, q->node);
-        if (!dd->hash) {
-                kfree(dd);
-                return NULL;
-        }
-
-        dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-                                           mempool_free_slab, drq_pool, q->node);
-        if (!dd->drq_pool) {
-                kfree(dd->hash);
-                kfree(dd);
-                return NULL;
-        }
-
-        for (i = 0; i < DL_HASH_ENTRIES; i++)
-                INIT_HLIST_HEAD(&dd->hash[i]);
-
         INIT_LIST_HEAD(&dd->fifo_list[READ]);
         INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
         dd->sort_list[READ] = RB_ROOT;
@@ -645,39 +377,6 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
         return dd;
 }
 
-static void deadline_put_request(request_queue_t *q, struct request *rq)
-{
-        struct deadline_data *dd = q->elevator->elevator_data;
-        struct deadline_rq *drq = RQ_DATA(rq);
-
-        mempool_free(drq, dd->drq_pool);
-        rq->elevator_private = NULL;
-}
-
-static int
-deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
-                     gfp_t gfp_mask)
-{
-        struct deadline_data *dd = q->elevator->elevator_data;
-        struct deadline_rq *drq;
-
-        drq = mempool_alloc(dd->drq_pool, gfp_mask);
-        if (drq) {
-                memset(drq, 0, sizeof(*drq));
-                RB_CLEAR_NODE(&drq->rb_node);
-                drq->request = rq;
-
-                INIT_HLIST_NODE(&drq->hash);
-
-                INIT_LIST_HEAD(&drq->fifo);
-
-                rq->elevator_private = drq;
-                return 0;
-        }
-
-        return 1;
-}
-
 /*
  * sysfs parts below
  */
@@ -757,10 +456,8 @@ static struct elevator_type iosched_deadline = {
         .elevator_dispatch_fn =         deadline_dispatch_requests,
         .elevator_add_req_fn =          deadline_add_request,
         .elevator_queue_empty_fn =      deadline_queue_empty,
-        .elevator_former_req_fn =       deadline_former_request,
-        .elevator_latter_req_fn =       deadline_latter_request,
-        .elevator_set_req_fn =          deadline_set_request,
-        .elevator_put_req_fn =          deadline_put_request,
+        .elevator_former_req_fn =       elv_rb_former_request,
+        .elevator_latter_req_fn =       elv_rb_latter_request,
         .elevator_init_fn =             deadline_init_queue,
         .elevator_exit_fn =             deadline_exit_queue,
         },
@@ -772,24 +469,11 @@ static struct elevator_type iosched_deadline = {
 
 static int __init deadline_init(void)
 {
-        int ret;
-
-        drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
-                                     0, 0, NULL, NULL);
-
-        if (!drq_pool)
-                return -ENOMEM;
-
-        ret = elv_register(&iosched_deadline);
-        if (ret)
-                kmem_cache_destroy(drq_pool);
-
-        return ret;
+        return elv_register(&iosched_deadline);
 }
 
 static void __exit deadline_exit(void)
 {
-        kmem_cache_destroy(drq_pool);
         elv_unregister(&iosched_deadline);
 }
 