author     Jens Axboe <axboe@suse.de>  2006-07-13 06:36:41 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>  2006-09-30 14:27:00 -0400
commit     8840faa1eebba22a9e2f86acddc0cf5145937df4
tree       4938f01545638879f616aab59d54821ff7739a3f
parent     9e2585a8a23f3a42f815b2a638725d85a921cd65
[PATCH] deadline-iosched: remove elevator private drq request type
A big win: we now save an allocation/free on each request! With the
previous rb/hash abstractions, we can just reuse queuelist/donelist for
the FIFO data and be done with it.

Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'block')
-rw-r--r--  block/deadline-iosched.c  194
1 file changed, 52 insertions(+), 142 deletions(-)
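
The change in a nutshell: instead of wrapping every request in a separately
allocated struct deadline_rq, the scheduler now queues struct request
directly. A minimal before/after sketch (the wrapper struct is copied from
the code removed below; the replacement lines appear in
deadline_add_request()):

        /* before: one extra allocation per request, set up in
         * deadline_set_request() and freed in deadline_put_request()
         */
        struct deadline_rq {
                struct request *request;        /* back pointer */
                struct list_head fifo;          /* expire fifo linkage */
                unsigned long expires;          /* deadline, in jiffies */
        };

        /* after: the request itself carries the fifo state, reusing
         * fields that are idle while it waits in the scheduler
         */
        rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
        list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
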
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 8300ba1f15a0..3b3b4414170e 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -37,7 +37,7 @@ struct deadline_data {
        /*
         * next in sort order. read, write or both are NULL
         */
-       struct deadline_rq *next_drq[2];
+       struct request *next_rq[2];
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
        unsigned int starved;           /* times reads have starved writes */
@@ -49,34 +49,14 @@ struct deadline_data {
        int fifo_batch;
        int writes_starved;
        int front_merges;
-
-       mempool_t *drq_pool;
 };
 
-/*
- * pre-request data.
- */
-struct deadline_rq {
-       struct request *request;
-
-       /*
-        * expire fifo
-        */
-       struct list_head fifo;
-       unsigned long expires;
-};
-
-static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);
-
-static kmem_cache_t *drq_pool;
-
-#define RQ_DATA(rq)    ((struct deadline_rq *) (rq)->elevator_private)
+static void deadline_move_request(struct deadline_data *, struct request *);
 
 #define RQ_RB_ROOT(dd, rq)     (&(dd)->sort_list[rq_data_dir((rq))])
-#define DRQ_RB_ROOT(dd, drq)   RQ_RB_ROOT((drq)->request)
 
 static void
-deadline_add_drq_rb(struct deadline_data *dd, struct request *rq)
+deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
 {
        struct rb_root *root = RQ_RB_ROOT(dd, rq);
        struct request *__alias;
@@ -84,45 +64,43 @@ deadline_add_drq_rb(struct deadline_data *dd, struct request *rq)
 retry:
        __alias = elv_rb_add(root, rq);
        if (unlikely(__alias)) {
-               deadline_move_request(dd, RQ_DATA(__alias));
+               deadline_move_request(dd, __alias);
                goto retry;
        }
 }
 
 static inline void
-deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
 {
-       struct request *rq = drq->request;
        const int data_dir = rq_data_dir(rq);
 
-       if (dd->next_drq[data_dir] == drq) {
+       if (dd->next_rq[data_dir] == rq) {
                struct rb_node *rbnext = rb_next(&rq->rb_node);
 
-               dd->next_drq[data_dir] = NULL;
+               dd->next_rq[data_dir] = NULL;
                if (rbnext)
-                       dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
+                       dd->next_rq[data_dir] = rb_entry_rq(rbnext);
        }
 
        elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
 }
 
 /*
- * add drq to rbtree and fifo
+ * add rq to rbtree and fifo
  */
 static void
 deadline_add_request(struct request_queue *q, struct request *rq)
 {
        struct deadline_data *dd = q->elevator->elevator_data;
-       struct deadline_rq *drq = RQ_DATA(rq);
-       const int data_dir = rq_data_dir(drq->request);
+       const int data_dir = rq_data_dir(rq);
 
-       deadline_add_drq_rb(dd, rq);
+       deadline_add_rq_rb(dd, rq);
 
        /*
         * set expire time (only used for reads) and add to fifo list
         */
-       drq->expires = jiffies + dd->fifo_expire[data_dir];
-       list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
+       rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+       list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
 }
 
 /*
@@ -130,11 +108,10 @@ deadline_add_request(struct request_queue *q, struct request *rq)
  */
 static void deadline_remove_request(request_queue_t *q, struct request *rq)
 {
-       struct deadline_rq *drq = RQ_DATA(rq);
        struct deadline_data *dd = q->elevator->elevator_data;
 
-       list_del_init(&drq->fifo);
-       deadline_del_drq_rb(dd, drq);
+       rq_fifo_clear(rq);
+       deadline_del_rq_rb(dd, rq);
 }
 
 static int
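
Note that rq_fifo_clear(), rq_fifo_time(), rq_set_fifo_time() and
rq_entry_fifo() are not defined in this patch; they come with the rb/hash
abstraction series it builds on. Going by the commit message ("reuse
queuelist/donelist for the FIFO data"), their definitions would look roughly
like the following sketch (assumed layout, not taken from this diff):

        /* assumed helpers: the expiry time is stashed in the otherwise
         * unused donelist pointer while the request sits in the scheduler,
         * and queuelist provides the fifo linkage
         */
        #define rq_fifo_time(rq)         ((unsigned long) (rq)->donelist.next)
        #define rq_set_fifo_time(rq,exp) ((rq)->donelist.next = (void *) (exp))
        #define rq_entry_fifo(ptr)       list_entry((ptr), struct request, queuelist)
        #define rq_fifo_clear(rq)        do {                   \
                        list_del_init(&(rq)->queuelist);        \
                        INIT_LIST_HEAD(&(rq)->donelist);        \
                } while (0)
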
@@ -177,7 +154,7 @@ static void deadline_merged_request(request_queue_t *q, struct request *req,
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(RQ_RB_ROOT(dd, req), req);
-               deadline_add_drq_rb(dd, req);
+               deadline_add_rq_rb(dd, req);
        }
 }
 
@@ -185,20 +162,14 @@ static void
 deadline_merged_requests(request_queue_t *q, struct request *req,
                         struct request *next)
 {
-       struct deadline_rq *drq = RQ_DATA(req);
-       struct deadline_rq *dnext = RQ_DATA(next);
-
-       BUG_ON(!drq);
-       BUG_ON(!dnext);
-
        /*
-        * if dnext expires before drq, assign its expire time to drq
-        * and move into dnext position (dnext will be deleted) in fifo
+        * if next expires before rq, assign its expire time to rq
+        * and move into next position (next will be deleted) in fifo
         */
-       if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
-               if (time_before(dnext->expires, drq->expires)) {
-                       list_move(&drq->fifo, &dnext->fifo);
-                       drq->expires = dnext->expires;
+       if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+               if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+                       list_move(&req->queuelist, &next->queuelist);
+                       rq_set_fifo_time(req, rq_fifo_time(next));
                }
        }
 
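
The merge path keeps the fifo sorted by expiry: next is about to be freed,
so req inherits next's earlier deadline and its fifo position. A small
illustration with hypothetical expiry times:

        /*
         * fifo before merge:  ... -> next(t+50) -> ... -> req(t+200) -> ...
         * fifo after merge:   ... -> req(t+50)  -> ...
         *
         * req only moves when next's deadline is the earlier one, so a
         * merge can never postpone the merged request's service.
         */
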
@@ -212,53 +183,50 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
  * move request from sort list to dispatch queue.
  */
 static inline void
-deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
 {
-       request_queue_t *q = drq->request->q;
+       request_queue_t *q = rq->q;
 
-       deadline_remove_request(q, drq->request);
-       elv_dispatch_add_tail(q, drq->request);
+       deadline_remove_request(q, rq);
+       elv_dispatch_add_tail(q, rq);
 }
 
 /*
  * move an entry to dispatch queue
  */
 static void
-deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
+deadline_move_request(struct deadline_data *dd, struct request *rq)
 {
-       struct request *rq = drq->request;
        const int data_dir = rq_data_dir(rq);
        struct rb_node *rbnext = rb_next(&rq->rb_node);
 
-       dd->next_drq[READ] = NULL;
-       dd->next_drq[WRITE] = NULL;
+       dd->next_rq[READ] = NULL;
+       dd->next_rq[WRITE] = NULL;
 
        if (rbnext)
-               dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
+               dd->next_rq[data_dir] = rb_entry_rq(rbnext);
 
-       dd->last_sector = drq->request->sector + drq->request->nr_sectors;
+       dd->last_sector = rq->sector + rq->nr_sectors;
 
        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
-       deadline_move_to_dispatch(dd, drq);
+       deadline_move_to_dispatch(dd, rq);
 }
 
-#define list_entry_fifo(ptr)   list_entry((ptr), struct deadline_rq, fifo)
-
 /*
  * deadline_check_fifo returns 0 if there are no expired reads on the fifo,
  * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
  */
 static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
 {
-       struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);
+       struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
 
        /*
-        * drq is expired!
+        * rq is expired!
         */
-       if (time_after(jiffies, drq->expires))
+       if (time_after(jiffies, rq_fifo_time(rq)))
                return 1;
 
        return 0;
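
The expiry test uses time_after() so it stays correct when jiffies wraps
around; its jiffies.h definition is essentially the following (with the
typecheck() guards omitted here):

        #define time_after(a,b)         ((long)(b) - (long)(a) < 0)
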
@@ -273,21 +241,21 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
-       struct deadline_rq *drq;
+       struct request *rq;
        int data_dir;
 
        /*
         * batches are currently reads XOR writes
         */
-       if (dd->next_drq[WRITE])
-               drq = dd->next_drq[WRITE];
+       if (dd->next_rq[WRITE])
+               rq = dd->next_rq[WRITE];
        else
-               drq = dd->next_drq[READ];
+               rq = dd->next_rq[READ];
 
-       if (drq) {
+       if (rq) {
                /* we have a "next request" */
 
-               if (dd->last_sector != drq->request->sector)
+               if (dd->last_sector != rq->sector)
                        /* end the batch on a non sequential request */
                        dd->batching += dd->fifo_batch;
 
@@ -336,34 +304,33 @@ dispatch_find_request:
        if (deadline_check_fifo(dd, data_dir)) {
                /* An expired request exists - satisfy it */
                dd->batching = 0;
-               drq = list_entry_fifo(dd->fifo_list[data_dir].next);
+               rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
 
-       } else if (dd->next_drq[data_dir]) {
+       } else if (dd->next_rq[data_dir]) {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
-               drq = dd->next_drq[data_dir];
+               rq = dd->next_rq[data_dir];
        } else {
-               struct rb_node *n;
-
+               struct rb_node *node;
                /*
                 * The last req was the other direction or we have run out of
                 * higher-sectored requests. Go back to the lowest sectored
                 * request (1 way elevator) and start a new batch.
                 */
                dd->batching = 0;
-               n = rb_first(&dd->sort_list[data_dir]);
-               if (n)
-                       drq = RQ_DATA(rb_entry_rq(n));
+               node = rb_first(&dd->sort_list[data_dir]);
+               if (node)
+                       rq = rb_entry_rq(node);
        }
 
 dispatch_request:
        /*
-        * drq is the selected appropriate request.
+        * rq is the selected appropriate request.
         */
        dd->batching++;
-       deadline_move_request(dd, drq);
+       deadline_move_request(dd, rq);
 
        return 1;
 }
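
Taken together with the parts of deadline_dispatch_requests() that this
patch does not touch, the selection order reads roughly as follows (a
paraphrase, not code from the diff):

        /*
         * 1. if a cached next_rq exists and the current batch has quota
         *    left (fifo_batch), keep the sequential batch going;
         * 2. otherwise pick a direction: reads win, but writes may be
         *    starved at most writes_starved times;
         * 3. in that direction, serve the fifo head if its deadline has
         *    passed, else the next request in sector order, else restart
         *    from the lowest sector (one-way elevator).
         */
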
@@ -383,33 +350,21 @@ static void deadline_exit_queue(elevator_t *e)
        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
 
-       mempool_destroy(dd->drq_pool);
        kfree(dd);
 }
 
 /*
- * initialize elevator private data (deadline_data), and alloc a drq for
- * each request on the free lists
+ * initialize elevator private data (deadline_data).
  */
 static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 {
        struct deadline_data *dd;
 
-       if (!drq_pool)
-               return NULL;
-
        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                return NULL;
        memset(dd, 0, sizeof(*dd));
 
-       dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-                                       mempool_free_slab, drq_pool, q->node);
-       if (!dd->drq_pool) {
-               kfree(dd);
-               return NULL;
-       }
-
        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
@@ -422,36 +377,6 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
        return dd;
 }
 
-static void deadline_put_request(request_queue_t *q, struct request *rq)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct deadline_rq *drq = RQ_DATA(rq);
-
-       mempool_free(drq, dd->drq_pool);
-       rq->elevator_private = NULL;
-}
-
-static int
-deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
-                    gfp_t gfp_mask)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct deadline_rq *drq;
-
-       drq = mempool_alloc(dd->drq_pool, gfp_mask);
-       if (drq) {
-               memset(drq, 0, sizeof(*drq));
-               drq->request = rq;
-
-               INIT_LIST_HEAD(&drq->fifo);
-
-               rq->elevator_private = drq;
-               return 0;
-       }
-
-       return 1;
-}
-
 /*
  * sysfs parts below
  */
@@ -533,8 +458,6 @@ static struct elevator_type iosched_deadline = {
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       elv_rb_former_request,
                .elevator_latter_req_fn =       elv_rb_latter_request,
-               .elevator_set_req_fn =          deadline_set_request,
-               .elevator_put_req_fn =          deadline_put_request,
                .elevator_init_fn =             deadline_init_queue,
                .elevator_exit_fn =             deadline_exit_queue,
        },
@@ -546,24 +469,11 @@ static struct elevator_type iosched_deadline = {
 
 static int __init deadline_init(void)
 {
-       int ret;
-
-       drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
-                                    0, 0, NULL, NULL);
-
-       if (!drq_pool)
-               return -ENOMEM;
-
-       ret = elv_register(&iosched_deadline);
-       if (ret)
-               kmem_cache_destroy(drq_pool);
-
-       return ret;
+       return elv_register(&iosched_deadline);
 }
 
 static void __exit deadline_exit(void)
 {
-       kmem_cache_destroy(drq_pool);
        elv_unregister(&iosched_deadline);
 }
 