about summary refs log tree commit diff stats
path: root/block/elevator.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@suse.de>2006-07-13 05:55:04 -0400
committerJens Axboe <axboe@nelson.home.kernel.dk>2006-09-30 14:26:57 -0400
commit2e662b65f05d550b6799ed6bfa9963b82279e6b7 (patch)
tree82911ec73a52d149d74a3d13c3c5eedb269a19cb /block/elevator.c
parent10fd48f2376db52f08bf0420d2c4f580e39269e1 (diff)
[PATCH] elevator: abstract out the rbtree sort handling
The rbtree sort/lookup/reposition logic is mostly duplicated in cfq/deadline/as, so move it to the elevator core. The io schedulers still provide the actual rb root, as we don't want to impose any sort of specific handling on the schedulers. Introduce the helpers and rb_node in struct request to help migrate the IO schedulers. Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'block/elevator.c')
-rw-r--r--block/elevator.c123
1 file changed, 109 insertions, 14 deletions
diff --git a/block/elevator.c b/block/elevator.c
index cff1102dac9d..cbbc36ba016a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -239,6 +239,8 @@ int elevator_init(request_queue_t *q, char *name)
239 return ret; 239 return ret;
240} 240}
241 241
242EXPORT_SYMBOL(elevator_init);
243
242void elevator_exit(elevator_t *e) 244void elevator_exit(elevator_t *e)
243{ 245{
244 mutex_lock(&e->sysfs_lock); 246 mutex_lock(&e->sysfs_lock);
@@ -250,6 +252,8 @@ void elevator_exit(elevator_t *e)
250 kobject_put(&e->kobj); 252 kobject_put(&e->kobj);
251} 253}
252 254
255EXPORT_SYMBOL(elevator_exit);
256
253static inline void __elv_rqhash_del(struct request *rq) 257static inline void __elv_rqhash_del(struct request *rq)
254{ 258{
255 hlist_del_init(&rq->hash); 259 hlist_del_init(&rq->hash);
@@ -298,9 +302,68 @@ static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
298} 302}
299 303
300/* 304/*
305 * RB-tree support functions for inserting/lookup/removal of requests
306 * in a sorted RB tree.
307 */
308struct request *elv_rb_add(struct rb_root *root, struct request *rq)
309{
310 struct rb_node **p = &root->rb_node;
311 struct rb_node *parent = NULL;
312 struct request *__rq;
313
314 while (*p) {
315 parent = *p;
316 __rq = rb_entry(parent, struct request, rb_node);
317
318 if (rq->sector < __rq->sector)
319 p = &(*p)->rb_left;
320 else if (rq->sector > __rq->sector)
321 p = &(*p)->rb_right;
322 else
323 return __rq;
324 }
325
326 rb_link_node(&rq->rb_node, parent, p);
327 rb_insert_color(&rq->rb_node, root);
328 return NULL;
329}
330
331EXPORT_SYMBOL(elv_rb_add);
332
333void elv_rb_del(struct rb_root *root, struct request *rq)
334{
335 BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
336 rb_erase(&rq->rb_node, root);
337 RB_CLEAR_NODE(&rq->rb_node);
338}
339
340EXPORT_SYMBOL(elv_rb_del);
341
342struct request *elv_rb_find(struct rb_root *root, sector_t sector)
343{
344 struct rb_node *n = root->rb_node;
345 struct request *rq;
346
347 while (n) {
348 rq = rb_entry(n, struct request, rb_node);
349
350 if (sector < rq->sector)
351 n = n->rb_left;
352 else if (sector > rq->sector)
353 n = n->rb_right;
354 else
355 return rq;
356 }
357
358 return NULL;
359}
360
361EXPORT_SYMBOL(elv_rb_find);
362
363/*
301 * Insert rq into dispatch queue of q. Queue lock must be held on 364 * Insert rq into dispatch queue of q. Queue lock must be held on
302 * entry. If sort != 0, rq is sort-inserted; otherwise, rq will be 365 * entry. rq is sort-inserted into the dispatch queue. To be used by
303 * appended to the dispatch queue. To be used by specific elevators. 366 * specific elevators.
304 */ 367 */
305void elv_dispatch_sort(request_queue_t *q, struct request *rq) 368void elv_dispatch_sort(request_queue_t *q, struct request *rq)
306{ 369{
@@ -335,8 +398,12 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
335 list_add(&rq->queuelist, entry); 398 list_add(&rq->queuelist, entry);
336} 399}
337 400
401EXPORT_SYMBOL(elv_dispatch_sort);
402
338/* 403/*
339 * This should be in elevator.h, but that requires pulling in rq and q 404 * Insert rq into dispatch queue of q. Queue lock must be held on
405 * entry. rq is added to the back of the dispatch queue. To be used by
406 * specific elevators.
340 */ 407 */
341void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) 408void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
342{ 409{
@@ -352,6 +419,8 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
352 list_add_tail(&rq->queuelist, &q->queue_head); 419 list_add_tail(&rq->queuelist, &q->queue_head);
353} 420}
354 421
422EXPORT_SYMBOL(elv_dispatch_add_tail);
423
355int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) 424int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
356{ 425{
357 elevator_t *e = q->elevator; 426 elevator_t *e = q->elevator;
@@ -384,14 +453,15 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
384 return ELEVATOR_NO_MERGE; 453 return ELEVATOR_NO_MERGE;
385} 454}
386 455
387void elv_merged_request(request_queue_t *q, struct request *rq) 456void elv_merged_request(request_queue_t *q, struct request *rq, int type)
388{ 457{
389 elevator_t *e = q->elevator; 458 elevator_t *e = q->elevator;
390 459
391 if (e->ops->elevator_merged_fn) 460 if (e->ops->elevator_merged_fn)
392 e->ops->elevator_merged_fn(q, rq); 461 e->ops->elevator_merged_fn(q, rq, type);
393 462
394 elv_rqhash_reposition(q, rq); 463 if (type == ELEVATOR_BACK_MERGE)
464 elv_rqhash_reposition(q, rq);
395 465
396 q->last_merge = rq; 466 q->last_merge = rq;
397} 467}
@@ -577,6 +647,8 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
577 elv_insert(q, rq, where); 647 elv_insert(q, rq, where);
578} 648}
579 649
650EXPORT_SYMBOL(__elv_add_request);
651
580void elv_add_request(request_queue_t *q, struct request *rq, int where, 652void elv_add_request(request_queue_t *q, struct request *rq, int where,
581 int plug) 653 int plug)
582{ 654{
@@ -587,6 +659,8 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
587 spin_unlock_irqrestore(q->queue_lock, flags); 659 spin_unlock_irqrestore(q->queue_lock, flags);
588} 660}
589 661
662EXPORT_SYMBOL(elv_add_request);
663
590static inline struct request *__elv_next_request(request_queue_t *q) 664static inline struct request *__elv_next_request(request_queue_t *q)
591{ 665{
592 struct request *rq; 666 struct request *rq;
@@ -670,6 +744,8 @@ struct request *elv_next_request(request_queue_t *q)
670 return rq; 744 return rq;
671} 745}
672 746
747EXPORT_SYMBOL(elv_next_request);
748
673void elv_dequeue_request(request_queue_t *q, struct request *rq) 749void elv_dequeue_request(request_queue_t *q, struct request *rq)
674{ 750{
675 BUG_ON(list_empty(&rq->queuelist)); 751 BUG_ON(list_empty(&rq->queuelist));
@@ -686,6 +762,8 @@ void elv_dequeue_request(request_queue_t *q, struct request *rq)
686 q->in_flight++; 762 q->in_flight++;
687} 763}
688 764
765EXPORT_SYMBOL(elv_dequeue_request);
766
689int elv_queue_empty(request_queue_t *q) 767int elv_queue_empty(request_queue_t *q)
690{ 768{
691 elevator_t *e = q->elevator; 769 elevator_t *e = q->elevator;
@@ -699,6 +777,8 @@ int elv_queue_empty(request_queue_t *q)
699 return 1; 777 return 1;
700} 778}
701 779
780EXPORT_SYMBOL(elv_queue_empty);
781
702struct request *elv_latter_request(request_queue_t *q, struct request *rq) 782struct request *elv_latter_request(request_queue_t *q, struct request *rq)
703{ 783{
704 elevator_t *e = q->elevator; 784 elevator_t *e = q->elevator;
@@ -1025,11 +1105,26 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
1025 return len; 1105 return len;
1026} 1106}
1027 1107
1028EXPORT_SYMBOL(elv_dispatch_sort); 1108struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
1029EXPORT_SYMBOL(elv_add_request); 1109{
1030EXPORT_SYMBOL(__elv_add_request); 1110 struct rb_node *rbprev = rb_prev(&rq->rb_node);
1031EXPORT_SYMBOL(elv_next_request); 1111
1032EXPORT_SYMBOL(elv_dequeue_request); 1112 if (rbprev)
1033EXPORT_SYMBOL(elv_queue_empty); 1113 return rb_entry_rq(rbprev);
1034EXPORT_SYMBOL(elevator_exit); 1114
1035EXPORT_SYMBOL(elevator_init); 1115 return NULL;
1116}
1117
1118EXPORT_SYMBOL(elv_rb_former_request);
1119
1120struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
1121{
1122 struct rb_node *rbnext = rb_next(&rq->rb_node);
1123
1124 if (rbnext)
1125 return rb_entry_rq(rbnext);
1126
1127 return NULL;
1128}
1129
1130EXPORT_SYMBOL(elv_rb_latter_request);