author    Jens Axboe <axboe@suse.de>    2006-07-13 06:33:14 -0400
committer Jens Axboe <axboe@nelson.home.kernel.dk>    2006-09-30 14:26:58 -0400
commit    21183b07ee4be405362af8454f3647781c77df1b (patch)
tree      753d327a8e6d1e0fc7b41eecd68ea52dbb8b24dc
parent    e37f346e347e5035c80760df2be0fcb2824f6c16 (diff)
[PATCH] cfq-iosched: migrate to using the elevator rb functions
This removes the rbtree handling from CFQ.

Signed-off-by: Jens Axboe <axboe@suse.de>
-rw-r--r--    block/cfq-iosched.c    164
1 file changed, 41 insertions(+), 123 deletions(-)
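For context, the generic elevator rb helpers that CFQ switches to below sort struct request itself (via its rb_node) instead of a scheduler-private struct cfq_rq keyed by rb_key. The following is a minimal sketch of how a scheduler-side queue might drive those helpers, based only on the calls visible in this patch; struct example_queue and the example_* wrappers are hypothetical and not part of the change, and the real CFQ code routes aliases through cfq_dispatch_insert() rather than dispatching them directly.

	#include <linux/blkdev.h>
	#include <linux/elevator.h>
	#include <linux/rbtree.h>

	struct example_queue {
		struct rb_root sort_list;	/* pending requests, sorted by sector */
	};

	/*
	 * Insert: elv_rb_add() returns an already-queued request with the same
	 * sort key (an "alias"), if any.  Like CFQ below, pull the alias out of
	 * the tree, push it to the dispatch list, and retry the insert.
	 */
	static void example_add(request_queue_t *q, struct example_queue *eq,
				struct request *rq)
	{
		struct request *alias;

		while ((alias = elv_rb_add(&eq->sort_list, rq)) != NULL) {
			elv_rb_del(&eq->sort_list, alias);
			elv_dispatch_sort(q, alias);
		}
	}

	/* Front-merge lookup by end sector, replacing CFQ's open-coded rb walk. */
	static struct request *example_find(struct example_queue *eq, sector_t sector)
	{
		return elv_rb_find(&eq->sort_list, sector);
	}

	/* Removal; RB_EMPTY_ROOT() then tells the caller whether the queue drained. */
	static int example_del(struct example_queue *eq, struct request *rq)
	{
		elv_rb_del(&eq->sort_list, rq);
		return RB_EMPTY_ROOT(&eq->sort_list);
	}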
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1b803c0c90f1..b2fd8cac2147 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -46,12 +46,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock);
 
 #define RQ_DATA(rq)		(rq)->elevator_private
 
-/*
- * rb-tree defines
- */
-#define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
-#define rq_rb_key(rq)		(rq)->sector
-
 static kmem_cache_t *crq_pool;
 static kmem_cache_t *cfq_pool;
 static kmem_cache_t *cfq_ioc_pool;
@@ -185,8 +179,6 @@ struct cfq_queue {
 };
 
 struct cfq_rq {
-	struct rb_node rb_node;
-	sector_t rb_key;
 	struct request *request;
 
 	struct cfq_queue *cfq_queue;
@@ -376,33 +368,27 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
  */
 static struct cfq_rq *
 cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		  struct cfq_rq *last)
+		  struct cfq_rq *last_crq)
 {
-	struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
-	struct rb_node *rbnext, *rbprev;
-
-	if (!(rbnext = rb_next(&last->rb_node))) {
-		rbnext = rb_first(&cfqq->sort_list);
-		if (rbnext == &last->rb_node)
-			rbnext = NULL;
-	}
+	struct request *last = last_crq->request;
+	struct rb_node *rbnext = rb_next(&last->rb_node);
+	struct rb_node *rbprev = rb_prev(&last->rb_node);
+	struct cfq_rq *next = NULL, *prev = NULL;
 
-	rbprev = rb_prev(&last->rb_node);
+	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
 
 	if (rbprev)
-		crq_prev = rb_entry_crq(rbprev);
-	if (rbnext)
-		crq_next = rb_entry_crq(rbnext);
-
-	return cfq_choose_req(cfqd, crq_next, crq_prev);
-}
+		prev = RQ_DATA(rb_entry_rq(rbprev));
 
-static void cfq_update_next_crq(struct cfq_rq *crq)
-{
-	struct cfq_queue *cfqq = crq->cfq_queue;
+	if (rbnext)
+		next = RQ_DATA(rb_entry_rq(rbnext));
+	else {
+		rbnext = rb_first(&cfqq->sort_list);
+		if (rbnext && rbnext != &last->rb_node)
+			next = RQ_DATA(rb_entry_rq(rbnext));
+	}
 
-	if (cfqq->next_crq == crq)
-		cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
+	return cfq_choose_req(cfqd, next, prev);
 }
 
 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
@@ -497,71 +483,34 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
 	BUG_ON(!cfqq->queued[sync]);
 	cfqq->queued[sync]--;
 
-	cfq_update_next_crq(crq);
-
-	rb_erase(&crq->rb_node, &cfqq->sort_list);
+	elv_rb_del(&cfqq->sort_list, crq->request);
 
 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
 		cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
-static struct cfq_rq *
-__cfq_add_crq_rb(struct cfq_rq *crq)
-{
-	struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
-	struct rb_node *parent = NULL;
-	struct cfq_rq *__crq;
-
-	while (*p) {
-		parent = *p;
-		__crq = rb_entry_crq(parent);
-
-		if (crq->rb_key < __crq->rb_key)
-			p = &(*p)->rb_left;
-		else if (crq->rb_key > __crq->rb_key)
-			p = &(*p)->rb_right;
-		else
-			return __crq;
-	}
-
-	rb_link_node(&crq->rb_node, parent, p);
-	return NULL;
-}
-
 static void cfq_add_crq_rb(struct cfq_rq *crq)
 {
 	struct cfq_queue *cfqq = crq->cfq_queue;
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct request *rq = crq->request;
-	struct cfq_rq *__alias;
+	struct request *__alias;
 
-	crq->rb_key = rq_rb_key(rq);
 	cfqq->queued[cfq_crq_is_sync(crq)]++;
 
 	/*
 	 * looks a little odd, but the first insert might return an alias.
 	 * if that happens, put the alias on the dispatch list
 	 */
-	while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
-		cfq_dispatch_insert(cfqd->queue, __alias);
-
-	rb_insert_color(&crq->rb_node, &cfqq->sort_list);
-
-	if (!cfq_cfqq_on_rr(cfqq))
-		cfq_add_cfqq_rr(cfqd, cfqq);
-
-	/*
-	 * check if this request is a better next-serve candidate
-	 */
-	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
+		cfq_dispatch_insert(cfqd->queue, RQ_DATA(__alias));
 }
 
 static inline void
 cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
 {
-	rb_erase(&crq->rb_node, &cfqq->sort_list);
+	elv_rb_del(&cfqq->sort_list, crq->request);
 	cfqq->queued[cfq_crq_is_sync(crq)]--;
-
 	cfq_add_crq_rb(crq);
 }
 
@@ -570,28 +519,13 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
 	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	sector_t sector = bio->bi_sector + bio_sectors(bio);
 	struct cfq_queue *cfqq;
-	struct rb_node *n;
-	sector_t sector;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
-	if (!cfqq)
-		goto out;
-
-	sector = bio->bi_sector + bio_sectors(bio);
-	n = cfqq->sort_list.rb_node;
-	while (n) {
-		struct cfq_rq *crq = rb_entry_crq(n);
-
-		if (sector < crq->rb_key)
-			n = n->rb_left;
-		else if (sector > crq->rb_key)
-			n = n->rb_right;
-		else
-			return crq->request;
-	}
+	if (cfqq)
+		return elv_rb_find(&cfqq->sort_list, sector);
 
-out:
 	return NULL;
 }
 
@@ -622,6 +556,10 @@ static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
 static void cfq_remove_request(struct request *rq)
 {
 	struct cfq_rq *crq = RQ_DATA(rq);
+	struct cfq_queue *cfqq = crq->cfq_queue;
+
+	if (cfqq->next_crq == crq)
+		cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
 
 	list_del_init(&rq->queuelist);
 	cfq_del_crq_rb(crq);
@@ -642,14 +580,14 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	return ELEVATOR_NO_MERGE;
 }
 
-static void cfq_merged_request(request_queue_t *q, struct request *req)
+static void cfq_merged_request(request_queue_t *q, struct request *req,
+			       int type)
 {
 	struct cfq_rq *crq = RQ_DATA(req);
 
-	if (rq_rb_key(req) != crq->rb_key) {
+	if (type == ELEVATOR_FRONT_MERGE) {
 		struct cfq_queue *cfqq = crq->cfq_queue;
 
-		cfq_update_next_crq(crq);
 		cfq_reposition_crq_rb(cfqq, crq);
 	}
 }
@@ -658,8 +596,6 @@ static void
 cfq_merged_requests(request_queue_t *q, struct request *rq,
 		    struct request *next)
 {
-	cfq_merged_request(q, rq);
-
 	/*
 	 * reposition in fifo if next is older than rq
 	 */
@@ -881,7 +817,6 @@ static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
 	struct cfq_queue *cfqq = crq->cfq_queue;
 	struct request *rq;
 
-	cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
 	cfq_remove_request(crq->request);
 	cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
 	elv_dispatch_sort(q, crq->request);
@@ -1700,6 +1635,12 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct cfq_io_context *cic = crq->io_context;
 
 	/*
+	 * check if this request is a better next-serve candidate
+	 */
+	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+	BUG_ON(!cfqq->next_crq);
+
+	/*
 	 * we never wait for an async request and we don't allow preemption
 	 * of an async request. so just return early
 	 */
@@ -1756,6 +1697,9 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
 
 	cfq_add_crq_rb(crq);
 
+	if (!cfq_cfqq_on_rr(cfqq))
+		cfq_add_cfqq_rr(cfqd, cfqq);
+
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
 	cfq_crq_enqueued(cfqd, cfqq, crq);
@@ -1803,30 +1747,6 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 	}
 }
 
-static struct request *
-cfq_former_request(request_queue_t *q, struct request *rq)
-{
-	struct cfq_rq *crq = RQ_DATA(rq);
-	struct rb_node *rbprev = rb_prev(&crq->rb_node);
-
-	if (rbprev)
-		return rb_entry_crq(rbprev)->request;
-
-	return NULL;
-}
-
-static struct request *
-cfq_latter_request(request_queue_t *q, struct request *rq)
-{
-	struct cfq_rq *crq = RQ_DATA(rq);
-	struct rb_node *rbnext = rb_next(&crq->rb_node);
-
-	if (rbnext)
-		return rb_entry_crq(rbnext)->request;
-
-	return NULL;
-}
-
 /*
  * we temporarily boost lower priority queues if they are holding fs exclusive
  * resources. they are boosted to normal prio (CLASS_BE/4)
@@ -1982,8 +1902,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 
 	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
 	if (crq) {
-		RB_CLEAR_NODE(&crq->rb_node);
-		crq->rb_key = 0;
 		crq->request = rq;
 		crq->cfq_queue = cfqq;
 		crq->io_context = cic;
@@ -2345,8 +2263,8 @@ static struct elevator_type iosched_cfq = {
 	.elevator_deactivate_req_fn =	cfq_deactivate_request,
 	.elevator_queue_empty_fn =	cfq_queue_empty,
 	.elevator_completed_req_fn =	cfq_completed_request,
-	.elevator_former_req_fn =	cfq_former_request,
-	.elevator_latter_req_fn =	cfq_latter_request,
+	.elevator_former_req_fn =	elv_rb_former_request,
+	.elevator_latter_req_fn =	elv_rb_latter_request,
 	.elevator_set_req_fn =		cfq_set_request,
 	.elevator_put_req_fn =		cfq_put_request,
 	.elevator_may_queue_fn =	cfq_may_queue,
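The deleted cfq_former_request()/cfq_latter_request() pair is replaced by the shared elv_rb_former_request()/elv_rb_latter_request() hooks, which walk to the rb-tree neighbour of rq->rb_node in the sort list. The sketch below mirrors the deleted CFQ versions in terms of the request-embedded rb_node; the sketch_* names are illustrative and not the elevator.c code itself.

	static struct request *sketch_former_request(request_queue_t *q,
						     struct request *rq)
	{
		struct rb_node *rbprev = rb_prev(&rq->rb_node);

		return rbprev ? rb_entry_rq(rbprev) : NULL;
	}

	static struct request *sketch_latter_request(request_queue_t *q,
						     struct request *rq)
	{
		struct rb_node *rbnext = rb_next(&rq->rb_node);

		return rbnext ? rb_entry_rq(rbnext) : NULL;
	}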