author	Jens Axboe <jens.axboe@oracle.com>	2007-04-19 06:03:34 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2007-04-30 03:01:22 -0400
commit	edd75ffd92a5b7f6244431e8ff6c32b846f9ba86 (patch)
tree	a6b8d9be552f7eeb36a66693339d3ea840f2904e
parent	67e6b49e39e9b9bf5ce1351ef21dad391856183f (diff)
cfq-iosched: get rid of ->cur_rr and ->cfq_list
It's only used for preemption now that the IDLE and RT queues also use the
rbtree. If we pass an 'add_front' variable to cfq_service_tree_add(), we
can set ->rb_key to 0 to force insertion at the front of the tree.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--	block/cfq-iosched.c	87

1 file changed, 32 insertions(+), 55 deletions(-)
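The mechanism the message above relies on is worth spelling out: service-tree
keys are computed as cfq_slice_offset() + jiffies, so every live key is a
large, growing value, and a hard-coded key of 0 compares lower than all of
them, landing the queue at the leftmost (next-served) position, at least among
queues of the same scheduling class. Below is a minimal standalone sketch of
that ordering property; the struct and function names are made up for
illustration and are not the kernel's.

#include <stdio.h>

/*
 * Hypothetical stand-in for a cfq_queue: just the service-tree key
 * and a label for printing. Not the kernel structure.
 */
struct sketch_queue {
	unsigned long rb_key;
	const char *label;
};

/* Lower rb_key wins, mirroring the tree's leftmost-first service order. */
static const struct sketch_queue *
next_served(const struct sketch_queue *a, const struct sketch_queue *b)
{
	return a->rb_key < b->rb_key ? a : b;
}

int main(void)
{
	unsigned long jiffies = 123456;	/* stand-in for the kernel clock */

	/* normal insert: key = slice offset + jiffies, always nonzero */
	struct sketch_queue waiting = { jiffies + 30, "waiting queue" };
	/* add_front insert: key forced to 0, sorts before any live key */
	struct sketch_queue preempting = { 0, "preempting queue" };

	printf("served next: %s\n",
	       next_served(&preempting, &waiting)->label);
	return 0;
}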
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4a0397022f5b..a8437042e28a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -45,9 +45,6 @@ static int cfq_slice_idle = HZ / 125;
  */
 #define CFQ_QHASH_SHIFT		6
 #define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
-#define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)
-
-#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)
 
 #define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
 #define RQ_CFQQ(rq)		((rq)->elevator_private2)
@@ -91,7 +88,6 @@ struct cfq_data {
 	 * rr list of queues with requests and the count of them
 	 */
 	struct cfq_rb_root service_tree;
-	struct list_head cur_rr;
 	unsigned int busy_queues;
 
 	/*
@@ -146,8 +142,6 @@ struct cfq_queue {
 	struct hlist_node cfq_hash;
 	/* hash key */
 	unsigned int key;
-	/* member of the rr/busy/cur/idle cfqd list */
-	struct list_head cfq_list;
 	/* service_tree member */
 	struct rb_node rb_node;
 	/* service_tree key */
@@ -452,16 +446,19 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * we will service the queues.
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd,
-				 struct cfq_queue *cfqq)
+				 struct cfq_queue *cfqq, int add_front)
 {
 	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
 	struct rb_node *parent = NULL;
 	unsigned long rb_key;
 	int left;
 
-	rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-	rb_key += cfqq->slice_resid;
-	cfqq->slice_resid = 0;
+	if (!add_front) {
+		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
+		rb_key += cfqq->slice_resid;
+		cfqq->slice_resid = 0;
+	} else
+		rb_key = 0;
 
 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 		/*
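Note what the add_front branch above skips as well as what it sets: a
preempting queue neither pays the slice_resid adjustment nor gets a
jiffies-based key, so its rb_key of 0 sorts ahead of every normally inserted
queue (the sketch after the diffstat demonstrates the ordering).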
@@ -516,13 +513,13 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 /*
  * Update cfqq's position in the service tree.
  */
-static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
+static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	/*
 	 * Resorting requires the cfqq to be on the RR list already.
 	 */
 	if (cfq_cfqq_on_rr(cfqq))
-		cfq_service_tree_add(cfqq->cfqd, cfqq);
+		cfq_service_tree_add(cfqd, cfqq, 0);
 }
 
 /*
@@ -536,7 +533,7 @@ cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
 
-	cfq_resort_rr_list(cfqq, 0);
+	cfq_resort_rr_list(cfqd, cfqq);
 }
 
 /*
@@ -548,7 +545,6 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
 	cfq_clear_cfqq_on_rr(cfqq);
-	list_del_init(&cfqq->cfq_list);
 
 	if (!RB_EMPTY_NODE(&cfqq->rb_node))
 		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
@@ -771,7 +767,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (timed_out && !cfq_cfqq_slice_new(cfqq))
 		cfqq->slice_resid = cfqq->slice_end - jiffies;
 
-	cfq_resort_rr_list(cfqq, preempted);
+	cfq_resort_rr_list(cfqd, cfqq);
 
 	if (cfqq == cfqd->active_queue)
 		cfqd->active_queue = NULL;
@@ -799,31 +795,28 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted,
  */
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
-	struct cfq_queue *cfqq = NULL;
+	struct cfq_queue *cfqq;
+	struct rb_node *n;
 
-	if (!list_empty(&cfqd->cur_rr)) {
-		/*
-		 * if current list is non-empty, grab first entry.
-		 */
-		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
-	} else if (!RB_EMPTY_ROOT(&cfqd->service_tree.rb)) {
-		struct rb_node *n = cfq_rb_first(&cfqd->service_tree);
+	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
+		return NULL;
 
-		cfqq = rb_entry(n, struct cfq_queue, rb_node);
-		if (cfq_class_idle(cfqq)) {
-			unsigned long end;
+	n = cfq_rb_first(&cfqd->service_tree);
+	cfqq = rb_entry(n, struct cfq_queue, rb_node);
 
-			/*
-			 * if we have idle queues and no rt or be queues had
-			 * pending requests, either allow immediate service if
-			 * the grace period has passed or arm the idle grace
-			 * timer
-			 */
-			end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-			if (time_before(jiffies, end)) {
-				mod_timer(&cfqd->idle_class_timer, end);
-				cfqq = NULL;
-			}
+	if (cfq_class_idle(cfqq)) {
+		unsigned long end;
+
+		/*
+		 * if we have idle queues and no rt or be queues had
+		 * pending requests, either allow immediate service if
+		 * the grace period has passed or arm the idle grace
+		 * timer
+		 */
+		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+		if (time_before(jiffies, end)) {
+			mod_timer(&cfqd->idle_class_timer, end);
+			cfqq = NULL;
 		}
 	}
 
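With cur_rr gone, cfq_get_next_queue() drops from two candidate sources to
one: if the service tree is empty there is nothing to run, otherwise the
leftmost node is the answer, which is what allows the early return and the
flattened idle-class handling above.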
@@ -1075,18 +1068,6 @@ static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 	return dispatched;
 }
 
-static int cfq_forced_dispatch_cfqqs(struct list_head *list)
-{
-	struct cfq_queue *cfqq, *next;
-	int dispatched;
-
-	dispatched = 0;
-	list_for_each_entry_safe(cfqq, next, list, cfq_list)
-		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
-
-	return dispatched;
-}
-
 /*
  * Drain our current requests. Used for barriers and when switching
  * io schedulers on-the-fly.
@@ -1102,8 +1083,6 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
 	}
 
-	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
-
 	cfq_slice_expired(cfqd, 0, 0);
 
 	BUG_ON(cfqd->busy_queues);
@@ -1433,7 +1412,6 @@ retry:
 		memset(cfqq, 0, sizeof(*cfqq));
 
 		INIT_HLIST_NODE(&cfqq->cfq_hash);
-		INIT_LIST_HEAD(&cfqq->cfq_list);
 		RB_CLEAR_NODE(&cfqq->rb_node);
 		INIT_LIST_HEAD(&cfqq->fifo);
 
@@ -1712,8 +1690,8 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * so we know that it will be selected next.
 	 */
 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
-	list_del_init(&cfqq->cfq_list);
-	list_add(&cfqq->cfq_list, &cfqd->cur_rr);
+
+	cfq_service_tree_add(cfqd, cfqq, 1);
 
 	cfqq->slice_end = 0;
 	cfq_mark_cfqq_slice_new(cfqq);
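Preemption is now expressed purely as tree position: instead of splicing the
queue onto a dedicated cur_rr list, cfq_service_tree_add(cfqd, cfqq, 1)
reinserts it with a zero key, so the next call to cfq_get_next_queue() finds
it via cfq_rb_first() with no special case.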
@@ -2077,7 +2055,6 @@ static void *cfq_init_queue(request_queue_t *q)
 	memset(cfqd, 0, sizeof(*cfqd));
 
 	cfqd->service_tree = CFQ_RB_ROOT;
-	INIT_LIST_HEAD(&cfqd->cur_rr);
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
 	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);