author		Corrado Zoccolo <czoccolo@gmail.com>	2009-10-27 14:16:03 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-10-28 04:23:26 -0400
commit		c0324a020e5b351f100569b128715985f1023af8 (patch)
tree		56d99ec857349fa07dd9ddfdacfa091c7c11af44 /block/cfq-iosched.c
parent		aa6f6a3de18131348f70951efb2c56d806033e09 (diff)
cfq-iosched: reimplement priorities using different service trees
We use different service trees for different priority classes. This
allows a simplification in the service tree insertion code, which no
longer has to consider priority while walking the tree.

Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
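To make the new layout concrete, here is a minimal user-space sketch of the
indexing idea, not the kernel code itself: one service tree per BE/RT class
selected by wl_prio_t, with IDLE kept out of line so the enum's non-negative
values can index a two-element array. The toy_rb_root and toy_cfq_data types
are stand-ins invented for this sketch; only the shape of service_tree_for()
mirrors the patch, and the rb-tree is reduced to its embedded count.

/* service_tree_sketch.c: toy model of per-class service trees.
 * toy_rb_root stands in for struct cfq_rb_root; the tree itself is
 * reduced to its count field, which is all this sketch needs.
 */
#include <assert.h>
#include <stdio.h>

enum wl_prio_t {
        IDLE_WORKLOAD = -1,     /* handled separately, hence negative */
        BE_WORKLOAD = 0,
        RT_WORKLOAD = 1
};

struct toy_rb_root {
        unsigned int count;
};

struct toy_cfq_data {
        struct toy_rb_root service_trees[2];    /* indexed by BE/RT */
        struct toy_rb_root service_tree_idle;
};

/* Same shape as the patch's service_tree_for() helper. */
static struct toy_rb_root *service_tree_for(enum wl_prio_t prio,
                                            struct toy_cfq_data *cfqd)
{
        if (prio == IDLE_WORKLOAD)
                return &cfqd->service_tree_idle;
        return &cfqd->service_trees[prio];
}

int main(void)
{
        struct toy_cfq_data cfqd = { 0 };

        /* Adding a queue touches only the tree of its own class, so
         * insertion never has to compare priority classes ... */
        service_tree_for(RT_WORKLOAD, &cfqd)->count++;
        service_tree_for(BE_WORKLOAD, &cfqd)->count++;
        service_tree_for(IDLE_WORKLOAD, &cfqd)->count++;

        /* ... and per-class busy counts (cfq_busy_queues_wl() in the
         * patch) fall out of the per-tree counts for free. */
        assert(service_tree_for(RT_WORKLOAD, &cfqd)->count == 1);
        assert(service_tree_for(BE_WORKLOAD, &cfqd)->count == 1);
        assert(service_tree_for(IDLE_WORKLOAD, &cfqd)->count == 1);
        printf("RT=%u BE=%u IDLE=%u\n",
               cfqd.service_trees[RT_WORKLOAD].count,
               cfqd.service_trees[BE_WORKLOAD].count,
               cfqd.service_tree_idle.count);
        return 0;
}

With the classes split into separate trees this way, queue selection reduces
to picking the highest non-empty class (RT, then BE, then IDLE), which is
exactly what the new_queue hunk below does with cfq_busy_queues_wl().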
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	116	
1 file changed, 82 insertions(+), 34 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c95c69e199f4..6e5c3d715ebe 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -134,15 +134,31 @@ struct cfq_queue {
 };
 
 /*
+ * Index in the service_trees.
+ * IDLE is handled separately, so it has negative index
+ */
+enum wl_prio_t {
+        IDLE_WORKLOAD = -1,
+        BE_WORKLOAD = 0,
+        RT_WORKLOAD = 1
+};
+
+/*
  * Per block device queue structure
  */
 struct cfq_data {
         struct request_queue *queue;
 
         /*
-         * rr list of queues with requests and the count of them
+         * rr lists of queues with requests, one rr for each priority class.
+         * Counts are embedded in the cfq_rb_root
+         */
+        struct cfq_rb_root service_trees[2];
+        struct cfq_rb_root service_tree_idle;
+        /*
+         * The priority currently being served
          */
-        struct cfq_rb_root service_tree;
+        enum wl_prio_t serving_prio;
 
         /*
          * Each priority tree is sorted by next_request position. These
@@ -152,7 +168,6 @@ struct cfq_data {
         struct rb_root prio_trees[CFQ_PRIO_LISTS];
 
         unsigned int busy_queues;
-        unsigned int busy_rt_queues;
         unsigned int busy_queues_avg[2];
 
         int rq_in_driver[2];
@@ -205,6 +220,15 @@ struct cfq_data {
         unsigned long last_end_sync_rq;
 };
 
+static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio,
+                                            struct cfq_data *cfqd)
+{
+        if (prio == IDLE_WORKLOAD)
+                return &cfqd->service_tree_idle;
+
+        return &cfqd->service_trees[prio];
+}
+
 enum cfqq_state_flags {
         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
@@ -249,6 +273,23 @@ CFQ_CFQQ_FNS(coop);
 #define cfq_log(cfqd, fmt, args...)     \
         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
+static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
+{
+        if (cfq_class_idle(cfqq))
+                return IDLE_WORKLOAD;
+        if (cfq_class_rt(cfqq))
+                return RT_WORKLOAD;
+        return BE_WORKLOAD;
+}
+
+static inline int cfq_busy_queues_wl(enum wl_prio_t wl, struct cfq_data *cfqd)
+{
+        if (wl == IDLE_WORKLOAD)
+                return cfqd->service_tree_idle.count;
+
+        return cfqd->service_trees[wl].count;
+}
+
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
                                        struct io_context *, gfp_t);
@@ -332,10 +373,7 @@ cfq_get_avg_queues(struct cfq_data *cfqd, bool rt) {
         unsigned min_q, max_q;
         unsigned mult = cfq_hist_divisor - 1;
         unsigned round = cfq_hist_divisor / 2;
-        unsigned busy = cfqd->busy_rt_queues;
-
-        if (!rt)
-                busy = cfqd->busy_queues - cfqd->busy_rt_queues;
+        unsigned busy = cfq_busy_queues_wl(rt, cfqd);
 
         min_q = min(cfqd->busy_queues_avg[rt], busy);
         max_q = max(cfqd->busy_queues_avg[rt], busy);
@@ -546,7 +584,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
 }
 
 /*
- * The cfqd->service_tree holds all pending cfq_queue's that have
+ * The cfqd->service_trees holds all pending cfq_queue's that have
  * requests waiting to be processed. It is sorted in the order that
  * we will service the queues.
  */
@@ -556,9 +594,10 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         struct rb_node **p, *parent;
         struct cfq_queue *__cfqq;
         unsigned long rb_key;
-        struct cfq_rb_root *service_tree = &cfqd->service_tree;
+        struct cfq_rb_root *service_tree;
         int left;
 
+        service_tree = service_tree_for(cfqq_prio(cfqq), cfqd);
         if (cfq_class_idle(cfqq)) {
                 rb_key = CFQ_IDLE_DELAY;
                 parent = rb_last(&service_tree->rb);
@@ -587,7 +626,8 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         /*
          * same position, nothing more to do
          */
-        if (rb_key == cfqq->rb_key)
+        if (rb_key == cfqq->rb_key &&
+            cfqq->service_tree == service_tree)
                 return;
 
         cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
@@ -605,25 +645,14 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 
                 /*
-                 * sort RT queues first, we always want to give
-                 * preference to them. IDLE queues goes to the back.
-                 * after that, sort on the next service time.
+                 * sort by key, that represents service time.
                  */
-                if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
-                        n = &(*p)->rb_left;
-                else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
-                        n = &(*p)->rb_right;
-                else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
-                        n = &(*p)->rb_left;
-                else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
-                        n = &(*p)->rb_right;
-                else if (time_before(rb_key, __cfqq->rb_key))
+                if (time_before(rb_key, __cfqq->rb_key))
                         n = &(*p)->rb_left;
-                else
+                else {
                         n = &(*p)->rb_right;
-
-                if (n == &(*p)->rb_right)
                         left = 0;
+                }
 
                 p = n;
         }
@@ -722,8 +751,7 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         BUG_ON(cfq_cfqq_on_rr(cfqq));
         cfq_mark_cfqq_on_rr(cfqq);
         cfqd->busy_queues++;
-        if (cfq_class_rt(cfqq))
-                cfqd->busy_rt_queues++;
+
         cfq_resort_rr_list(cfqd, cfqq);
 }
 
@@ -748,8 +776,6 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
         BUG_ON(!cfqd->busy_queues);
         cfqd->busy_queues--;
-        if (cfq_class_rt(cfqq))
-                cfqd->busy_rt_queues--;
 }
 
 /*
@@ -1003,10 +1029,12 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
  */
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
-        if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
-                return NULL;
+        struct cfq_rb_root *service_tree =
+                service_tree_for(cfqd->serving_prio, cfqd);
 
-        return cfq_rb_first(&cfqd->service_tree);
+        if (RB_EMPTY_ROOT(&service_tree->rb))
+                return NULL;
+        return cfq_rb_first(service_tree);
 }
 
 /*
@@ -1123,6 +1151,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
         if (CFQQ_SEEKY(cfqq))
                 return NULL;
 
+        /*
+         * Do not merge queues of different priority classes
+         */
+        if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
+                return NULL;
+
         return cfqq;
 }
 
1128 1162
@@ -1336,6 +1370,14 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1336expire: 1370expire:
1337 cfq_slice_expired(cfqd, 0); 1371 cfq_slice_expired(cfqd, 0);
1338new_queue: 1372new_queue:
1373 if (!new_cfqq) {
1374 if (cfq_busy_queues_wl(RT_WORKLOAD, cfqd))
1375 cfqd->serving_prio = RT_WORKLOAD;
1376 else if (cfq_busy_queues_wl(BE_WORKLOAD, cfqd))
1377 cfqd->serving_prio = BE_WORKLOAD;
1378 else
1379 cfqd->serving_prio = IDLE_WORKLOAD;
1380 }
1339 cfqq = cfq_set_active_queue(cfqd, new_cfqq); 1381 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
1340keep_queue: 1382keep_queue:
1341 return cfqq; 1383 return cfqq;
@@ -1362,8 +1404,12 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 {
         struct cfq_queue *cfqq;
         int dispatched = 0;
+        int i;
+        for (i = 0; i < 2; ++i)
+                while ((cfqq = cfq_rb_first(&cfqd->service_trees[i])) != NULL)
+                        dispatched += __cfq_forced_dispatch_cfqq(cfqq);
 
-        while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
+        while ((cfqq = cfq_rb_first(&cfqd->service_tree_idle)) != NULL)
                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
 
         cfq_slice_expired(cfqd, 0);
@@ -2710,7 +2756,9 @@ static void *cfq_init_queue(struct request_queue *q)
         if (!cfqd)
                 return NULL;
 
-        cfqd->service_tree = CFQ_RB_ROOT;
+        for (i = 0; i < 2; ++i)
+                cfqd->service_trees[i] = CFQ_RB_ROOT;
+        cfqd->service_tree_idle = CFQ_RB_ROOT;
 
         /*
          * Not strictly needed (since RB_ROOT just clears the node and we