Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	626
1 file changed, 529 insertions(+), 97 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index aa1e9535e358..13b612f9f27a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -27,6 +27,8 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
+static const int cfq_hist_divisor = 4;
 
 /*
  * offset from end of service tree
@@ -38,6 +40,12 @@ static int cfq_slice_idle = HZ / 125;
  */
 #define CFQ_MIN_TT		(2)
 
+/*
+ * Allow merged cfqqs to perform this amount of seeky I/O before
+ * deciding to break the queues up again.
+ */
+#define CFQQ_COOP_TOUT		(HZ)
+
 #define CFQ_SLICE_SCALE		(5)
 #define CFQ_HW_QUEUE_MIN	(5)
 
@@ -67,8 +75,9 @@ static DEFINE_SPINLOCK(ioc_gone_lock);
 struct cfq_rb_root {
 	struct rb_root rb;
 	struct rb_node *left;
+	unsigned count;
 };
-#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
+#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, 0, }
 
 /*
  * Per process-grouping structure
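The new count field makes per-tree occupancy checks O(1). As a minimal userspace sketch of the bookkeeping this implies (a sorted singly-linked list stands in for the kernel rbtree; all names here are illustrative, not kernel API):

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for cfq_rb_root: a sorted list plus a cached count. */
struct toy_node {
	unsigned long key;
	struct toy_node *next;
};

struct counted_root {
	struct toy_node *head;	/* plays the role of rb_root + cached leftmost */
	unsigned count;		/* mirrors cfq_rb_root.count */
};

static void root_insert(struct counted_root *root, struct toy_node *n)
{
	struct toy_node **p = &root->head;

	while (*p && (*p)->key <= n->key)
		p = &(*p)->next;
	n->next = *p;
	*p = n;
	root->count++;		/* kept in sync on every insert */
}

static void root_erase(struct counted_root *root, struct toy_node *n)
{
	struct toy_node **p = &root->head;

	while (*p && *p != n)
		p = &(*p)->next;
	assert(*p == n);
	*p = n->next;
	--root->count;		/* mirrors the decrement in cfq_rb_erase() */
}

int main(void)
{
	struct counted_root root = { NULL, 0 };
	struct toy_node a = { 10, NULL }, b = { 5, NULL };

	root_insert(&root, &a);
	root_insert(&root, &b);
	printf("count=%u leftmost=%lu\n", root.count, root.head->key);
	root_erase(&root, &b);
	printf("count=%u\n", root.count);	/* O(1), no tree walk */
	return 0;
}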
@@ -112,19 +121,56 @@ struct cfq_queue {
 	unsigned short ioprio, org_ioprio;
 	unsigned short ioprio_class, org_ioprio_class;
 
+	unsigned int seek_samples;
+	u64 seek_total;
+	sector_t seek_mean;
+	sector_t last_request_pos;
+	unsigned long seeky_start;
+
 	pid_t pid;
+
+	struct cfq_rb_root *service_tree;
+	struct cfq_queue *new_cfqq;
 };
 
 /*
+ * First index in the service_trees.
+ * IDLE is handled separately, so it has negative index
+ */
+enum wl_prio_t {
+	IDLE_WORKLOAD = -1,
+	BE_WORKLOAD = 0,
+	RT_WORKLOAD = 1
+};
+
+/*
+ * Second index in the service_trees.
+ */
+enum wl_type_t {
+	ASYNC_WORKLOAD = 0,
+	SYNC_NOIDLE_WORKLOAD = 1,
+	SYNC_WORKLOAD = 2
+};
+
+
+/*
  * Per block device queue structure
  */
 struct cfq_data {
 	struct request_queue *queue;
 
 	/*
-	 * rr list of queues with requests and the count of them
+	 * rr lists of queues with requests, one rr for each priority class.
+	 * Counts are embedded in the cfq_rb_root
 	 */
-	struct cfq_rb_root service_tree;
+	struct cfq_rb_root service_trees[2][3];
+	struct cfq_rb_root service_tree_idle;
+	/*
+	 * The priority currently being served
+	 */
+	enum wl_prio_t serving_prio;
+	enum wl_type_t serving_type;
+	unsigned long workload_expires;
 
 	/*
 	 * Each priority tree is sorted by next_request position. These
@@ -134,6 +180,7 @@ struct cfq_data {
 	struct rb_root prio_trees[CFQ_PRIO_LISTS];
 
 	unsigned int busy_queues;
+	unsigned int busy_queues_avg[2];
 
 	int rq_in_driver[2];
 	int sync_flight;
@@ -185,6 +232,16 @@ struct cfq_data {
 	unsigned long last_end_sync_rq;
 };
 
+static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio,
+					    enum wl_type_t type,
+					    struct cfq_data *cfqd)
+{
+	if (prio == IDLE_WORKLOAD)
+		return &cfqd->service_tree_idle;
+
+	return &cfqd->service_trees[prio][type];
+}
+
 enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
 	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
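service_tree_for() resolves the two-level index: the priority class picks the row, the workload type picks the column, and the idle class bypasses the matrix entirely (which is why IDLE_WORKLOAD can safely be -1). A hedged userspace sketch of the same lookup, with toy types standing in for the kernel structures:

#include <stdio.h>

enum wl_prio_t { IDLE_WORKLOAD = -1, BE_WORKLOAD = 0, RT_WORKLOAD = 1 };
enum wl_type_t { ASYNC_WORKLOAD = 0, SYNC_NOIDLE_WORKLOAD = 1, SYNC_WORKLOAD = 2 };

struct toy_root { unsigned count; };

struct toy_cfqd {
	struct toy_root service_trees[2][3];	/* [BE,RT] x [ASYNC,NOIDLE,SYNC] */
	struct toy_root service_tree_idle;	/* IDLE handled out of band */
};

static struct toy_root *tree_for(struct toy_cfqd *d, enum wl_prio_t prio,
				 enum wl_type_t type)
{
	if (prio == IDLE_WORKLOAD)
		return &d->service_tree_idle;	/* negative index never used */
	return &d->service_trees[prio][type];
}

int main(void)
{
	static struct toy_cfqd d;		/* zero-initialized */

	tree_for(&d, RT_WORKLOAD, SYNC_WORKLOAD)->count = 3;
	tree_for(&d, IDLE_WORKLOAD, ASYNC_WORKLOAD)->count = 1;
	printf("RT/SYNC: %u, IDLE: %u\n",
	       d.service_trees[RT_WORKLOAD][SYNC_WORKLOAD].count,
	       d.service_tree_idle.count);
	return 0;
}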
@@ -195,7 +252,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
-	CFQ_CFQQ_FLAG_coop,		/* has done a coop jump of the queue */
+	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
 	CFQ_CFQQ_FLAG_coop_preempt,	/* coop preempt */
 };
 
@@ -231,6 +288,35 @@ CFQ_CFQQ_FNS(coop_preempt);
 #define cfq_log(cfqd, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
+static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
+{
+	if (cfq_class_idle(cfqq))
+		return IDLE_WORKLOAD;
+	if (cfq_class_rt(cfqq))
+		return RT_WORKLOAD;
+	return BE_WORKLOAD;
+}
+
+
+static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
+{
+	if (!cfq_cfqq_sync(cfqq))
+		return ASYNC_WORKLOAD;
+	if (!cfq_cfqq_idle_window(cfqq))
+		return SYNC_NOIDLE_WORKLOAD;
+	return SYNC_WORKLOAD;
+}
+
+static inline int cfq_busy_queues_wl(enum wl_prio_t wl, struct cfq_data *cfqd)
+{
+	if (wl == IDLE_WORKLOAD)
+		return cfqd->service_tree_idle.count;
+
+	return cfqd->service_trees[wl][ASYNC_WORKLOAD].count
+		+ cfqd->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
+		+ cfqd->service_trees[wl][SYNC_WORKLOAD].count;
+}
+
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 				       struct io_context *, gfp_t);
@@ -303,10 +389,49 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 }
 
+/*
+ * get averaged number of queues of RT/BE priority.
+ * the average is updated with a formula that gives more weight to higher
+ * numbers, so it follows sudden increases quickly and decreases slowly.
+ */
+
+static inline unsigned cfq_get_avg_queues(struct cfq_data *cfqd, bool rt)
+{
+	unsigned min_q, max_q;
+	unsigned mult = cfq_hist_divisor - 1;
+	unsigned round = cfq_hist_divisor / 2;
+	unsigned busy = cfq_busy_queues_wl(rt, cfqd);
+
+	min_q = min(cfqd->busy_queues_avg[rt], busy);
+	max_q = max(cfqd->busy_queues_avg[rt], busy);
+	cfqd->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
+		cfq_hist_divisor;
+	return cfqd->busy_queues_avg[rt];
+}
+
 static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
+	if (cfqd->cfq_latency) {
+		/* interested queues (we consider only the ones with the same
+		 * priority class) */
+		unsigned iq = cfq_get_avg_queues(cfqd, cfq_class_rt(cfqq));
+		unsigned sync_slice = cfqd->cfq_slice[1];
+		unsigned expect_latency = sync_slice * iq;
+		if (expect_latency > cfq_target_latency) {
+			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
+			/* scale low_slice according to IO priority
+			 * and sync vs async */
+			unsigned low_slice =
+				min(slice, base_low_slice * slice / sync_slice);
+			/* the adapted slice value is scaled to fit all iqs
+			 * into the target latency */
+			slice = max(slice * cfq_target_latency / expect_latency,
+				    low_slice);
+		}
+	}
+	cfqq->slice_end = jiffies + slice;
 	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
 }
 
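To see what the slice scaling does, assume HZ=1000 and the file's usual defaults (100 ms sync slice, 8 ms slice_idle); those values come from the defaults at the top of the file, not from this hunk. The following standalone sketch reproduces the arithmetic of cfq_set_prio_slice() for a growing number of competing queues:

#include <stdio.h>

#define HZ 1000					/* assumed for illustration */

static const unsigned target_latency = HZ * 3 / 10;	/* 300 ms */

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }
static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

/* Sketch of the scaling step: shrink the slice so that `iq` queues of
 * this class fit into the 300 ms target latency, but never below a
 * scaled low_slice floor. */
static unsigned scaled_slice(unsigned slice, unsigned iq,
			     unsigned sync_slice, unsigned slice_idle)
{
	unsigned expect_latency = sync_slice * iq;

	if (expect_latency > target_latency) {
		unsigned base_low_slice = 2 * slice_idle;
		unsigned low_slice =
			min_u(slice, base_low_slice * slice / sync_slice);
		slice = max_u(slice * target_latency / expect_latency,
			      low_slice);
	}
	return slice;
}

int main(void)
{
	/* 100 ms sync slice, 8 ms idle: the usual CFQ defaults. */
	unsigned sync_slice = HZ / 10, slice_idle = HZ / 125;
	unsigned iq;

	for (iq = 1; iq <= 16; iq *= 2)
		printf("%2u queues -> slice %3u ms\n", iq,
		       scaled_slice(sync_slice, iq, sync_slice, slice_idle));
	return 0;	/* 1,2 queues keep 100 ms; 4 -> 75; 8 -> 37; 16 -> 18 */
}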
@@ -445,6 +570,7 @@ static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
 	if (root->left == n)
 		root->left = NULL;
 	rb_erase_init(n, &root->rb);
+	--root->count;
 }
 
 /*
@@ -485,7 +611,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
 }
 
 /*
- * The cfqd->service_tree holds all pending cfq_queue's that have
+ * The cfqd->service_trees hold all pending cfq_queue's that have
  * requests waiting to be processed. It is sorted in the order that
  * we will service the queues.
  */
@@ -495,11 +621,13 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct rb_node **p, *parent;
 	struct cfq_queue *__cfqq;
 	unsigned long rb_key;
+	struct cfq_rb_root *service_tree;
 	int left;
 
+	service_tree = service_tree_for(cfqq_prio(cfqq), cfqq_type(cfqq), cfqd);
 	if (cfq_class_idle(cfqq)) {
 		rb_key = CFQ_IDLE_DELAY;
-		parent = rb_last(&cfqd->service_tree.rb);
+		parent = rb_last(&service_tree->rb);
 		if (parent && parent != &cfqq->rb_node) {
 			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 			rb_key += __cfqq->rb_key;
@@ -517,7 +645,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfqq->slice_resid = 0;
 	} else {
 		rb_key = -HZ;
-		__cfqq = cfq_rb_first(&cfqd->service_tree);
+		__cfqq = cfq_rb_first(service_tree);
 		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
 	}
 
523 651
@@ -525,15 +653,18 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
525 /* 653 /*
526 * same position, nothing more to do 654 * same position, nothing more to do
527 */ 655 */
528 if (rb_key == cfqq->rb_key) 656 if (rb_key == cfqq->rb_key &&
657 cfqq->service_tree == service_tree)
529 return; 658 return;
530 659
531 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); 660 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
661 cfqq->service_tree = NULL;
532 } 662 }
533 663
534 left = 1; 664 left = 1;
535 parent = NULL; 665 parent = NULL;
536 p = &cfqd->service_tree.rb.rb_node; 666 cfqq->service_tree = service_tree;
667 p = &service_tree->rb.rb_node;
537 while (*p) { 668 while (*p) {
538 struct rb_node **n; 669 struct rb_node **n;
539 670
@@ -541,35 +672,25 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 
 		/*
-		 * sort RT queues first, we always want to give
-		 * preference to them. IDLE queues goes to the back.
-		 * after that, sort on the next service time.
+		 * sort by key, which represents service time.
 		 */
-		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
-			n = &(*p)->rb_left;
-		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
-			n = &(*p)->rb_right;
-		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
-			n = &(*p)->rb_left;
-		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
-			n = &(*p)->rb_right;
-		else if (time_before(rb_key, __cfqq->rb_key))
+		if (time_before(rb_key, __cfqq->rb_key))
 			n = &(*p)->rb_left;
-		else
+		else {
 			n = &(*p)->rb_right;
-
-		if (n == &(*p)->rb_right)
 			left = 0;
+		}
 
 		p = n;
 	}
 
 	if (left)
-		cfqd->service_tree.left = &cfqq->rb_node;
+		service_tree->left = &cfqq->rb_node;
 
 	cfqq->rb_key = rb_key;
 	rb_link_node(&cfqq->rb_node, parent, p);
-	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
+	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
+	service_tree->count++;
 }
 
 static struct cfq_queue *
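With RT/BE/IDLE split into separate trees, the insertion comparator reduces to time_before() on rb_key. That comparison is wraparound-safe where a naive '<' on jiffies is not; a small standalone demonstration (the signed-subtraction trick is reproduced here for illustration):

#include <stdio.h>

/* Userspace copy of the kernel's wraparound-safe comparison: true if
 * a is before b even when the unsigned counter has wrapped. */
static int time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

int main(void)
{
	unsigned long near_wrap = (unsigned long)-10;	/* jiffies about to wrap */
	unsigned long wrapped = 5;			/* jiffies after wrapping */

	/* A naive '<' orders these wrongly; time_before() does not. */
	printf("naive:       %d\n", near_wrap < wrapped);		/* 0 */
	printf("time_before: %d\n", time_before(near_wrap, wrapped));	/* 1 */
	return 0;
}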
@@ -671,8 +792,10 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
 	cfq_clear_cfqq_on_rr(cfqq);
 
-	if (!RB_EMPTY_NODE(&cfqq->rb_node))
-		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
+		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
+		cfqq->service_tree = NULL;
+	}
 	if (cfqq->p_root) {
 		rb_erase(&cfqq->p_node, cfqq->p_root);
 		cfqq->p_root = NULL;
@@ -933,10 +1056,12 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 	 */
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
-	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
-		return NULL;
+	struct cfq_rb_root *service_tree =
+		service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd);
 
-	return cfq_rb_first(&cfqd->service_tree);
+	if (RB_EMPTY_ROOT(&service_tree->rb))
+		return NULL;
+	return cfq_rb_first(service_tree);
 }
 
 /*
@@ -947,6 +1072,7 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 {
 	if (!cfqq) {
 		cfqq = cfq_get_next_queue(cfqd);
+
 		if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
 			cfq_clear_cfqq_coop(cfqq);
 	}
@@ -967,16 +1093,16 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 	return cfqd->last_position - blk_rq_pos(rq);
 }
 
-#define CIC_SEEK_THR	8 * 1024
-#define CIC_SEEKY(cic)	((cic)->seek_mean > CIC_SEEK_THR)
+#define CFQQ_SEEK_THR		8 * 1024
+#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
 
-static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
+static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+			       struct request *rq)
 {
-	struct cfq_io_context *cic = cfqd->active_cic;
-	sector_t sdist = cic->seek_mean;
+	sector_t sdist = cfqq->seek_mean;
 
-	if (!sample_valid(cic->seek_samples))
-		sdist = CIC_SEEK_THR;
+	if (!sample_valid(cfqq->seek_samples))
+		sdist = CFQQ_SEEK_THR;
 
 	return cfq_dist_from_last(cfqd, rq) <= sdist;
 }
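The seek statistics (and the SEEKY classification) now live on the cfq_queue rather than the io_context, so a shared queue is judged by what all its processes do together. A toy sketch of the classification rule; the sample_valid() threshold used here is an assumption for illustration:

#include <stdio.h>

typedef unsigned long long sector_t;

#define CFQQ_SEEK_THR	(8 * 1024)	/* 8K sectors, as in the patch */

/* A queue is "seeky" once its mean seek distance exceeds the
 * threshold; with too few samples we assume the threshold itself. */
static int seeky(sector_t seek_mean, unsigned samples)
{
	if (samples <= 80)		/* sample_valid() analogue (assumed) */
		seek_mean = CFQQ_SEEK_THR;
	return seek_mean > CFQQ_SEEK_THR;
}

int main(void)
{
	printf("sequential reader: %d\n", seeky(64, 200));	/* 0 */
	printf("random reader:     %d\n", seeky(500000, 200));	/* 1 */
	printf("too few samples:   %d\n", seeky(500000, 10));	/* 0 */
	return 0;
}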
@@ -1005,7 +1131,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 	 * will contain the closest sector.
 	 */
 	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, __cfqq->next_rq))
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 		return __cfqq;
 
 	if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1016,7 +1142,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 		return NULL;
 
 	__cfqq = rb_entry(node, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, __cfqq->next_rq))
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 		return __cfqq;
 
 	return NULL;
@@ -1033,16 +1159,13 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
  * assumption.
  */
 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
-					      struct cfq_queue *cur_cfqq,
-					      bool probe)
+					      struct cfq_queue *cur_cfqq)
 {
 	struct cfq_queue *cfqq;
 
-	/*
-	 * A valid cfq_io_context is necessary to compare requests against
-	 * the seek_mean of the current cfqq.
-	 */
-	if (!cfqd->active_cic)
+	if (!cfq_cfqq_sync(cur_cfqq))
+		return NULL;
+	if (CFQQ_SEEKY(cur_cfqq))
 		return NULL;
 
 	/*
@@ -1054,14 +1177,53 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 	if (!cfqq)
 		return NULL;
 
-	if (cfq_cfqq_coop(cfqq))
+	/*
+	 * It only makes sense to merge sync queues.
+	 */
+	if (!cfq_cfqq_sync(cfqq))
+		return NULL;
+	if (CFQQ_SEEKY(cfqq))
+		return NULL;
+
+	/*
+	 * Do not merge queues of different priority classes
+	 */
+	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
 		return NULL;
 
-	if (!probe)
-		cfq_mark_cfqq_coop(cfqq);
 	return cfqq;
 }
 
+/*
+ * Determine whether we should enforce idle window for this queue.
+ */
+
+static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	enum wl_prio_t prio = cfqq_prio(cfqq);
+	struct cfq_rb_root *service_tree = cfqq->service_tree;
+
+	/* We never do for idle class queues. */
+	if (prio == IDLE_WORKLOAD)
+		return false;
+
+	/* We do for queues that were marked with idle window flag. */
+	if (cfq_cfqq_idle_window(cfqq))
+		return true;
+
+	/*
+	 * Otherwise, we do only if they are the last ones
+	 * in their service tree.
+	 */
+	if (!service_tree)
+		service_tree = service_tree_for(prio, cfqq_type(cfqq), cfqd);
+
+	if (service_tree->count == 0)
+		return true;
+
+	return (service_tree->count == 1 && cfq_rb_first(service_tree) == cfqq);
+}
+
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
@@ -1082,7 +1244,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	/*
 	 * idle is disabled, either manually or by past process history
 	 */
-	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
+	if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
 		return;
 
 	/*
@@ -1109,14 +1271,20 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 
 	cfq_mark_cfqq_wait_request(cfqq);
 
-	/*
-	 * we don't want to idle for seeks, but we do want to allow
-	 * fair distribution of slice time for a process doing back-to-back
-	 * seeks. so allow a little bit of time for him to submit a new rq
-	 */
 	sl = cfqd->cfq_slice_idle;
-	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+	/* are we servicing noidle tree, and there are more queues?
+	 * non-rotational or NCQ: no idle
+	 * non-NCQ rotational : very small idle, to allow
+	 *	fair distribution of slice time for a process doing
+	 *	back-to-back seeks.
+	 */
+	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
+	    service_tree_for(cfqd->serving_prio, SYNC_NOIDLE_WORKLOAD, cfqd)
+		->count > 0) {
+		if (blk_queue_nonrot(cfqd->queue) || cfqd->hw_tag)
+			return;
 		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
+	}
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
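The new arming policy can be condensed into a small decision function. A hedged sketch (times in milliseconds, kernel flags flattened to plain ints; these are not the kernel signatures):

#include <stdio.h>

#define CFQ_MIN_TT_MS	2	/* analogue of CFQ_MIN_TT, in ms here */

/* On the no-idle tree with other queues present, NCQ/SSD devices skip
 * idling entirely, while plain rotational disks idle only for the tiny
 * CFQ_MIN_TT window. Elsewhere, the full slice_idle applies. */
static int idle_time_ms(int noidle_tree_busy, int nonrot_or_ncq,
			int slice_idle_ms)
{
	int sl = slice_idle_ms;

	if (noidle_tree_busy) {
		if (nonrot_or_ncq)
			return 0;	/* don't idle at all */
		sl = sl < CFQ_MIN_TT_MS ? sl : CFQ_MIN_TT_MS;
	}
	return sl;
}

int main(void)
{
	printf("sync tree, disk: %d ms\n", idle_time_ms(0, 0, 8));	/* 8 */
	printf("noidle, SSD:     %d ms\n", idle_time_ms(1, 1, 8));	/* 0 */
	printf("noidle, disk:    %d ms\n", idle_time_ms(1, 0, 8));	/* 2 */
	return 0;
}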
@@ -1175,6 +1343,152 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 }
 
 /*
+ * Must be called with the queue_lock held.
+ */
+static int cfqq_process_refs(struct cfq_queue *cfqq)
+{
+	int process_refs, io_refs;
+
+	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
+	process_refs = atomic_read(&cfqq->ref) - io_refs;
+	BUG_ON(process_refs < 0);
+	return process_refs;
+}
+
+static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
+{
+	int process_refs, new_process_refs;
+	struct cfq_queue *__cfqq;
+
+	/* Avoid a circular list and skip interim queue merges */
+	while ((__cfqq = new_cfqq->new_cfqq)) {
+		if (__cfqq == cfqq)
+			return;
+		new_cfqq = __cfqq;
+	}
+
+	process_refs = cfqq_process_refs(cfqq);
+	/*
+	 * If the process for the cfqq has gone away, there is no
+	 * sense in merging the queues.
+	 */
+	if (process_refs == 0)
+		return;
+
+	/*
+	 * Merge in the direction of the lesser amount of work.
+	 */
+	new_process_refs = cfqq_process_refs(new_cfqq);
+	if (new_process_refs >= process_refs) {
+		cfqq->new_cfqq = new_cfqq;
+		atomic_add(process_refs, &new_cfqq->ref);
+	} else {
+		new_cfqq->new_cfqq = cfqq;
+		atomic_add(new_process_refs, &cfqq->ref);
+	}
+}
+
+static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd, enum wl_prio_t prio,
+				    bool prio_changed)
+{
+	struct cfq_queue *queue;
+	int i;
+	bool key_valid = false;
+	unsigned long lowest_key = 0;
+	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
+
+	if (prio_changed) {
+		/*
+		 * When priorities switched, we prefer starting
+		 * from SYNC_NOIDLE (first choice), or just SYNC
+		 * over ASYNC
+		 */
+		if (service_tree_for(prio, cur_best, cfqd)->count)
+			return cur_best;
+		cur_best = SYNC_WORKLOAD;
+		if (service_tree_for(prio, cur_best, cfqd)->count)
+			return cur_best;
+
+		return ASYNC_WORKLOAD;
+	}
+
+	for (i = 0; i < 3; ++i) {
+		/* otherwise, select the one with lowest rb_key */
+		queue = cfq_rb_first(service_tree_for(prio, i, cfqd));
+		if (queue &&
+		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
+			lowest_key = queue->rb_key;
+			cur_best = i;
+			key_valid = true;
+		}
+	}
+
+	return cur_best;
+}
+
+static void choose_service_tree(struct cfq_data *cfqd)
+{
+	enum wl_prio_t previous_prio = cfqd->serving_prio;
+	bool prio_changed;
+	unsigned slice;
+	unsigned count;
+
+	/* Choose next priority. RT > BE > IDLE */
+	if (cfq_busy_queues_wl(RT_WORKLOAD, cfqd))
+		cfqd->serving_prio = RT_WORKLOAD;
+	else if (cfq_busy_queues_wl(BE_WORKLOAD, cfqd))
+		cfqd->serving_prio = BE_WORKLOAD;
+	else {
+		cfqd->serving_prio = IDLE_WORKLOAD;
+		cfqd->workload_expires = jiffies + 1;
+		return;
+	}
+
+	/*
+	 * For RT and BE, we also have to choose the type
+	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
+	 * expiration time
+	 */
+	prio_changed = (cfqd->serving_prio != previous_prio);
+	count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd)
+		->count;
+
+	/*
+	 * If priority didn't change, check workload expiration,
+	 * and that we still have other queues ready
+	 */
+	if (!prio_changed && count &&
+	    !time_after(jiffies, cfqd->workload_expires))
+		return;
+
+	/* otherwise select new workload type */
+	cfqd->serving_type =
+		cfq_choose_wl(cfqd, cfqd->serving_prio, prio_changed);
+	count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd)
+		->count;
+
+	/*
+	 * the workload slice is computed as a fraction of target latency
+	 * proportional to the number of queues in that workload, over
+	 * all the queues in the same priority class
+	 */
+	slice = cfq_target_latency * count /
+		max_t(unsigned, cfqd->busy_queues_avg[cfqd->serving_prio],
+		      cfq_busy_queues_wl(cfqd->serving_prio, cfqd));
+
+	if (cfqd->serving_type == ASYNC_WORKLOAD)
+		/* async workload slice is scaled down according to
+		 * the sync/async slice ratio. */
+		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
+	else
+		/* sync workload slice is at least 2 * cfq_slice_idle */
+		slice = max(slice, 2 * cfqd->cfq_slice_idle);
+
+	slice = max_t(unsigned, slice, CFQ_MIN_TT);
+	cfqd->workload_expires = jiffies + slice;
+}
+
+/*
  * Select a queue for service. If we have a current active queue,
  * check whether to continue servicing it, or retrieve and set a new one.
  */
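cfq_setup_merge() always merges toward the queue with more process references, transferring the smaller queue's references so the target cannot go away underneath it. A userspace sketch with a toy queue type (plain ints stand in for atomic_t):

#include <stdio.h>

struct toy_cfqq {
	const char *name;
	int ref;		/* total refcount */
	int io_refs;		/* refs held by allocated requests */
	struct toy_cfqq *new_cfqq;
};

static int process_refs(struct toy_cfqq *q)
{
	return q->ref - q->io_refs;	/* as in cfqq_process_refs() */
}

/* Point the queue with fewer process references at the busier one,
 * and transfer that many references onto the merge target. */
static void setup_merge(struct toy_cfqq *q, struct toy_cfqq *nq)
{
	int pr = process_refs(q), npr = process_refs(nq);

	if (pr == 0)
		return;			/* owner gone, nothing to merge */
	if (npr >= pr) {
		q->new_cfqq = nq;
		nq->ref += pr;
	} else {
		nq->new_cfqq = q;
		q->ref += npr;
	}
}

int main(void)
{
	struct toy_cfqq a = { "a", 3, 1, NULL };	/* 2 process refs */
	struct toy_cfqq b = { "b", 5, 1, NULL };	/* 4 process refs */

	setup_merge(&a, &b);
	printf("a -> %s, b.ref=%d\n",
	       a.new_cfqq ? a.new_cfqq->name : "-", b.ref);	/* a -> b, 7 */
	return 0;
}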
@@ -1203,11 +1517,14 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * If another queue has a request waiting within our mean seek
 	 * distance, let it run. The expire code will check for close
 	 * cooperators and put the close queue at the front of the service
-	 * tree.
+	 * tree. If possible, merge the expiring queue with the new cfqq.
 	 */
-	new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
-	if (new_cfqq)
+	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
+	if (new_cfqq) {
+		if (!cfqq->new_cfqq)
+			cfq_setup_merge(cfqq, new_cfqq);
 		goto expire;
+	}
 
 	/*
 	 * No requests pending. If the active queue still has requests in
@@ -1215,7 +1532,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * conditions to happen (or time out) before selecting a new queue.
 	 */
 	if (timer_pending(&cfqd->idle_slice_timer) ||
-	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
+	    (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
 		cfqq = NULL;
 		goto keep_queue;
 	}
@@ -1223,6 +1540,13 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 expire:
 	cfq_slice_expired(cfqd, 0);
 new_queue:
+	/*
+	 * Current queue expired. Check if we have to switch to a new
+	 * service tree
+	 */
+	if (!new_cfqq)
+		choose_service_tree(cfqd);
+
 	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
 keep_queue:
 	return cfqq;
@@ -1249,8 +1573,14 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq;
 	int dispatched = 0;
-
-	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
+	int i, j;
+	for (i = 0; i < 2; ++i)
+		for (j = 0; j < 3; ++j)
+			while ((cfqq = cfq_rb_first(&cfqd->service_trees[i][j]))
+				!= NULL)
+				dispatched += __cfq_forced_dispatch_cfqq(cfqq);
+
+	while ((cfqq = cfq_rb_first(&cfqd->service_tree_idle)) != NULL)
 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
 
 	cfq_slice_expired(cfqd, 0);
@@ -1268,7 +1598,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	/*
 	 * Drain async requests before we start sync IO
 	 */
-	if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
 		return false;
 
 	/*
@@ -1518,11 +1848,29 @@ static void cfq_free_io_context(struct io_context *ioc)
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	struct cfq_queue *__cfqq, *next;
+
 	if (unlikely(cfqq == cfqd->active_queue)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
 		cfq_schedule_dispatch(cfqd);
 	}
 
+	/*
+	 * If this queue was scheduled to merge with another queue, be
+	 * sure to drop the reference taken on that queue (and others in
+	 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
+	 */
+	__cfqq = cfqq->new_cfqq;
+	while (__cfqq) {
+		if (__cfqq == cfqq) {
+			WARN(1, "cfqq->new_cfqq loop detected\n");
+			break;
+		}
+		next = __cfqq->new_cfqq;
+		cfq_put_queue(__cfqq);
+		__cfqq = next;
+	}
+
 	cfq_put_queue(cfqq);
 }
 
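The teardown mirrors the setup: walk the new_cfqq chain, drop one reference per link, and bail out if the chain ever loops back to the exiting queue. A standalone sketch with the same toy queue type as above:

#include <stdio.h>

struct toy_cfqq {
	const char *name;
	int ref;
	struct toy_cfqq *new_cfqq;
};

static void put_queue(struct toy_cfqq *q)
{
	if (--q->ref == 0)
		printf("%s freed\n", q->name);
}

/* Walk the merge chain, dropping the reference setup_merge() took on
 * each link, guarding against a chain that loops back to the start. */
static void drop_merge_chain(struct toy_cfqq *cfqq)
{
	struct toy_cfqq *__cfqq = cfqq->new_cfqq, *next;

	while (__cfqq) {
		if (__cfqq == cfqq) {
			fprintf(stderr, "new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		put_queue(__cfqq);
		__cfqq = next;
	}
}

int main(void)
{
	struct toy_cfqq c = { "c", 1, NULL };
	struct toy_cfqq b = { "b", 2, &c };
	struct toy_cfqq a = { "a", 1, &b };

	drop_merge_chain(&a);		/* drops one ref on b, frees c */
	printf("b.ref=%d\n", b.ref);	/* 1 */
	return 0;
}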
@@ -1952,33 +2300,46 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
 }
 
 static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
+cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		       struct request *rq)
 {
 	sector_t sdist;
 	u64 total;
 
-	if (!cic->last_request_pos)
+	if (!cfqq->last_request_pos)
 		sdist = 0;
-	else if (cic->last_request_pos < blk_rq_pos(rq))
-		sdist = blk_rq_pos(rq) - cic->last_request_pos;
+	else if (cfqq->last_request_pos < blk_rq_pos(rq))
+		sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
 	else
-		sdist = cic->last_request_pos - blk_rq_pos(rq);
+		sdist = cfqq->last_request_pos - blk_rq_pos(rq);
 
 	/*
 	 * Don't allow the seek distance to get too large from the
 	 * odd fragment, pagein, etc
 	 */
-	if (cic->seek_samples <= 60) /* second&third seek */
-		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
+	if (cfqq->seek_samples <= 60) /* second&third seek */
+		sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
 	else
-		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
+		sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
 
-	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
-	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
-	total = cic->seek_total + (cic->seek_samples/2);
-	do_div(total, cic->seek_samples);
-	cic->seek_mean = (sector_t)total;
+	cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
+	cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
+	total = cfqq->seek_total + (cfqq->seek_samples/2);
+	do_div(total, cfqq->seek_samples);
+	cfqq->seek_mean = (sector_t)total;
+
+	/*
+	 * If this cfqq is shared between multiple processes, check to
+	 * make sure that those processes are still issuing I/Os within
+	 * the mean seek distance. If not, it may be time to break the
+	 * queues apart again.
+	 */
+	if (cfq_cfqq_coop(cfqq)) {
+		if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
+			cfqq->seeky_start = jiffies;
+		else if (!CFQQ_SEEKY(cfqq))
+			cfqq->seeky_start = 0;
+	}
 }
 
 /*
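The seek mean is a decaying weighted average: samples and total both decay by 7/8 per request, so roughly the last dozen requests dominate. A standalone sketch of the update (plain 64-bit division replaces the kernel's do_div()):

#include <stdio.h>

typedef unsigned long long u64;
typedef unsigned long long sector_t;

/* Per-queue seek statistics, as moved from cfq_io_context to cfq_queue. */
struct seek_stats {
	unsigned samples;
	u64 total;
	sector_t mean;
};

static void update_seek(struct seek_stats *s, sector_t sdist)
{
	/* decay old state by 7/8, fold in the new sample */
	s->samples = (7 * s->samples + 256) / 8;
	s->total = (7 * s->total + (u64)256 * sdist) / 8;
	s->mean = (s->total + s->samples / 2) / s->samples;
}

int main(void)
{
	struct seek_stats s = { 0, 0, 0 };
	int i;

	for (i = 0; i < 10; i++)
		update_seek(&s, 64);		/* near-sequential I/O */
	printf("sequential mean: %llu\n", s.mean);
	for (i = 0; i < 10; i++)
		update_seek(&s, 100000);	/* the queue turns seeky */
	printf("after seeks:     %llu\n", s.mean);
	return 0;
}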
@@ -2000,13 +2361,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
 	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-	    (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
+	    (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
-		unsigned int slice_idle = cfqd->cfq_slice_idle;
-		if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
-			slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
-		if (cic->ttime_mean > slice_idle)
+		if (cic->ttime_mean > cfqd->cfq_slice_idle)
 			enable_idle = 0;
 		else
 			enable_idle = 1;
@@ -2044,6 +2402,10 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	if (cfq_class_idle(cfqq))
 		return true;
 
+	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD
+	    && new_cfqq->service_tree == cfqq->service_tree)
+		return true;
+
 	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
@@ -2071,7 +2433,8 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
-	if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+	if (cfq_rq_close(cfqd, cfqq, rq))
+	if (cfq_rq_close(cfqd, cfqq, rq) && (!cfq_cfqq_coop(new_cfqq) ||
 	    cfqd->busy_queues == 1)) {
 		/*
 		 * Mark new queue coop_preempt, so its coop flag will not be
@@ -2121,10 +2484,10 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfqq->meta_pending++;
 
 	cfq_update_io_thinktime(cfqd, cic);
-	cfq_update_io_seektime(cfqd, cic, rq);
+	cfq_update_io_seektime(cfqd, cfqq, rq);
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
-	cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (cfqq == cfqd->active_queue) {
 		/*
@@ -2165,10 +2528,9 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	cfq_log_cfqq(cfqd, cfqq, "insert_request");
 	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
 
-	cfq_add_rq_rb(rq);
-
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
+	cfq_add_rq_rb(rq);
 
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
@@ -2179,6 +2541,8 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
  */
 static void cfq_update_hw_tag(struct cfq_data *cfqd)
 {
+	struct cfq_queue *cfqq = cfqd->active_queue;
+
 	if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
 		cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
 
@@ -2186,6 +2550,16 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 	    rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
 		return;
 
+	/*
+	 * If the active queue doesn't have enough requests and can idle,
+	 * cfq might not dispatch sufficient requests to hardware. Don't
+	 * zero hw_tag in this case.
+	 */
+	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
+	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
+	    CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
+		return;
+
 	if (cfqd->hw_tag_samples++ < 50)
 		return;
 
@@ -2243,7 +2617,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		 */
 		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
 			cfq_slice_expired(cfqd, 1);
-		else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
+		else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq) &&
 			 sync && !rq_noidle(rq))
 			cfq_arm_slice_timer(cfqd);
 	}
@@ -2269,12 +2643,10 @@ static void cfq_prio_boost(struct cfq_queue *cfqq)
 		cfqq->ioprio = IOPRIO_NORM;
 	} else {
 		/*
-		 * check if we need to unboost the queue
+		 * unboost the queue (if needed)
 		 */
-		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
-			cfqq->ioprio_class = cfqq->org_ioprio_class;
-		if (cfqq->ioprio != cfqq->org_ioprio)
-			cfqq->ioprio = cfqq->org_ioprio;
+		cfqq->ioprio_class = cfqq->org_ioprio_class;
+		cfqq->ioprio = cfqq->org_ioprio;
 	}
 }
 
@@ -2338,6 +2710,43 @@ static void cfq_put_request(struct request *rq)
 	}
 }
 
+static struct cfq_queue *
+cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
+		struct cfq_queue *cfqq)
+{
+	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
+	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
+	cfq_mark_cfqq_coop(cfqq->new_cfqq);
+	cfq_put_queue(cfqq);
+	return cic_to_cfqq(cic, 1);
+}
+
+static int should_split_cfqq(struct cfq_queue *cfqq)
+{
+	if (cfqq->seeky_start &&
+	    time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
+		return 1;
+	return 0;
+}
+
+/*
+ * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
+ * was the last process referring to said cfqq.
+ */
+static struct cfq_queue *
+split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
+{
+	if (cfqq_process_refs(cfqq) == 1) {
+		cfqq->seeky_start = 0;
+		cfqq->pid = current->pid;
+		cfq_clear_cfqq_coop(cfqq);
+		return cfqq;
+	}
+
+	cic_set_cfqq(cic, NULL, 1);
+	cfq_put_queue(cfqq);
+	return NULL;
+}
+
 /*
  * Allocate cfq data structures associated with this request.
  */
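should_split_cfqq() gives a shared queue a full CFQQ_COOP_TOUT window of seeky behaviour before breaking it up. A toy sketch of the timeout test (HZ assumed 1000; time_after() flattened to a plain comparison, which ignores wraparound for the sake of the example):

#include <stdio.h>

#define HZ 1000
#define CFQQ_COOP_TOUT	(HZ)	/* 1 second, as in the patch */

/* A shared (coop) queue is split again only once it has been seeky
 * for the whole timeout window. */
static int should_split(unsigned long now, unsigned long seeky_start,
			int is_coop)
{
	return is_coop && seeky_start &&
	       now > seeky_start + CFQQ_COOP_TOUT;
}

int main(void)
{
	unsigned long now = 5000;

	printf("not shared:    %d\n", should_split(now, 3000, 0));	/* 0 */
	printf("seeky briefly: %d\n", should_split(now, 4500, 1));	/* 0 */
	printf("seeky > 1s:    %d\n", should_split(now, 3500, 1));	/* 1 */
	return 0;
}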
@@ -2360,10 +2769,30 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	if (!cic)
 		goto queue_fail;
 
+new_queue:
 	cfqq = cic_to_cfqq(cic, is_sync);
 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
 		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
 		cic_set_cfqq(cic, cfqq, is_sync);
+	} else {
+		/*
+		 * If the queue was seeky for too long, break it apart.
+		 */
+		if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
+			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
+			cfqq = split_cfqq(cic, cfqq);
+			if (!cfqq)
+				goto new_queue;
+		}
+
+		/*
+		 * Check to see if this queue is scheduled to merge with
+		 * another, closely cooperating queue. The merging of
+		 * queues happens here as it must be done in process context.
+		 * The reference on new_cfqq was taken in merge_cfqqs.
+		 */
+		if (cfqq->new_cfqq)
+			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
 	}
 
 	cfqq->allocated[rw]++;
@@ -2500,13 +2929,16 @@ static void cfq_exit_queue(struct elevator_queue *e)
 static void *cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
-	int i;
+	int i, j;
 
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
 		return NULL;
 
-	cfqd->service_tree = CFQ_RB_ROOT;
+	for (i = 0; i < 2; ++i)
+		for (j = 0; j < 3; ++j)
+			cfqd->service_trees[i][j] = CFQ_RB_ROOT;
+	cfqd->service_tree_idle = CFQ_RB_ROOT;
 
 	/*
 	 * Not strictly needed (since RB_ROOT just clears the node and we