Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  56
1 files changed, 30 insertions, 26 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4cd59b0d7c15..9b186fd6bf47 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -87,7 +87,6 @@ struct cfq_rb_root {
 	unsigned count;
 	unsigned total_weight;
 	u64 min_vdisktime;
-	struct rb_node *active;
 };
 #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
 			.count = 0, .min_vdisktime = 0, }
@@ -180,7 +179,6 @@ struct cfq_group {
 	/* group service_tree key */
 	u64 vdisktime;
 	unsigned int weight;
-	bool on_st;
 
 	/* number of cfqq currently on this group */
 	int nr_cfqq;
@@ -563,11 +561,6 @@ static void update_min_vdisktime(struct cfq_rb_root *st)
 	u64 vdisktime = st->min_vdisktime;
 	struct cfq_group *cfqg;
 
-	if (st->active) {
-		cfqg = rb_entry_cfqg(st->active);
-		vdisktime = cfqg->vdisktime;
-	}
-
 	if (st->left) {
 		cfqg = rb_entry_cfqg(st->left);
 		vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
@@ -646,11 +639,11 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 	if (cfq_cfqq_slice_new(cfqq))
-		return 0;
+		return false;
 	if (time_before(jiffies, cfqq->slice_end))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /*
@@ -869,7 +862,7 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	struct rb_node *n;
 
 	cfqg->nr_cfqq++;
-	if (cfqg->on_st)
+	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		return;
 
 	/*
@@ -885,7 +878,6 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	cfqg->vdisktime = st->min_vdisktime;
 
 	__cfq_group_service_tree_add(st, cfqg);
-	cfqg->on_st = true;
 	st->total_weight += cfqg->weight;
 }
 
@@ -894,9 +886,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-	if (st->active == &cfqg->rb_node)
-		st->active = NULL;
-
 	BUG_ON(cfqg->nr_cfqq < 1);
 	cfqg->nr_cfqq--;
 
@@ -905,7 +894,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		return;
 
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
-	cfqg->on_st = false;
 	st->total_weight -= cfqg->weight;
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
@@ -1095,7 +1083,7 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
 	if (!atomic_dec_and_test(&cfqg->ref))
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
-		BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
+		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
 	kfree(cfqg);
 }
 
@@ -1687,9 +1675,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (cfqq == cfqd->active_queue)
 		cfqd->active_queue = NULL;
 
-	if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
-		cfqd->grp_service_tree.active = NULL;
-
 	if (cfqd->active_cic) {
 		put_io_context(cfqd->active_cic->ioc);
 		cfqd->active_cic = NULL;
@@ -1901,10 +1886,10 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * in their service tree.
 	 */
 	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
 			service_tree->count);
-	return 0;
+	return false;
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -2116,6 +2101,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	unsigned count;
 	struct cfq_rb_root *st;
 	unsigned group_slice;
+	enum wl_prio_t original_prio = cfqd->serving_prio;
 
 	if (!cfqg) {
 		cfqd->serving_prio = IDLE_WORKLOAD;
@@ -2134,6 +2120,9 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		return;
 	}
 
+	if (original_prio != cfqd->serving_prio)
+		goto new_workload;
+
 	/*
 	 * For RT and BE, we have to choose also the type
 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
@@ -2148,6 +2137,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	if (count && !time_after(jiffies, cfqd->workload_expires))
 		return;
 
+new_workload:
 	/* otherwise select new workload type */
 	cfqd->serving_type =
 		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
@@ -2199,7 +2189,6 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
 	if (RB_EMPTY_ROOT(&st->rb))
 		return NULL;
 	cfqg = cfq_rb_first_group(st);
-	st->active = &cfqg->rb_node;
 	update_min_vdisktime(st);
 	return cfqg;
 }
@@ -2293,6 +2282,17 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 		goto keep_queue;
 	}
 
+	/*
+	 * This is a deep seek queue, but the device is much faster than
+	 * the queue can deliver, don't idle
+	 **/
+	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
+	    (cfq_cfqq_slice_new(cfqq) ||
+	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
+		cfq_clear_cfqq_deep(cfqq);
+		cfq_clear_cfqq_idle_window(cfqq);
+	}
+
 	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
 		cfqq = NULL;
 		goto keep_queue;
@@ -2367,12 +2367,12 @@ static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
 {
 	/* the queue hasn't finished any request, can't estimate */
 	if (cfq_cfqq_slice_new(cfqq))
-		return 1;
+		return true;
 	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
 			cfqq->slice_end))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -3265,6 +3265,10 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
 		return true;
 
+	/* An idle queue should not be idle now for some reason */
+	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
+		return true;
+
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
 		return false;
 
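
The recurring pattern in the group-scheduling hunks above is the removal of redundant state: the cfqg->on_st flag and the st->active pointer both duplicated information that the group's rb_node already carries, so membership on the service tree is instead derived with RB_EMPTY_NODE(). As a rough user-space analogy only (this is not the kernel's rbtree implementation; the toy_* names and self-pointing sentinel are illustrative assumptions), the idea looks like this:

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for struct rb_node: a node whose parent pointer refers to
 * itself is "not on any tree", which mirrors what RB_EMPTY_NODE() tests. */
struct toy_node {
	struct toy_node *parent;
};

static void toy_clear_node(struct toy_node *n) { n->parent = n; }
static int  toy_empty_node(struct toy_node *n) { return n->parent == n; }

struct toy_group {
	struct toy_node rb_node;	/* position on the service tree */
	int nr_queues;
	/* no separate "bool on_st" flag is needed */
};

static struct toy_node tree_root = { NULL };	/* fake tree: one parent slot */

static void group_add(struct toy_group *g)
{
	g->nr_queues++;
	if (!toy_empty_node(&g->rb_node))	/* already on the tree */
		return;
	g->rb_node.parent = &tree_root;		/* "insert" into the tree */
}

static void group_del(struct toy_group *g)
{
	assert(g->nr_queues > 0);
	if (--g->nr_queues)
		return;
	if (!toy_empty_node(&g->rb_node))
		toy_clear_node(&g->rb_node);	/* "erase" from the tree */
}

int main(void)
{
	struct toy_group g = { .nr_queues = 0 };

	toy_clear_node(&g.rb_node);	/* like RB_CLEAR_NODE() at init time */
	group_add(&g);
	group_add(&g);			/* second add sees the node is on-tree */
	group_del(&g);
	group_del(&g);
	printf("on tree after last del: %d\n", !toy_empty_node(&g.rb_node));
	return 0;
}
```

Deriving the on-tree state from the node itself, as the diff does with RB_EMPTY_NODE(&cfqg->rb_node), removes the risk of the flag and the tree drifting out of sync.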