Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--   block/cfq-iosched.c   140
1 file changed, 84 insertions(+), 56 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 968455c57e1a..f27ff3efe6cd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -87,7 +87,6 @@ struct cfq_rb_root {
         unsigned count;
         unsigned total_weight;
         u64 min_vdisktime;
-        struct rb_node *active;
 };
 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
                         .count = 0, .min_vdisktime = 0, }
@@ -97,7 +96,7 @@ struct cfq_rb_root {
  */
 struct cfq_queue {
         /* reference count */
-        atomic_t ref;
+        int ref;
         /* various state flags, see below */
         unsigned int flags;
         /* parent cfq_data */
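
Note: the atomic_t to int conversion in this hunk (and the matching cfq_group changes below) is safe only because every reference-count update already happens with the request queue's spinlock held. A minimal userspace sketch of that pattern, with a pthread mutex standing in for queue_lock and all names invented for illustration, not kernel code:

#include <pthread.h>
#include <stdlib.h>

/* Toy queue: 'ref' can be a plain int because every get/put runs with
 * 'lock' held, so increments and decrements never race. */
struct toy_queue {
        int ref;
        pthread_mutex_t lock;
};

static void toy_queue_get(struct toy_queue *tq)
{
        pthread_mutex_lock(&tq->lock);
        tq->ref++;                      /* mirrors cfqq->ref++ under queue_lock */
        pthread_mutex_unlock(&tq->lock);
}

static void toy_queue_put(struct toy_queue *tq)
{
        pthread_mutex_lock(&tq->lock);
        if (--tq->ref) {                /* mirrors the new dec-then-test */
                pthread_mutex_unlock(&tq->lock);
                return;
        }
        pthread_mutex_unlock(&tq->lock);
        free(tq);                       /* last reference dropped */
}

In CFQ itself the callers already hold queue_lock, so the lock/unlock pair lives in the callers rather than in the get/put helpers.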
@@ -180,7 +179,6 @@ struct cfq_group {
         /* group service_tree key */
         u64 vdisktime;
         unsigned int weight;
-        bool on_st;
 
         /* number of cfqq currently on this group */
         int nr_cfqq;
@@ -209,7 +207,7 @@ struct cfq_group {
         struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
         struct hlist_node cfqd_node;
-        atomic_t ref;
+        int ref;
 #endif
         /* number of requests that are on the dispatch list or inside driver */
         int dispatched;
@@ -563,11 +561,6 @@ static void update_min_vdisktime(struct cfq_rb_root *st)
         u64 vdisktime = st->min_vdisktime;
         struct cfq_group *cfqg;
 
-        if (st->active) {
-                cfqg = rb_entry_cfqg(st->active);
-                vdisktime = cfqg->vdisktime;
-        }
-
         if (st->left) {
                 cfqg = rb_entry_cfqg(st->left);
                 vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
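
Note: with st->active gone, update_min_vdisktime() derives the tree minimum from the cached leftmost node alone. The wrap-safe helpers it relies on look essentially like the sketch below (userspace typedefs added so it compiles on its own; treat it as an illustrative analogue, not the kernel source):

#include <stdint.h>

typedef uint64_t u64;
typedef int64_t  s64;

/* Wrap-safe ordering for 64-bit virtual disk time: the difference is
 * interpreted as signed so the comparison still works after the counter
 * wraps.  min_vdisktime may only move forward over time. */
static u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
        s64 delta = (s64)(vdisktime - min_vdisktime);

        if (delta > 0)
                min_vdisktime = vdisktime;
        return min_vdisktime;
}

static u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
        s64 delta = (s64)(vdisktime - min_vdisktime);

        if (delta < 0)
                min_vdisktime = vdisktime;
        return min_vdisktime;
}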
@@ -605,8 +598,8 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
         return cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static inline unsigned
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
         if (cfqd->cfq_latency) {
@@ -632,6 +625,14 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                                     low_slice);
                 }
         }
+        return slice;
+}
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+        unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
+
         cfqq->slice_start = jiffies;
         cfqq->slice_end = jiffies + slice;
         cfqq->allocated_slice = slice;
@@ -646,11 +647,11 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
         if (cfq_cfqq_slice_new(cfqq))
-                return 0;
+                return false;
         if (time_before(jiffies, cfqq->slice_end))
-                return 0;
+                return false;
 
-        return 1;
+        return true;
 }
 
 /*
@@ -869,7 +870,7 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
         struct rb_node *n;
 
         cfqg->nr_cfqq++;
-        if (cfqg->on_st)
+        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                 return;
 
         /*
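
Note: the on_st flag is redundant because an rb_node can encode "not on any tree" in itself: RB_CLEAR_NODE() makes the node its own parent and RB_EMPTY_NODE() tests for that, so membership can be read straight off the node as long as it is cleared again on removal (cfq_rb_erase() does that). A stripped-down illustration of the same idea, not the kernel rbtree implementation:

/* A detached node points its parent field at itself, so no separate
 * bool is needed to remember whether it is linked. */
struct toy_node {
        struct toy_node *parent;
};

static void toy_clear_node(struct toy_node *n)
{
        n->parent = n;                  /* like RB_CLEAR_NODE() */
}

static int toy_node_on_tree(const struct toy_node *n)
{
        return n->parent != n;          /* like !RB_EMPTY_NODE() */
}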
@@ -885,7 +886,6 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
         cfqg->vdisktime = st->min_vdisktime;
 
         __cfq_group_service_tree_add(st, cfqg);
-        cfqg->on_st = true;
         st->total_weight += cfqg->weight;
 }
 
@@ -894,9 +894,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
         struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-        if (st->active == &cfqg->rb_node)
-                st->active = NULL;
-
         BUG_ON(cfqg->nr_cfqq < 1);
         cfqg->nr_cfqq--;
 
@@ -905,7 +902,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
                 return;
 
         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
-        cfqg->on_st = false;
         st->total_weight -= cfqg->weight;
         if (!RB_EMPTY_NODE(&cfqg->rb_node))
                 cfq_rb_erase(&cfqg->rb_node, st);
@@ -1026,11 +1022,11 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
          * elevator which will be dropped by either elevator exit
          * or cgroup deletion path depending on who is exiting first.
          */
-        atomic_set(&cfqg->ref, 1);
+        cfqg->ref = 1;
 
         /*
          * Add group onto cgroup list. It might happen that bdi->dev is
-         * not initiliazed yet. Initialize this new group without major
+         * not initialized yet. Initialize this new group without major
          * and minor info and this info will be filled in once a new thread
          * comes for IO. See code above.
          */
@@ -1071,7 +1067,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-        atomic_inc(&cfqg->ref);
+        cfqg->ref++;
         return cfqg;
 }
 
@@ -1083,7 +1079,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
         cfqq->cfqg = cfqg;
         /* cfqq reference on cfqg */
-        atomic_inc(&cfqq->cfqg->ref);
+        cfqq->cfqg->ref++;
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
@@ -1091,11 +1087,12 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
         struct cfq_rb_root *st;
         int i, j;
 
-        BUG_ON(atomic_read(&cfqg->ref) <= 0);
-        if (!atomic_dec_and_test(&cfqg->ref))
+        BUG_ON(cfqg->ref <= 0);
+        cfqg->ref--;
+        if (cfqg->ref)
                 return;
         for_each_cfqg_st(cfqg, i, j, st)
-                BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
+                BUG_ON(!RB_EMPTY_ROOT(&st->rb));
         kfree(cfqg);
 }
 
@@ -1200,7 +1197,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 cfq_group_service_tree_del(cfqd, cfqq->cfqg);
                 cfqq->orig_cfqg = cfqq->cfqg;
                 cfqq->cfqg = &cfqd->root_group;
-                atomic_inc(&cfqd->root_group.ref);
+                cfqd->root_group.ref++;
                 group_changed = 1;
         } else if (!cfqd->cfq_group_isolation
                         && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -1672,8 +1669,11 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         /*
          * store what was left of this slice, if the queue idled/timed out
          */
-        if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
-                cfqq->slice_resid = cfqq->slice_end - jiffies;
+        if (timed_out) {
+                if (cfq_cfqq_slice_new(cfqq))
+                        cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
+                else
+                        cfqq->slice_resid = cfqq->slice_end - jiffies;
                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
         }
 
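Note: with this change a queue that is timed out or preempted before its slice effectively started (slice_new) is credited a full scaled slice as residual instead of nothing. A hedged arithmetic sketch with invented jiffies values, compilable on its own:

#include <stdio.h>

int main(void)
{
        unsigned long jiffies = 10050;          /* "now" */
        unsigned long slice_end = 10100;        /* deadline of the interrupted slice */
        unsigned scaled_slice = 80;             /* pretend cfq_scaled_cfqq_slice() result */
        int slice_new = 1;                      /* queue never got to issue I/O */
        long slice_resid;

        if (slice_new)
                slice_resid = scaled_slice;             /* credit the whole scaled slice */
        else
                slice_resid = slice_end - jiffies;      /* credit what was left: 50 */

        printf("slice_resid = %ld\n", slice_resid);
        return 0;
}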
@@ -1687,9 +1687,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         if (cfqq == cfqd->active_queue)
                 cfqd->active_queue = NULL;
 
-        if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
-                cfqd->grp_service_tree.active = NULL;
-
         if (cfqd->active_cic) {
                 put_io_context(cfqd->active_cic->ioc);
                 cfqd->active_cic = NULL;
@@ -1901,10 +1898,10 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
          * in their service tree.
          */
         if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
-                return 1;
+                return true;
         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
                         service_tree->count);
-        return 0;
+        return false;
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -2040,7 +2037,7 @@ static int cfqq_process_refs(struct cfq_queue *cfqq)
         int process_refs, io_refs;
 
         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
-        process_refs = atomic_read(&cfqq->ref) - io_refs;
+        process_refs = cfqq->ref - io_refs;
         BUG_ON(process_refs < 0);
         return process_refs;
 }
@@ -2080,10 +2077,10 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
          */
         if (new_process_refs >= process_refs) {
                 cfqq->new_cfqq = new_cfqq;
-                atomic_add(process_refs, &new_cfqq->ref);
+                new_cfqq->ref += process_refs;
         } else {
                 new_cfqq->new_cfqq = cfqq;
-                atomic_add(new_process_refs, &cfqq->ref);
+                cfqq->ref += new_process_refs;
         }
 }
 
@@ -2116,12 +2113,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
         unsigned count;
         struct cfq_rb_root *st;
         unsigned group_slice;
-
-        if (!cfqg) {
-                cfqd->serving_prio = IDLE_WORKLOAD;
-                cfqd->workload_expires = jiffies + 1;
-                return;
-        }
+        enum wl_prio_t original_prio = cfqd->serving_prio;
 
         /* Choose next priority. RT > BE > IDLE */
         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
@@ -2134,6 +2126,9 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
                 return;
         }
 
+        if (original_prio != cfqd->serving_prio)
+                goto new_workload;
+
         /*
          * For RT and BE, we have to choose also the type
          * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
@@ -2148,6 +2143,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
         if (count && !time_after(jiffies, cfqd->workload_expires))
                 return;
 
+new_workload:
         /* otherwise select new workload type */
         cfqd->serving_type =
                 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
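
Note: the two hunks above make choose_service_tree() remember the priority class it was serving; if the class (RT/BE/IDLE) changes, it jumps straight to picking a new workload type instead of returning early just because the previous workload slice has not expired. A compressed control-flow sketch with simplified, invented names:

enum prio { RT, BE, IDLE };
enum wl   { ASYNC_WL, SYNC_NOIDLE_WL, SYNC_WL };

struct sched_state {
        enum prio serving_prio;
        enum wl   serving_type;
        unsigned long workload_expires;
};

/* A priority-class switch forces a fresh workload choice even if the
 * previous workload still has time left on its slice. */
static void choose(struct sched_state *s, enum prio busiest,
                   unsigned long now, int tree_nonempty)
{
        enum prio original_prio = s->serving_prio;

        s->serving_prio = busiest;              /* RT > BE > IDLE decision */

        if (original_prio != s->serving_prio)
                goto new_workload;

        if (tree_nonempty && now < s->workload_expires)
                return;                         /* keep serving current workload */

new_workload:
        s->serving_type = SYNC_WL;              /* placeholder for cfq_choose_wl() */
        s->workload_expires = now + 100;        /* placeholder slice length */
}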
@@ -2199,7 +2195,6 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
         if (RB_EMPTY_ROOT(&st->rb))
                 return NULL;
         cfqg = cfq_rb_first_group(st);
-        st->active = &cfqg->rb_node;
         update_min_vdisktime(st);
         return cfqg;
 }
@@ -2293,6 +2288,17 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
                         goto keep_queue;
         }
 
+        /*
+         * This is a deep seek queue, but the device is much faster than
+         * the queue can deliver, don't idle
+         **/
+        if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
+            (cfq_cfqq_slice_new(cfqq) ||
+            (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
+                cfq_clear_cfqq_deep(cfqq);
+                cfq_clear_cfqq_idle_window(cfqq);
+        }
+
         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
                 cfqq = NULL;
                 goto keep_queue;
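
Note: the heuristic added above stops idling on a "deep", seeky queue when the device is clearly keeping up, i.e. the slice is brand new or less than half of it has elapsed (time remaining still exceeds time used). A small standalone check with invented values:

#include <stdio.h>

/* Hedged sketch of the new test: remaining slice time greater than elapsed
 * slice time means the deep queue is being drained fast, so idling wastes
 * bandwidth and the idle window can be cleared. */
static int device_keeping_up(unsigned long jiffies, unsigned long slice_start,
                             unsigned long slice_end, int slice_new)
{
        if (slice_new)
                return 1;
        return (long)(slice_end - jiffies) > (long)(jiffies - slice_start);
}

int main(void)
{
        /* invented numbers: 100-jiffy slice, only 30 jiffies used so far */
        printf("%d\n", device_keeping_up(1030, 1000, 1100, 0));        /* 1: stop idling */
        /* 80 jiffies used, 20 left: keep the idle window */
        printf("%d\n", device_keeping_up(1080, 1000, 1100, 0));        /* 0 */
        return 0;
}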
@@ -2367,12 +2373,12 @@ static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
 {
         /* the queue hasn't finished any request, can't estimate */
         if (cfq_cfqq_slice_new(cfqq))
-                return 1;
+                return true;
         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
                 cfqq->slice_end))
-                return 1;
+                return true;
 
-        return 0;
+        return false;
 }
 
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -2538,9 +2544,10 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
         struct cfq_data *cfqd = cfqq->cfqd;
         struct cfq_group *cfqg, *orig_cfqg;
 
-        BUG_ON(atomic_read(&cfqq->ref) <= 0);
+        BUG_ON(cfqq->ref <= 0);
 
-        if (!atomic_dec_and_test(&cfqq->ref))
+        cfqq->ref--;
+        if (cfqq->ref)
                 return;
 
         cfq_log_cfqq(cfqd, cfqq, "put_queue");
@@ -2843,7 +2850,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         RB_CLEAR_NODE(&cfqq->p_node);
         INIT_LIST_HEAD(&cfqq->fifo);
 
-        atomic_set(&cfqq->ref, 0);
+        cfqq->ref = 0;
         cfqq->cfqd = cfqd;
 
         cfq_mark_cfqq_prio_changed(cfqq);
@@ -2979,11 +2986,11 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
          * pin the queue now that it's allocated, scheduler exit will prune it
          */
         if (!is_sync && !(*async_cfqq)) {
-                atomic_inc(&cfqq->ref);
+                cfqq->ref++;
                 *async_cfqq = cfqq;
         }
 
-        atomic_inc(&cfqq->ref);
+        cfqq->ref++;
         return cfqq;
 }
 
@@ -3265,6 +3272,10 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
                 return true;
 
+        /* An idle queue should not be idle now for some reason */
+        if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
+                return true;
+
         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
                 return false;
 
@@ -3284,10 +3295,19 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+        struct cfq_queue *old_cfqq = cfqd->active_queue;
+
         cfq_log_cfqq(cfqd, cfqq, "preempt");
         cfq_slice_expired(cfqd, 1);
 
         /*
+         * workload type is changed, don't save slice, otherwise preempt
+         * doesn't happen
+         */
+        if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
+                cfqq->cfqg->saved_workload_slice = 0;
+
+        /*
          * Put the new queue at the front of the of the current list,
          * so we know that it will be selected next.
          */
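
Note: the new comment above explains the reasoning: if the preempting queue belongs to a different workload type, the group's saved workload slice must be discarded, otherwise the saved state would immediately reinstate the old workload and undo the preemption. A schematic sketch (field names follow the patch, the surrounding toy types are invented):

enum toy_wl_type { TOY_ASYNC_WL, TOY_SYNC_NOIDLE_WL, TOY_SYNC_WL };

struct toy_group {
        unsigned long saved_workload_slice;     /* restored when the group is rescheduled */
};

struct toy_cfqq {
        enum toy_wl_type type;
        struct toy_group *grp;
};

/* Preempting across workload types: drop the saved slice so the scheduler
 * really moves on to the new workload instead of resuming the old one. */
static void toy_preempt(struct toy_cfqq *old_cfqq, struct toy_cfqq *new_cfqq)
{
        if (old_cfqq->type != new_cfqq->type)
                new_cfqq->grp->saved_workload_slice = 0;
}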
@@ -3412,6 +3432,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
         struct cfq_io_context *cic = cfqd->active_cic;
 
+        /* If the queue already has requests, don't wait */
+        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+                return false;
+
         /* If there are other queues in the group, don't wait */
         if (cfqq->cfqg->nr_cfqq > 1)
                 return false;
@@ -3681,10 +3705,10 @@ new_queue:
         }
 
         cfqq->allocated[rw]++;
-        atomic_inc(&cfqq->ref);
 
         spin_unlock_irqrestore(q->queue_lock, flags);
 
+        cfqq->ref++;
         rq->elevator_private[0] = cic;
         rq->elevator_private[1] = cfqq;
         rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
@@ -3862,6 +3886,10 @@ static void *cfq_init_queue(struct request_queue *q)
         if (!cfqd)
                 return NULL;
 
+        /*
+         * Don't need take queue_lock in the routine, since we are
+         * initializing the ioscheduler, and nobody is using cfqd
+         */
         cfqd->cic_index = i;
 
         /* Init root service tree */
@@ -3881,7 +3909,7 @@ static void *cfq_init_queue(struct request_queue *q)
          * Take a reference to root group which we never drop. This is just
          * to make sure that cfq_put_cfqg() does not try to kfree root group
          */
-        atomic_set(&cfqg->ref, 1);
+        cfqg->ref = 1;
         rcu_read_lock();
         cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
                                         (void *)cfqd, 0);
@@ -3901,7 +3929,7 @@ static void *cfq_init_queue(struct request_queue *q)
          * will not attempt to free it.
          */
         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
-        atomic_inc(&cfqd->oom_cfqq.ref);
+        cfqd->oom_cfqq.ref++;
         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
         INIT_LIST_HEAD(&cfqd->cic_list);