Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c | 629
1 file changed, 511 insertions(+), 118 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ec52807cdd09..4f0ade74cfd0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -85,7 +85,6 @@ struct cfq_rb_root {
         struct rb_root rb;
         struct rb_node *left;
         unsigned count;
-        unsigned total_weight;
         u64 min_vdisktime;
         struct cfq_ttime ttime;
 };
@@ -155,7 +154,7 @@ struct cfq_queue {
  * First index in the service_trees.
  * IDLE is handled separately, so it has negative index
  */
-enum wl_prio_t {
+enum wl_class_t {
         BE_WORKLOAD = 0,
         RT_WORKLOAD = 1,
         IDLE_WORKLOAD = 2,
@@ -223,10 +222,45 @@ struct cfq_group {
 
         /* group service_tree key */
         u64 vdisktime;
+
+        /*
+         * The number of active cfqgs and sum of their weights under this
+         * cfqg.  This covers this cfqg's leaf_weight and all children's
+         * weights, but does not cover weights of further descendants.
+         *
+         * If a cfqg is on the service tree, it's active.  An active cfqg
+         * also activates its parent and contributes to the children_weight
+         * of the parent.
+         */
+        int nr_active;
+        unsigned int children_weight;
+
+        /*
+         * vfraction is the fraction of vdisktime that the tasks in this
+         * cfqg are entitled to.  This is determined by compounding the
+         * ratios walking up from this cfqg to the root.
+         *
+         * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
+         * vfractions on a service tree is approximately 1.  The sum may
+         * deviate a bit due to rounding errors and fluctuations caused by
+         * cfqgs entering and leaving the service tree.
+         */
+        unsigned int vfraction;
+
+        /*
+         * There are two weights - (internal) weight is the weight of this
+         * cfqg against the sibling cfqgs.  leaf_weight is the weight of
+         * this cfqg against the child cfqgs.  For the root cfqg, both
+         * weights are kept in sync for backward compatibility.
+         */
         unsigned int weight;
         unsigned int new_weight;
         unsigned int dev_weight;
 
+        unsigned int leaf_weight;
+        unsigned int new_leaf_weight;
+        unsigned int dev_leaf_weight;
+
         /* number of cfqq currently on this group */
         int nr_cfqq;
 
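A quick numeric check of what vfraction means. This is a minimal userspace sketch, not kernel code: it assumes CFQ_SERVICE_SHIFT is 12, as defined in cfq-iosched.c of this era, so 1 << 12 == 4096 stands for 1.0, and the sibling weights are made up. With weights that don't divide evenly, the fractions sum to 4095 rather than 4096 — the rounding deviation the new comment mentions.

#include <stdio.h>

#define CFQ_SERVICE_SHIFT 12        /* assumed; matches cfq-iosched.c */

int main(void)
{
        unsigned int weights[] = { 300, 500, 700 };     /* three active siblings */
        unsigned int children_weight = 300 + 500 + 700;
        unsigned int sum = 0;

        for (int i = 0; i < 3; i++) {
                /* the same ratio step the activation loop compounds */
                unsigned int vfr = (1u << CFQ_SERVICE_SHIFT) *
                                   weights[i] / children_weight;
                printf("sibling %d: vfraction = %u/4096\n", i, vfr);
                sum += vfr;
        }
        printf("sum = %u (1.0 would be %u)\n", sum, 1u << CFQ_SERVICE_SHIFT);
        return 0;
}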
@@ -248,14 +282,15 @@ struct cfq_group {
         struct cfq_rb_root service_trees[2][3];
         struct cfq_rb_root service_tree_idle;
 
-        unsigned long saved_workload_slice;
-        enum wl_type_t saved_workload;
-        enum wl_prio_t saved_serving_prio;
+        unsigned long saved_wl_slice;
+        enum wl_type_t saved_wl_type;
+        enum wl_class_t saved_wl_class;
 
         /* number of requests that are on the dispatch list or inside driver */
         int dispatched;
         struct cfq_ttime ttime;
-        struct cfqg_stats stats;
+        struct cfqg_stats stats;        /* stats for this cfqg */
+        struct cfqg_stats dead_stats;   /* stats pushed from dead children */
 };
 
 struct cfq_io_cq {
@@ -280,8 +315,8 @@ struct cfq_data {
         /*
          * The priority currently being served
          */
-        enum wl_prio_t serving_prio;
-        enum wl_type_t serving_type;
+        enum wl_class_t serving_wl_class;
+        enum wl_type_t serving_wl_type;
         unsigned long workload_expires;
         struct cfq_group *serving_group;
 
@@ -353,17 +388,17 @@ struct cfq_data {
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 
-static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
-                                            enum wl_prio_t prio,
-                                            enum wl_type_t type)
+static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
+                                  enum wl_class_t class,
+                                  enum wl_type_t type)
 {
         if (!cfqg)
                 return NULL;
 
-        if (prio == IDLE_WORKLOAD)
+        if (class == IDLE_WORKLOAD)
                 return &cfqg->service_tree_idle;
 
-        return &cfqg->service_trees[prio][type];
+        return &cfqg->service_trees[class][type];
 }
 
 enum cfqq_state_flags {
@@ -502,7 +537,7 @@ static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
 {
         struct cfqg_stats *stats = &cfqg->stats;
 
-        if (blkg_rwstat_sum(&stats->queued))
+        if (blkg_rwstat_total(&stats->queued))
                 return;
 
         /*
@@ -546,7 +581,7 @@ static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
         struct cfqg_stats *stats = &cfqg->stats;
 
         blkg_stat_add(&stats->avg_queue_size_sum,
-                      blkg_rwstat_sum(&stats->queued));
+                      blkg_rwstat_total(&stats->queued));
         blkg_stat_add(&stats->avg_queue_size_samples, 1);
         cfqg_stats_update_group_wait_time(stats);
 }
@@ -572,6 +607,13 @@ static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
         return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
 }
 
+static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
+{
+        struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
+
+        return pblkg ? blkg_to_cfqg(pblkg) : NULL;
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
         return blkg_get(cfqg_to_blkg(cfqg));
@@ -586,8 +628,9 @@ static inline void cfqg_put(struct cfq_group *cfqg)
         char __pbuf[128]; \
 \
         blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
-        blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
+        blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
                           cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+                          cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
                           __pbuf, ##args); \
 } while (0)
 
@@ -646,11 +689,9 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
                                         io_start_time - start_time);
 }
 
-static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+/* @stats = 0 */
+static void cfqg_stats_reset(struct cfqg_stats *stats)
 {
-        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
-        struct cfqg_stats *stats = &cfqg->stats;
-
         /* queued stats shouldn't be cleared */
         blkg_rwstat_reset(&stats->service_bytes);
         blkg_rwstat_reset(&stats->serviced);
@@ -669,13 +710,58 @@ static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
 #endif
 }
 
+/* @to += @from */
+static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
+{
+        /* queued stats shouldn't be merged */
+        blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
+        blkg_rwstat_merge(&to->serviced, &from->serviced);
+        blkg_rwstat_merge(&to->merged, &from->merged);
+        blkg_rwstat_merge(&to->service_time, &from->service_time);
+        blkg_rwstat_merge(&to->wait_time, &from->wait_time);
+        blkg_stat_merge(&to->time, &from->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+        blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
+        blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
+        blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
+        blkg_stat_merge(&to->dequeue, &from->dequeue);
+        blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
+        blkg_stat_merge(&to->idle_time, &from->idle_time);
+        blkg_stat_merge(&to->empty_time, &from->empty_time);
+#endif
+}
+
+/*
+ * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
+ * recursive stats can still account for the amount used by this cfqg after
+ * it's gone.
+ */
+static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
+{
+        struct cfq_group *parent = cfqg_parent(cfqg);
+
+        lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
+
+        if (unlikely(!parent))
+                return;
+
+        cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
+        cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
+        cfqg_stats_reset(&cfqg->stats);
+        cfqg_stats_reset(&cfqg->dead_stats);
+}
+
 #else	/* CONFIG_CFQ_GROUP_IOSCHED */
 
+static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
 static inline void cfqg_get(struct cfq_group *cfqg) { }
 static inline void cfqg_put(struct cfq_group *cfqg) { }
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
-        blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
+        blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
+                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+                        cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
+                        ##args)
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 
 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
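The merge/transfer pair is easier to follow end to end in a small userspace model. Everything below is a sketch: the toy_* names are invented stand-ins for cfq_group/cfqg_stats and the blkg_stat helpers, and only the merge-then-reset flow of cfqg_stats_xfer_dead() is mirrored. The one thing worth internalizing is the direction: every counter goes to += from, into the parent's dead_stats.

#include <stdio.h>
#include <string.h>

struct toy_stats { unsigned long long time, sectors; };

static void toy_stats_merge(struct toy_stats *to, struct toy_stats *from)
{
        to->time += from->time;         /* always to += from */
        to->sectors += from->sectors;
}

static void toy_stats_reset(struct toy_stats *stats)
{
        memset(stats, 0, sizeof(*stats));
}

struct toy_group {
        struct toy_group *parent;
        struct toy_stats stats;         /* this group's own usage */
        struct toy_stats dead_stats;    /* usage inherited from dead children */
};

static void toy_stats_xfer_dead(struct toy_group *g)
{
        struct toy_group *parent = g->parent;

        if (!parent)
                return;
        /* both live and already-inherited stats move up one level */
        toy_stats_merge(&parent->dead_stats, &g->stats);
        toy_stats_merge(&parent->dead_stats, &g->dead_stats);
        toy_stats_reset(&g->stats);
        toy_stats_reset(&g->dead_stats);
}

int main(void)
{
        struct toy_group parent = { 0 }, child = { .parent = &parent };

        child.stats.time = 100;
        child.dead_stats.time = 40;     /* from a grandchild that died earlier */
        toy_stats_xfer_dead(&child);
        printf("parent.dead_stats.time = %llu\n", parent.dead_stats.time); /* 140 */
        return 0;
}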
@@ -732,7 +818,7 @@ static inline bool iops_mode(struct cfq_data *cfqd)
                 return false;
 }
 
-static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
+static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
 {
         if (cfq_class_idle(cfqq))
                 return IDLE_WORKLOAD;
@@ -751,23 +837,23 @@ static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
         return SYNC_WORKLOAD;
 }
 
-static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
+static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
                                            struct cfq_data *cfqd,
                                            struct cfq_group *cfqg)
 {
-        if (wl == IDLE_WORKLOAD)
+        if (wl_class == IDLE_WORKLOAD)
                 return cfqg->service_tree_idle.count;
 
-        return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
-                + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
-                + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
+        return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
+                cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
+                cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
 }
 
 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
                                          struct cfq_group *cfqg)
 {
-        return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
-                + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
+        return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
+                cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
 }
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
@@ -847,13 +933,27 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 }
 
-static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
+/**
+ * cfqg_scale_charge - scale disk time charge according to cfqg weight
+ * @charge: disk time being charged
+ * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
+ *
+ * Scale @charge according to @vfraction, which is in range (0, 1].  The
+ * scaling is inversely proportional.
+ *
+ * scaled = charge / vfraction
+ *
+ * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
+ */
+static inline u64 cfqg_scale_charge(unsigned long charge,
+                                    unsigned int vfraction)
 {
-        u64 d = delta << CFQ_SERVICE_SHIFT;
+        u64 c = charge << CFQ_SERVICE_SHIFT;    /* make it fixed point */
 
-        d = d * CFQ_WEIGHT_DEFAULT;
-        do_div(d, cfqg->weight);
-        return d;
+        /* charge / vfraction */
+        c <<= CFQ_SERVICE_SHIFT;
+        do_div(c, vfraction);
+        return c;
 }
 
 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
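The fixed-point bookkeeping in cfqg_scale_charge() can be checked standalone: charge gets one shift to enter fixed point, then a second shift before the divide so the quotient stays in fixed point. A userspace sketch, with do_div() replaced by plain 64-bit division and CFQ_SERVICE_SHIFT assumed to be 12 as in this file:

#include <stdio.h>
#include <stdint.h>

#define CFQ_SERVICE_SHIFT 12

/* userspace stand-in for cfqg_scale_charge() */
static uint64_t scale_charge(unsigned long charge, unsigned int vfraction)
{
        uint64_t c = (uint64_t)charge << CFQ_SERVICE_SHIFT; /* fixed point */

        c <<= CFQ_SERVICE_SHIFT;        /* pre-shift so the divide keeps precision */
        return c / vfraction;           /* charge / vfraction */
}

int main(void)
{
        /* a group owning 1/4 of the tree: vfraction = 4096/4 = 1024 */
        unsigned int vfr = (1u << CFQ_SERVICE_SHIFT) / 4;

        /* 10 units of service charged at a 1/4 share -> 40 units of
         * vdisktime, still in CFQ_SERVICE_SHIFT fixed point (40 << 12) */
        printf("scaled = %llu\n",
               (unsigned long long)scale_charge(10, vfr));      /* 163840 */
        return 0;
}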
@@ -909,9 +1009,7 @@ static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
 static inline unsigned
 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
-        struct cfq_rb_root *st = &cfqd->grp_service_tree;
-
-        return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
+        return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
 }
 
 static inline unsigned
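And the rewritten cfq_group_slice() is a single multiply-and-shift on that same fixed point. Illustrative numbers only; 300 mirrors the file's default cfq_target_latency of 3/10 of a second, expressed here as milliseconds:

#include <stdio.h>

#define CFQ_SERVICE_SHIFT 12

int main(void)
{
        unsigned target_latency = 300;  /* ~default cfq_target_latency, as ms */
        unsigned vfraction = 1365;      /* ~1/3 of 4096, e.g. weight 500 of 1500 */

        /* same expression as the new cfq_group_slice() */
        unsigned slice = target_latency * vfraction >> CFQ_SERVICE_SHIFT;
        printf("group slice = %u\n", slice);    /* 99, i.e. ~1/3 of 300 */
        return 0;
}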
@@ -1178,20 +1276,61 @@ static void
 cfq_update_group_weight(struct cfq_group *cfqg)
 {
         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+
         if (cfqg->new_weight) {
                 cfqg->weight = cfqg->new_weight;
                 cfqg->new_weight = 0;
         }
+
+        if (cfqg->new_leaf_weight) {
+                cfqg->leaf_weight = cfqg->new_leaf_weight;
+                cfqg->new_leaf_weight = 0;
+        }
 }
 
 static void
 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 {
+        unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;      /* start with 1 */
+        struct cfq_group *pos = cfqg;
+        struct cfq_group *parent;
+        bool propagate;
+
+        /* add to the service tree */
         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
         cfq_update_group_weight(cfqg);
         __cfq_group_service_tree_add(st, cfqg);
-        st->total_weight += cfqg->weight;
+
+        /*
+         * Activate @cfqg and calculate the portion of vfraction @cfqg is
+         * entitled to.  vfraction is calculated by walking the tree
+         * towards the root calculating the fraction it has at each level.
+         * The compounded ratio is how much vfraction @cfqg owns.
+         *
+         * Start with the proportion tasks in this cfqg has against active
+         * children cfqgs - its leaf_weight against children_weight.
+         */
+        propagate = !pos->nr_active++;
+        pos->children_weight += pos->leaf_weight;
+        vfr = vfr * pos->leaf_weight / pos->children_weight;
+
+        /*
+         * Compound ->weight walking up the tree.  Both activation and
+         * vfraction calculation are done in the same loop.  Propagation
+         * stops once an already activated node is met.  vfraction
+         * calculation should always continue to the root.
+         */
+        while ((parent = cfqg_parent(pos))) {
+                if (propagate) {
+                        propagate = !parent->nr_active++;
+                        parent->children_weight += pos->weight;
+                }
+                vfr = vfr * pos->weight / parent->children_weight;
+                pos = parent;
+        }
+
+        cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
 static void
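Tracing the activation walk with concrete weights makes the propagate/compound split click. The sketch below is a self-contained userspace model of this hunk and of the deletion walk in the next one: cfqg_parent() becomes a plain parent pointer, the rbtree and max_t() bookkeeping are dropped, and all names are toy_* stand-ins. Note how requeuing a group refreshes its cached vfraction — the same effect cfq_group_served() gets by deleting and re-adding the group:

#include <stdio.h>

#define CFQ_SERVICE_SHIFT 12

struct toy_cfqg {
        struct toy_cfqg *parent;
        int nr_active;
        unsigned int children_weight;
        unsigned int weight;            /* vs siblings */
        unsigned int leaf_weight;       /* vs own children */
        unsigned int vfraction;
};

static void toy_tree_add(struct toy_cfqg *cfqg)
{
        unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;      /* start with 1 */
        struct toy_cfqg *pos = cfqg, *parent;
        int propagate;

        propagate = !pos->nr_active++;
        pos->children_weight += pos->leaf_weight;
        vfr = vfr * pos->leaf_weight / pos->children_weight;

        while ((parent = pos->parent)) {
                if (propagate) {
                        propagate = !parent->nr_active++;
                        parent->children_weight += pos->weight;
                }
                vfr = vfr * pos->weight / parent->children_weight;
                pos = parent;
        }
        cfqg->vfraction = vfr > 1 ? vfr : 1;
}

static void toy_tree_del(struct toy_cfqg *cfqg)
{
        struct toy_cfqg *pos = cfqg;
        int propagate;

        propagate = !--pos->nr_active;
        pos->children_weight -= pos->leaf_weight;

        while (propagate) {
                struct toy_cfqg *parent = pos->parent;

                pos->vfraction = 0;
                if (!parent)
                        break;
                propagate = !--parent->nr_active;
                parent->children_weight -= pos->weight;
                pos = parent;
        }
}

int main(void)
{
        struct toy_cfqg root = { .leaf_weight = 1000 };
        struct toy_cfqg a = { .parent = &root, .weight = 500,  .leaf_weight = 500 };
        struct toy_cfqg b = { .parent = &root, .weight = 1500, .leaf_weight = 500 };

        toy_tree_add(&a);       /* a alone: vfraction = 4096 (owns everything) */
        toy_tree_add(&b);       /* b: 1500/2000 at the root level = 3072 */

        /* a's cached value is stale now; requeue it as cfq_group_served() does */
        toy_tree_del(&a);
        toy_tree_add(&a);

        printf("a: %u  b: %u  sum: %u\n", a.vfraction, b.vfraction,
               a.vfraction + b.vfraction);      /* 1024 + 3072 = 4096 */
        return 0;
}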
@@ -1222,7 +1361,32 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 static void
 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
 {
-        st->total_weight -= cfqg->weight;
+        struct cfq_group *pos = cfqg;
+        bool propagate;
+
+        /*
+         * Undo activation from cfq_group_service_tree_add().  Deactivate
+         * @cfqg and propagate deactivation upwards.
+         */
+        propagate = !--pos->nr_active;
+        pos->children_weight -= pos->leaf_weight;
+
+        while (propagate) {
+                struct cfq_group *parent = cfqg_parent(pos);
+
+                /* @pos has 0 nr_active at this point */
+                WARN_ON_ONCE(pos->children_weight);
+                pos->vfraction = 0;
+
+                if (!parent)
+                        break;
+
+                propagate = !--parent->nr_active;
+                parent->children_weight -= pos->weight;
+                pos = parent;
+        }
+
+        /* remove from the service tree */
         if (!RB_EMPTY_NODE(&cfqg->rb_node))
                 cfq_rb_erase(&cfqg->rb_node, st);
 }
@@ -1241,7 +1405,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
         cfq_group_service_tree_del(st, cfqg);
-        cfqg->saved_workload_slice = 0;
+        cfqg->saved_wl_slice = 0;
         cfqg_stats_update_dequeue(cfqg);
 }
 
@@ -1284,6 +1448,7 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
         unsigned int used_sl, charge, unaccounted_sl = 0;
         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
                         - cfqg->service_tree_idle.count;
+        unsigned int vfr;
 
         BUG_ON(nr_sync < 0);
         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
@@ -1293,20 +1458,25 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
                 charge = cfqq->allocated_slice;
 
-        /* Can't update vdisktime while group is on service tree */
+        /*
+         * Can't update vdisktime while on service tree and cfqg->vfraction
+         * is valid only while on it.  Cache vfr, leave the service tree,
+         * update vdisktime and go back on.  The re-addition to the tree
+         * will also update the weights as necessary.
+         */
+        vfr = cfqg->vfraction;
         cfq_group_service_tree_del(st, cfqg);
-        cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
-        /* If a new weight was requested, update now, off tree */
+        cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
         cfq_group_service_tree_add(st, cfqg);
 
         /* This group is being expired. Save the context */
         if (time_after(cfqd->workload_expires, jiffies)) {
-                cfqg->saved_workload_slice = cfqd->workload_expires
+                cfqg->saved_wl_slice = cfqd->workload_expires
                                                 - jiffies;
-                cfqg->saved_workload = cfqd->serving_type;
-                cfqg->saved_serving_prio = cfqd->serving_prio;
+                cfqg->saved_wl_type = cfqd->serving_wl_type;
+                cfqg->saved_wl_class = cfqd->serving_wl_class;
         } else
-                cfqg->saved_workload_slice = 0;
+                cfqg->saved_wl_slice = 0;
 
         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                         st->min_vdisktime);
@@ -1344,6 +1514,52 @@ static void cfq_pd_init(struct blkcg_gq *blkg)
 
         cfq_init_cfqg_base(cfqg);
         cfqg->weight = blkg->blkcg->cfq_weight;
+        cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
+}
+
+static void cfq_pd_offline(struct blkcg_gq *blkg)
+{
+        /*
+         * @blkg is going offline and will be ignored by
+         * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
+         * that they don't get lost.  If IOs complete after this point, the
+         * stats for them will be lost.  Oh well...
+         */
+        cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
+}
+
+/* offset delta from cfqg->stats to cfqg->dead_stats */
+static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
+                                        offsetof(struct cfq_group, stats);
+
+/* to be used by recursive prfill, sums live and dead stats recursively */
+static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
+{
+        u64 sum = 0;
+
+        sum += blkg_stat_recursive_sum(pd, off);
+        sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
+        return sum;
+}
+
+/* to be used by recursive prfill, sums live and dead rwstats recursively */
+static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
+                                                       int off)
+{
+        struct blkg_rwstat a, b;
+
+        a = blkg_rwstat_recursive_sum(pd, off);
+        b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
+        blkg_rwstat_merge(&a, &b);
+        return a;
+}
+
+static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+{
+        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+        cfqg_stats_reset(&cfqg->stats);
+        cfqg_stats_reset(&cfqg->dead_stats);
 }
 
 /*
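dead_stats_off_delta is a layout trick worth a standalone illustration: because cfqg->stats and cfqg->dead_stats have identical layout, the offset of any counter in one maps to the matching counter in the other by adding one constant, so a single prfill callback can sum both. Userspace sketch with invented toy_* types standing in for the blkg stat machinery:

#include <stdio.h>
#include <stddef.h>

struct toy_stats { unsigned long long time, sectors; };

struct toy_group {
        int weight;
        struct toy_stats stats;
        struct toy_stats dead_stats;
};

static const int dead_off_delta = offsetof(struct toy_group, dead_stats) -
                                  offsetof(struct toy_group, stats);

/* mirrors cfqg_stat_pd_recursive_sum(): live value + dead value by offset */
static unsigned long long toy_stat_sum(struct toy_group *g, int off)
{
        unsigned long long live = *(unsigned long long *)((char *)g + off);
        unsigned long long dead = *(unsigned long long *)((char *)g + off +
                                                          dead_off_delta);
        return live + dead;
}

int main(void)
{
        struct toy_group g = { .stats.time = 70, .dead_stats.time = 30 };

        printf("time = %llu\n",
               toy_stat_sum(&g, offsetof(struct toy_group, stats.time))); /* 100 */
        return 0;
}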
@@ -1400,6 +1616,26 @@ static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
         return 0;
 }
 
+static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
+                                          struct blkg_policy_data *pd, int off)
+{
+        struct cfq_group *cfqg = pd_to_cfqg(pd);
+
+        if (!cfqg->dev_leaf_weight)
+                return 0;
+        return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
+}
+
+static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
+                                         struct cftype *cft,
+                                         struct seq_file *sf)
+{
+        blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
+                          cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
+                          false);
+        return 0;
+}
+
 static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
                             struct seq_file *sf)
 {
@@ -1407,8 +1643,16 @@ static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
         return 0;
 }
 
-static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-                                  const char *buf)
+static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
+                                 struct seq_file *sf)
+{
+        seq_printf(sf, "%u\n",
+                   cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
+        return 0;
+}
+
+static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+                                    const char *buf, bool is_leaf_weight)
 {
         struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
         struct blkg_conf_ctx ctx;
@@ -1422,8 +1666,13 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
         ret = -EINVAL;
         cfqg = blkg_to_cfqg(ctx.blkg);
         if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
-                cfqg->dev_weight = ctx.v;
-                cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
+                if (!is_leaf_weight) {
+                        cfqg->dev_weight = ctx.v;
+                        cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
+                } else {
+                        cfqg->dev_leaf_weight = ctx.v;
+                        cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
+                }
                 ret = 0;
         }
 
@@ -1431,7 +1680,20 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
         return ret;
 }
 
-static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+                                  const char *buf)
+{
+        return __cfqg_set_weight_device(cgrp, cft, buf, false);
+}
+
+static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
+                                       const char *buf)
+{
+        return __cfqg_set_weight_device(cgrp, cft, buf, true);
+}
+
+static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
+                            bool is_leaf_weight)
 {
         struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
         struct blkcg_gq *blkg;
@@ -1440,19 +1702,41 @@ static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
                 return -EINVAL;
 
         spin_lock_irq(&blkcg->lock);
-        blkcg->cfq_weight = (unsigned int)val;
+
+        if (!is_leaf_weight)
+                blkcg->cfq_weight = val;
+        else
+                blkcg->cfq_leaf_weight = val;
 
         hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
-                if (cfqg && !cfqg->dev_weight)
-                        cfqg->new_weight = blkcg->cfq_weight;
+                if (!cfqg)
+                        continue;
+
+                if (!is_leaf_weight) {
+                        if (!cfqg->dev_weight)
+                                cfqg->new_weight = blkcg->cfq_weight;
+                } else {
+                        if (!cfqg->dev_leaf_weight)
+                                cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
+                }
         }
 
         spin_unlock_irq(&blkcg->lock);
         return 0;
 }
 
+static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+        return __cfq_set_weight(cgrp, cft, val, false);
+}
+
+static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+        return __cfq_set_weight(cgrp, cft, val, true);
+}
+
 static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
                            struct seq_file *sf)
 {
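All four setters reduce to one precedence rule: a non-zero per-device value overrides the blkcg-wide default, and writing 0 clears the override so the default shows through again. A tiny portable sketch (the kernel spells the fallback with the GNU a ?: b extension; plain ternary below):

#include <stdio.h>

static unsigned effective_weight(unsigned dev_weight, unsigned blkcg_weight)
{
        /* same shape as: cfqg->new_weight = ctx.v ?: blkcg->cfq_weight */
        return dev_weight ? dev_weight : blkcg_weight;
}

int main(void)
{
        printf("%u\n", effective_weight(0, 500));       /* no override -> 500 */
        printf("%u\n", effective_weight(300, 500));     /* device override -> 300 */
        return 0;
}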
@@ -1473,6 +1757,42 @@ static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
         return 0;
 }
 
+static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
+                                      struct blkg_policy_data *pd, int off)
+{
+        u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
+
+        return __blkg_prfill_u64(sf, pd, sum);
+}
+
+static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
+                                        struct blkg_policy_data *pd, int off)
+{
+        struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
+
+        return __blkg_prfill_rwstat(sf, pd, &sum);
+}
+
+static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
+                                     struct seq_file *sf)
+{
+        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
+                          &blkcg_policy_cfq, cft->private, false);
+        return 0;
+}
+
+static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
+                                       struct seq_file *sf)
+{
+        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
+                          &blkcg_policy_cfq, cft->private, true);
+        return 0;
+}
+
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
                                       struct blkg_policy_data *pd, int off)
@@ -1502,17 +1822,49 @@ static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
 #endif	/* CONFIG_DEBUG_BLK_CGROUP */
 
 static struct cftype cfq_blkcg_files[] = {
+        /* on root, weight is mapped to leaf_weight */
+        {
+                .name = "weight_device",
+                .flags = CFTYPE_ONLY_ON_ROOT,
+                .read_seq_string = cfqg_print_leaf_weight_device,
+                .write_string = cfqg_set_leaf_weight_device,
+                .max_write_len = 256,
+        },
+        {
+                .name = "weight",
+                .flags = CFTYPE_ONLY_ON_ROOT,
+                .read_seq_string = cfq_print_leaf_weight,
+                .write_u64 = cfq_set_leaf_weight,
+        },
+
+        /* no such mapping necessary for !roots */
         {
                 .name = "weight_device",
+                .flags = CFTYPE_NOT_ON_ROOT,
                 .read_seq_string = cfqg_print_weight_device,
                 .write_string = cfqg_set_weight_device,
                 .max_write_len = 256,
         },
         {
                 .name = "weight",
+                .flags = CFTYPE_NOT_ON_ROOT,
                 .read_seq_string = cfq_print_weight,
                 .write_u64 = cfq_set_weight,
         },
+
+        {
+                .name = "leaf_weight_device",
+                .read_seq_string = cfqg_print_leaf_weight_device,
+                .write_string = cfqg_set_leaf_weight_device,
+                .max_write_len = 256,
+        },
+        {
+                .name = "leaf_weight",
+                .read_seq_string = cfq_print_leaf_weight,
+                .write_u64 = cfq_set_leaf_weight,
+        },
+
+        /* statistics, covers only the tasks in the cfqg */
         {
                 .name = "time",
                 .private = offsetof(struct cfq_group, stats.time),
@@ -1553,6 +1905,48 @@ static struct cftype cfq_blkcg_files[] = {
                 .private = offsetof(struct cfq_group, stats.queued),
                 .read_seq_string = cfqg_print_rwstat,
         },
+
+        /* the same statistics which cover the cfqg and its descendants */
+        {
+                .name = "time_recursive",
+                .private = offsetof(struct cfq_group, stats.time),
+                .read_seq_string = cfqg_print_stat_recursive,
+        },
+        {
+                .name = "sectors_recursive",
+                .private = offsetof(struct cfq_group, stats.sectors),
+                .read_seq_string = cfqg_print_stat_recursive,
+        },
+        {
+                .name = "io_service_bytes_recursive",
+                .private = offsetof(struct cfq_group, stats.service_bytes),
+                .read_seq_string = cfqg_print_rwstat_recursive,
+        },
+        {
+                .name = "io_serviced_recursive",
+                .private = offsetof(struct cfq_group, stats.serviced),
+                .read_seq_string = cfqg_print_rwstat_recursive,
+        },
+        {
+                .name = "io_service_time_recursive",
+                .private = offsetof(struct cfq_group, stats.service_time),
+                .read_seq_string = cfqg_print_rwstat_recursive,
+        },
+        {
+                .name = "io_wait_time_recursive",
+                .private = offsetof(struct cfq_group, stats.wait_time),
+                .read_seq_string = cfqg_print_rwstat_recursive,
+        },
+        {
+                .name = "io_merged_recursive",
+                .private = offsetof(struct cfq_group, stats.merged),
+                .read_seq_string = cfqg_print_rwstat_recursive,
+        },
+        {
+                .name = "io_queued_recursive",
+                .private = offsetof(struct cfq_group, stats.queued),
+                .read_seq_string = cfqg_print_rwstat_recursive,
+        },
 #ifdef CONFIG_DEBUG_BLK_CGROUP
         {
                 .name = "avg_queue_size",
@@ -1611,15 +2005,14 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         struct rb_node **p, *parent;
         struct cfq_queue *__cfqq;
         unsigned long rb_key;
-        struct cfq_rb_root *service_tree;
+        struct cfq_rb_root *st;
         int left;
         int new_cfqq = 1;
 
-        service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
-                                                cfqq_type(cfqq));
+        st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
         if (cfq_class_idle(cfqq)) {
                 rb_key = CFQ_IDLE_DELAY;
-                parent = rb_last(&service_tree->rb);
+                parent = rb_last(&st->rb);
                 if (parent && parent != &cfqq->rb_node) {
                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
                         rb_key += __cfqq->rb_key;
@@ -1637,7 +2030,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 cfqq->slice_resid = 0;
         } else {
                 rb_key = -HZ;
-                __cfqq = cfq_rb_first(service_tree);
+                __cfqq = cfq_rb_first(st);
                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
         }
 
@@ -1646,8 +2039,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         /*
          * same position, nothing more to do
          */
-        if (rb_key == cfqq->rb_key &&
-            cfqq->service_tree == service_tree)
+        if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
                 return;
 
         cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
@@ -1656,11 +2048,9 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
         left = 1;
         parent = NULL;
-        cfqq->service_tree = service_tree;
-        p = &service_tree->rb.rb_node;
+        cfqq->service_tree = st;
+        p = &st->rb.rb_node;
         while (*p) {
-                struct rb_node **n;
-
                 parent = *p;
                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 
@@ -1668,22 +2058,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                  * sort by key, that represents service time.
                  */
                 if (time_before(rb_key, __cfqq->rb_key))
-                        n = &(*p)->rb_left;
+                        p = &parent->rb_left;
                 else {
-                        n = &(*p)->rb_right;
+                        p = &parent->rb_right;
                         left = 0;
                 }
-
-                p = n;
         }
 
         if (left)
-                service_tree->left = &cfqq->rb_node;
+                st->left = &cfqq->rb_node;
 
         cfqq->rb_key = rb_key;
         rb_link_node(&cfqq->rb_node, parent, p);
-        rb_insert_color(&cfqq->rb_node, &service_tree->rb);
-        service_tree->count++;
+        rb_insert_color(&cfqq->rb_node, &st->rb);
+        st->count++;
         if (add_front || !new_cfqq)
                 return;
         cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
@@ -2029,8 +2417,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
                              struct cfq_queue *cfqq)
 {
         if (cfqq) {
-                cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
-                                cfqd->serving_prio, cfqd->serving_type);
+                cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
+                                cfqd->serving_wl_class, cfqd->serving_wl_type);
                 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
                 cfqq->slice_start = 0;
                 cfqq->dispatch_start = jiffies;
@@ -2116,19 +2504,18 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
  */
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
-        struct cfq_rb_root *service_tree =
-                service_tree_for(cfqd->serving_group, cfqd->serving_prio,
-                                        cfqd->serving_type);
+        struct cfq_rb_root *st = st_for(cfqd->serving_group,
+                        cfqd->serving_wl_class, cfqd->serving_wl_type);
 
         if (!cfqd->rq_queued)
                 return NULL;
 
         /* There is nothing to dispatch */
-        if (!service_tree)
+        if (!st)
                 return NULL;
-        if (RB_EMPTY_ROOT(&service_tree->rb))
+        if (RB_EMPTY_ROOT(&st->rb))
                 return NULL;
-        return cfq_rb_first(service_tree);
+        return cfq_rb_first(st);
 }
 
 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
@@ -2284,17 +2671,17 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 
 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-        enum wl_prio_t prio = cfqq_prio(cfqq);
-        struct cfq_rb_root *service_tree = cfqq->service_tree;
+        enum wl_class_t wl_class = cfqq_class(cfqq);
+        struct cfq_rb_root *st = cfqq->service_tree;
 
-        BUG_ON(!service_tree);
-        BUG_ON(!service_tree->count);
+        BUG_ON(!st);
+        BUG_ON(!st->count);
 
         if (!cfqd->cfq_slice_idle)
                 return false;
 
         /* We never do for idle class queues. */
-        if (prio == IDLE_WORKLOAD)
+        if (wl_class == IDLE_WORKLOAD)
                 return false;
 
         /* We do for queues that were marked with idle window flag. */
@@ -2306,11 +2693,10 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
          * Otherwise, we do only if they are the last ones
          * in their service tree.
          */
-        if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
-            !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
+        if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
+            !cfq_io_thinktime_big(cfqd, &st->ttime, false))
                 return true;
-        cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
-                        service_tree->count);
+        cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
         return false;
 }
 
@@ -2493,8 +2879,8 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
         }
 }
 
-static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
-                        struct cfq_group *cfqg, enum wl_prio_t prio)
+static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
+                        struct cfq_group *cfqg, enum wl_class_t wl_class)
 {
         struct cfq_queue *queue;
         int i;
@@ -2504,7 +2890,7 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 
         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
                 /* select the one with lowest rb_key */
-                queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
+                queue = cfq_rb_first(st_for(cfqg, wl_class, i));
                 if (queue &&
                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
                         lowest_key = queue->rb_key;
@@ -2516,26 +2902,27 @@
         return cur_best;
 }
 
-static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
+static void
+choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
         unsigned slice;
         unsigned count;
         struct cfq_rb_root *st;
         unsigned group_slice;
-        enum wl_prio_t original_prio = cfqd->serving_prio;
+        enum wl_class_t original_class = cfqd->serving_wl_class;
 
         /* Choose next priority. RT > BE > IDLE */
         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
-                cfqd->serving_prio = RT_WORKLOAD;
+                cfqd->serving_wl_class = RT_WORKLOAD;
         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
-                cfqd->serving_prio = BE_WORKLOAD;
+                cfqd->serving_wl_class = BE_WORKLOAD;
         else {
-                cfqd->serving_prio = IDLE_WORKLOAD;
+                cfqd->serving_wl_class = IDLE_WORKLOAD;
                 cfqd->workload_expires = jiffies + 1;
                 return;
         }
 
-        if (original_prio != cfqd->serving_prio)
+        if (original_class != cfqd->serving_wl_class)
                 goto new_workload;
 
         /*
@@ -2543,7 +2930,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
          * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
          * expiration time
          */
-        st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+        st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
         count = st->count;
 
         /*
@@ -2554,9 +2941,9 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
 new_workload:
         /* otherwise select new workload type */
-        cfqd->serving_type =
-                cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
-        st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+        cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
+                                        cfqd->serving_wl_class);
+        st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
         count = st->count;
 
         /*
@@ -2567,10 +2954,11 @@ new_workload:
         group_slice = cfq_group_slice(cfqd, cfqg);
 
         slice = group_slice * count /
-                max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
-                      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
+                max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
+                      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
+                                               cfqg));
 
-        if (cfqd->serving_type == ASYNC_WORKLOAD) {
+        if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
                 unsigned int tmp;
 
                 /*
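The slice computation just above shrinks the group's slice when the chosen service tree holds only part of the class's busy queues. With made-up inputs:

#include <stdio.h>

static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

int main(void)
{
        unsigned group_slice = 100;     /* from cfq_group_slice() */
        unsigned count = 2;             /* queues on the chosen service tree */
        unsigned busy_queues_avg = 3;   /* recent average for this class */
        unsigned busy_now = 5;          /* busy queues of this class right now */

        /* the same arithmetic as the hunk above */
        unsigned slice = group_slice * count / max_u(busy_queues_avg, busy_now);
        printf("slice = %u\n", slice);  /* 100 * 2 / 5 = 40 */
        return 0;
}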
@@ -2616,14 +3004,14 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
         cfqd->serving_group = cfqg;
 
         /* Restore the workload type data */
-        if (cfqg->saved_workload_slice) {
-                cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
-                cfqd->serving_type = cfqg->saved_workload;
-                cfqd->serving_prio = cfqg->saved_serving_prio;
+        if (cfqg->saved_wl_slice) {
+                cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
+                cfqd->serving_wl_type = cfqg->saved_wl_type;
+                cfqd->serving_wl_class = cfqg->saved_wl_class;
         } else
                 cfqd->workload_expires = jiffies - 1;
 
-        choose_service_tree(cfqd, cfqg);
+        choose_wl_class_and_type(cfqd, cfqg);
 }
 
 /*
@@ -3205,6 +3593,8 @@ retry:
                         spin_lock_irq(cfqd->queue->queue_lock);
                         if (new_cfqq)
                                 goto retry;
+                        else
+                                return &cfqd->oom_cfqq;
                 } else {
                         cfqq = kmem_cache_alloc_node(cfq_pool,
                                         gfp_mask | __GFP_ZERO,
@@ -3402,7 +3792,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                 return true;
 
         /* Allow preemption only if we are idling on sync-noidle tree */
-        if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
+        if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
             new_cfqq->service_tree->count == 2 &&
             RB_EMPTY_ROOT(&cfqq->sort_list))
@@ -3454,7 +3844,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
          * doesn't happen
          */
         if (old_type != cfqq_type(cfqq))
-                cfqq->cfqg->saved_workload_slice = 0;
+                cfqq->cfqg->saved_wl_slice = 0;
 
         /*
          * Put the new queue at the front of the of the current list,
@@ -3636,16 +4026,17 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
         if (sync) {
-                struct cfq_rb_root *service_tree;
+                struct cfq_rb_root *st;
 
                 RQ_CIC(rq)->ttime.last_end_request = now;
 
                 if (cfq_cfqq_on_rr(cfqq))
-                        service_tree = cfqq->service_tree;
+                        st = cfqq->service_tree;
                 else
-                        service_tree = service_tree_for(cfqq->cfqg,
-                                cfqq_prio(cfqq), cfqq_type(cfqq));
-                service_tree->ttime.last_end_request = now;
+                        st = st_for(cfqq->cfqg, cfqq_class(cfqq),
+                                        cfqq_type(cfqq));
+
+                st->ttime.last_end_request = now;
                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
                         cfqd->last_delayed_sync = now;
         }
@@ -3992,6 +4383,7 @@ static int cfq_init_queue(struct request_queue *q)
         cfq_init_cfqg_base(cfqd->root_group);
 #endif
         cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
+        cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
 
         /*
          * Not strictly needed (since RB_ROOT just clears the node and we
@@ -4176,6 +4568,7 @@ static struct blkcg_policy blkcg_policy_cfq = {
         .cftypes                = cfq_blkcg_files,
 
         .pd_init_fn             = cfq_pd_init,
+        .pd_offline_fn          = cfq_pd_offline,
         .pd_reset_stats_fn      = cfq_pd_reset_stats,
 };
 #endif