Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	102
1 file changed, 52 insertions(+), 50 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9ef86fbfc9ae..c7449db52a86 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -17,6 +17,8 @@
 #include "blk.h"
 #include "cfq.h"
 
+static struct blkio_policy_type blkio_policy_cfq;
+
 /*
  * tunables
  */
@@ -206,7 +208,6 @@ struct cfq_group {
 	unsigned long saved_workload_slice;
 	enum wl_type_t saved_workload;
 	enum wl_prio_t saved_serving_prio;
-	struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	struct hlist_node cfqd_node;
 	int ref;
@@ -310,6 +311,16 @@ struct cfq_data {
 	unsigned int nr_blkcg_linked_grps;
 };
 
+static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
+{
+	return blkg_to_pdata(blkg, &blkio_policy_cfq);
+}
+
+static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg)
+{
+	return pdata_to_blkg(cfqg, &blkio_policy_cfq);
+}
+
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 
 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
@@ -374,11 +385,11 @@ CFQ_CFQQ_FNS(wait_busy);
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
 			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
-			blkg_path(&(cfqq)->cfqg->blkg), ##args)
+			blkg_path(cfqg_to_blkg((cfqq)->cfqg)), ##args)
 
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
 	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
-			blkg_path(&(cfqg)->blkg), ##args)		\
+			blkg_path(cfqg_to_blkg((cfqg))), ##args)	\
 
 #else
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
@@ -935,7 +946,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
 	cfq_group_service_tree_del(st, cfqg);
 	cfqg->saved_workload_slice = 0;
-	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+	cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg), 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -1007,9 +1018,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1007 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu", 1018 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1008 used_sl, cfqq->slice_dispatch, charge, 1019 used_sl, cfqq->slice_dispatch, charge,
1009 iops_mode(cfqd), cfqq->nr_sectors); 1020 iops_mode(cfqd), cfqq->nr_sectors);
1010 cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl, 1021 cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), used_sl,
1011 unaccounted_sl); 1022 unaccounted_sl);
1012 cfq_blkiocg_set_start_empty_time(&cfqg->blkg); 1023 cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg));
1013} 1024}
1014 1025
1015/** 1026/**
@@ -1032,18 +1043,12 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg)
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
-{
-	if (blkg)
-		return container_of(blkg, struct cfq_group, blkg);
-	return NULL;
-}
-
 static void cfq_update_blkio_group_weight(struct request_queue *q,
 					  struct blkio_group *blkg,
 					  unsigned int weight)
 {
-	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
 	cfqg->new_weight = weight;
 	cfqg->needs_update = true;
 }
@@ -1052,7 +1057,7 @@ static void cfq_link_blkio_group(struct request_queue *q,
 				struct blkio_group *blkg)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
 	cfqd->nr_blkcg_linked_grps++;
 
@@ -1060,17 +1065,12 @@ static void cfq_link_blkio_group(struct request_queue *q,
 	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
 }
 
-static struct blkio_group *cfq_alloc_blkio_group(struct request_queue *q,
-						 struct blkio_cgroup *blkcg)
+static void cfq_init_blkio_group(struct blkio_group *blkg)
 {
-	struct cfq_group *cfqg;
-
-	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, q->node);
-	if (!cfqg)
-		return NULL;
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
 	cfq_init_cfqg_base(cfqg);
-	cfqg->weight = blkcg->weight;
+	cfqg->weight = blkg->blkcg->weight;
 
 	/*
 	 * Take the initial reference that will be released on destroy
@@ -1079,8 +1079,6 @@ static struct blkio_group *cfq_alloc_blkio_group(struct request_queue *q,
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
 	cfqg->ref = 1;
-
-	return &cfqg->blkg;
 }
 
 /*
@@ -1101,7 +1099,7 @@ static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
 
 		blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
 		if (!IS_ERR(blkg))
-			cfqg = cfqg_of_blkg(blkg);
+			cfqg = blkg_to_cfqg(blkg);
 	}
 
 	return cfqg;
@@ -1126,6 +1124,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
 {
+	struct blkio_group *blkg = cfqg_to_blkg(cfqg);
 	struct cfq_rb_root *st;
 	int i, j;
 
@@ -1135,12 +1134,13 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
 		return;
 
 	/* release the extra blkcg reference this blkg has been holding */
-	css_put(&cfqg->blkg.blkcg->css);
+	css_put(&blkg->blkcg->css);
 
 	for_each_cfqg_st(cfqg, i, j, st)
 		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
-	free_percpu(cfqg->blkg.stats_cpu);
-	kfree(cfqg);
+	free_percpu(blkg->stats_cpu);
+	kfree(blkg->pd);
+	kfree(blkg);
 }
 
 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
@@ -1172,7 +1172,7 @@ static bool cfq_release_cfq_groups(struct cfq_data *cfqd)
 	 * it from cgroup list, then it will take care of destroying
 	 * cfqg also.
 	 */
-	if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
+	if (!cfq_blkiocg_del_blkio_group(cfqg_to_blkg(cfqg)))
 		cfq_destroy_cfqg(cfqd, cfqg);
 	else
 		empty = false;
@@ -1201,7 +1201,7 @@ static void cfq_unlink_blkio_group(struct request_queue *q,
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
+	cfq_destroy_cfqg(cfqd, blkg_to_cfqg(blkg));
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -1504,12 +1504,12 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
 					rq_data_dir(rq), rq_is_sync(rq));
 	cfq_add_rq_rb(rq);
-	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
-			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
+					cfqg_to_blkg(cfqq->cfqd->serving_group),
+					rq_data_dir(rq), rq_is_sync(rq));
 }
 
 static struct request *
@@ -1565,7 +1565,7 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
 					rq_data_dir(rq), rq_is_sync(rq));
 	if (rq->cmd_flags & REQ_PRIO) {
 		WARN_ON(!cfqq->prio_pending);
@@ -1601,7 +1601,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			   struct bio *bio)
 {
-	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
+	cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(req)),
 					bio_data_dir(bio), cfq_bio_sync(bio));
 }
 
@@ -1624,7 +1624,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(rq)),
 					rq_data_dir(next), rq_is_sync(next));
 
 	cfqq = RQ_CFQQ(next);
@@ -1666,7 +1666,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	del_timer(&cfqd->idle_slice_timer);
-	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg));
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1675,7 +1675,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
-		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+		cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg));
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -2023,7 +2023,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg));
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
 			group_idle ? 1 : 0);
 }
@@ -2046,8 +2046,9 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
 	cfqq->nr_sectors += blk_rq_sectors(rq);
-	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
-					rq_data_dir(rq), rq_is_sync(rq));
+	cfq_blkiocg_update_dispatch_stats(cfqg_to_blkg(cfqq->cfqg),
+					  blk_rq_bytes(rq), rq_data_dir(rq),
+					  rq_is_sync(rq));
 }
 
 /*
@@ -3135,7 +3136,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			__blk_run_queue(cfqd->queue);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
-						&cfqq->cfqg->blkg);
+						cfqg_to_blkg(cfqq->cfqg));
 			cfq_mark_cfqq_must_dispatch(cfqq);
 		}
 	}
@@ -3162,9 +3163,9 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
-			&cfqd->serving_group->blkg, rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
+					cfqg_to_blkg(cfqd->serving_group),
+					rq_data_dir(rq), rq_is_sync(rq));
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
@@ -3260,7 +3261,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
 	(RQ_CFQG(rq))->dispatched--;
-	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
+	cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
 			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
 			rq_data_dir(rq), rq_is_sync(rq));
 
@@ -3641,7 +3642,7 @@ static int cfq_init_queue(struct request_queue *q)
 	blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_PROP,
 				  true);
 	if (!IS_ERR(blkg))
-		cfqd->root_group = blkg_to_cfqg(blkg);
 
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
@@ -3827,13 +3828,14 @@ static struct elevator_type iosched_cfq = {
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 static struct blkio_policy_type blkio_policy_cfq = {
 	.ops = {
-		.blkio_alloc_group_fn =		cfq_alloc_blkio_group,
+		.blkio_init_group_fn =		cfq_init_blkio_group,
 		.blkio_link_group_fn =		cfq_link_blkio_group,
 		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
 		.blkio_clear_queue_fn = cfq_clear_queue,
 		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
 	},
 	.plid = BLKIO_POLICY_PROP,
+	.pdata_size = sizeof(struct cfq_group),
 };
 #endif
 
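
For context, below is a minimal userspace sketch of the pdata accessor pattern that the new blkg_to_cfqg()/cfqg_to_blkg() helpers build on. It is an illustrative assumption, not the kernel's actual blk-cgroup implementation: the structure layouts, the blkg_alloc() helper, and the single-policy simplification are invented for the example. The only facts taken from the patch are that the blkcg core now allocates .pdata_size = sizeof(struct cfq_group) of policy-private data per group (so cfq_group no longer embeds a struct blkio_group) and that cfq_put_cfqg() frees blkg->pd and blkg separately.

/* Illustrative sketch only -- not the real blk-cgroup code. */
#include <stddef.h>
#include <stdlib.h>

struct blkio_group;

struct blkio_policy_type {
	size_t pdata_size;		/* e.g. sizeof(struct cfq_group) */
};

struct blkg_policy_data {
	struct blkio_group *blkg;	/* back-pointer used by pdata_to_blkg() */
	char pdata[];			/* policy-private area (the cfq_group) */
};

struct blkio_group {
	struct blkg_policy_data *pd;
};

/* group -> policy data, the direction blkg_to_cfqg() uses */
static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	(void)pol;			/* single-policy sketch: pol unused */
	return blkg ? blkg->pd->pdata : NULL;
}

/* policy data -> group, the direction cfqg_to_blkg() uses */
static inline struct blkio_group *pdata_to_blkg(void *pdata,
						struct blkio_policy_type *pol)
{
	(void)pol;
	if (!pdata)
		return NULL;
	return ((struct blkg_policy_data *)
		((char *)pdata - offsetof(struct blkg_policy_data, pdata)))->blkg;
}

/*
 * Allocation side of the sketch: policy data lives in a separate
 * allocation hanging off the group, which is why the patched
 * cfq_put_cfqg() does kfree(blkg->pd); kfree(blkg); instead of
 * kfree(cfqg).
 */
static struct blkio_group *blkg_alloc(struct blkio_policy_type *pol)
{
	struct blkio_group *blkg = calloc(1, sizeof(*blkg));
	struct blkg_policy_data *pd;

	if (!blkg)
		return NULL;
	pd = calloc(1, sizeof(*pd) + pol->pdata_size);
	if (!pd) {
		free(blkg);
		return NULL;
	}
	pd->blkg = blkg;
	blkg->pd = pd;
	return blkg;
}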