path: root/block
author	Tejun Heo <tj@kernel.org>	2012-04-01 17:38:44 -0400
committer	Tejun Heo <tj@kernel.org>	2012-04-01 17:38:44 -0400
commit	8a3d26151f24e2a2ffa550890144c3d54d2edb15 (patch)
tree	b99f5cf8db0abc02e39e5c5107f8d4342f81effa /block
parent	155fead9b6347ead90e0b0396cb108a6ba6126c6 (diff)
blkcg: move blkio_group_stats_cpu and friends to blk-throttle.c
blkio_group_stats_cpu is used only by blk-throtl and has no reason to be
defined in blkcg core.

* Move blkio_group_stats_cpu to blk-throttle.c and rename it to
  tg_stats_cpu.

* blkg_policy_data->stats_cpu is replaced with throtl_grp->stats_cpu.
  prfill functions updated accordingly.

* All related macros / functions are renamed so that they have tg_
  prefix and the unnecessary @pol arguments are dropped.

* Per-cpu stats allocation code is also moved from blk-cgroup.c to
  blk-throttle.c and gets simplified to only deal with
  BLKIO_POLICY_THROTL.  percpu stat free is performed by the exit
  method throtl_exit_blkio_group().

* throtl_reset_group_stats() implemented for blkio_reset_group_stats_fn
  method so that tg->stats_cpu can be reset.

Signed-off-by: Tejun Heo <tj@kernel.org>
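The subtle part of the move is the allocation handoff: the percpu allocator
can sleep, so it cannot be called from the IO path where throtl groups are
created. The patch instead parks each new group on tg_stats_alloc_list and
lets a work item allocate the stats area and swap it into place. Below is a
condensed sketch of that pattern using the names from the patch — not the
verbatim kernel code (the goto loop is rewritten as do/while here; the real
version is in the blk-throttle.c hunk further down):

/*
 * Condensed sketch: per-cpu stats are allocated from a non-reentrant
 * work item because alloc_percpu() may sleep and tg creation happens
 * in the IO path.
 */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *work)
{
	/* static is safe: at most one instance runs on system_nrt_wq */
	static struct tg_stats_cpu *stats_cpu;
	bool empty;

	do {
		if (!stats_cpu) {
			stats_cpu = alloc_percpu(struct tg_stats_cpu);
			if (!stats_cpu) {
				/* retry later rather than failing tg creation */
				queue_delayed_work(system_nrt_wq,
						   to_delayed_work(work),
						   msecs_to_jiffies(10));
				return;
			}
		}

		spin_lock_irq(&tg_stats_alloc_lock);
		if (!list_empty(&tg_stats_alloc_list)) {
			struct throtl_grp *tg =
				list_first_entry(&tg_stats_alloc_list,
						 struct throtl_grp,
						 stats_alloc_node);
			/* hand the pre-allocated area to the first waiter */
			swap(tg->stats_cpu, stats_cpu);
			list_del_init(&tg->stats_alloc_node);
		}
		empty = list_empty(&tg_stats_alloc_list);
		spin_unlock_irq(&tg_stats_alloc_lock);
	} while (!empty);
}

Compared with the blkcg-core version being removed, only one percpu area is
in flight at a time instead of one per policy, which is what lets the
pcpu_stats[] array and the blkio_list_lock nesting go away.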
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c   |  98
-rw-r--r--  block/blk-cgroup.h   |  13
-rw-r--r--  block/blk-throttle.c | 128
3 files changed, 114 insertions(+), 125 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index cfdda44f4a0b..16f6ee65a593 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -30,13 +30,6 @@ static LIST_HEAD(blkio_list);
 static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
-/* List of groups pending per cpu stats allocation */
-static DEFINE_SPINLOCK(alloc_list_lock);
-static LIST_HEAD(alloc_list);
-
-static void blkio_stat_alloc_fn(struct work_struct *);
-static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);
-
 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
@@ -63,60 +56,6 @@ struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
 }
 EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
 
-/*
- * Worker for allocating per cpu stat for blk groups. This is scheduled on
- * the system_nrt_wq once there are some groups on the alloc_list waiting
- * for allocation.
- */
-static void blkio_stat_alloc_fn(struct work_struct *work)
-{
-	static void *pcpu_stats[BLKIO_NR_POLICIES];
-	struct delayed_work *dwork = to_delayed_work(work);
-	struct blkio_group *blkg;
-	int i;
-	bool empty = false;
-
-alloc_stats:
-	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
-		if (pcpu_stats[i] != NULL)
-			continue;
-
-		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);
-
-		/* Allocation failed. Try again after some time. */
-		if (pcpu_stats[i] == NULL) {
-			queue_delayed_work(system_nrt_wq, dwork,
-					   msecs_to_jiffies(10));
-			return;
-		}
-	}
-
-	spin_lock_irq(&blkio_list_lock);
-	spin_lock(&alloc_list_lock);
-
-	/* cgroup got deleted or queue exited. */
-	if (!list_empty(&alloc_list)) {
-		blkg = list_first_entry(&alloc_list, struct blkio_group,
-					alloc_node);
-		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
-			struct blkg_policy_data *pd = blkg->pd[i];
-
-			if (blkio_policy[i] && pd && !pd->stats_cpu)
-				swap(pd->stats_cpu, pcpu_stats[i]);
-		}
-
-		list_del_init(&blkg->alloc_node);
-	}
-
-	empty = list_empty(&alloc_list);
-
-	spin_unlock(&alloc_list_lock);
-	spin_unlock_irq(&blkio_list_lock);
-
-	if (!empty)
-		goto alloc_stats;
-}
-
 /**
  * blkg_free - free a blkg
  * @blkg: blkg to free
@@ -140,7 +79,6 @@ static void blkg_free(struct blkio_group *blkg)
 		if (pol && pol->ops.blkio_exit_group_fn)
 			pol->ops.blkio_exit_group_fn(blkg);
 
-		free_percpu(pd->stats_cpu);
 		kfree(pd);
 	}
 
@@ -167,7 +105,6 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
 
 	blkg->q = q;
 	INIT_LIST_HEAD(&blkg->q_node);
-	INIT_LIST_HEAD(&blkg->alloc_node);
 	blkg->blkcg = blkcg;
 	blkg->refcnt = 1;
 	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
@@ -245,12 +182,6 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 	list_add(&blkg->q_node, &q->blkg_list);
 	spin_unlock(&blkcg->lock);
-
-	spin_lock(&alloc_list_lock);
-	list_add(&blkg->alloc_node, &alloc_list);
-	/* Queue per cpu stat allocation from worker thread. */
-	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
-	spin_unlock(&alloc_list_lock);
 out:
 	return blkg;
 }
@@ -284,10 +215,6 @@ static void blkg_destroy(struct blkio_group *blkg)
 	list_del_init(&blkg->q_node);
 	hlist_del_init_rcu(&blkg->blkcg_node);
 
-	spin_lock(&alloc_list_lock);
-	list_del_init(&blkg->alloc_node);
-	spin_unlock(&alloc_list_lock);
-
 	/*
 	 * Put the reference taken at the time of creation so that when all
 	 * queues are gone, group can be destroyed.
@@ -319,9 +246,6 @@ void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
 		pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
 		WARN_ON_ONCE(!pd);
 
-		pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
-		WARN_ON_ONCE(!pd->stats_cpu);
-
 		blkg->pd[plid] = pd;
 		pd->blkg = blkg;
 		pol->ops.blkio_init_group_fn(blkg);
@@ -381,23 +305,6 @@ void __blkg_release(struct blkio_group *blkg)
 }
 EXPORT_SYMBOL_GPL(__blkg_release);
 
-static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
-{
-	struct blkg_policy_data *pd = blkg->pd[plid];
-	int cpu;
-
-	if (pd->stats_cpu == NULL)
-		return;
-
-	for_each_possible_cpu(cpu) {
-		struct blkio_group_stats_cpu *sc =
-			per_cpu_ptr(pd->stats_cpu, cpu);
-
-		blkg_rwstat_reset(&sc->service_bytes);
-		blkg_rwstat_reset(&sc->serviced);
-	}
-}
-
 static int
 blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 {
@@ -416,12 +323,9 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
 		struct blkio_policy_type *pol;
 
-		list_for_each_entry(pol, &blkio_list, list) {
-			blkio_reset_stats_cpu(blkg, pol->plid);
-
+		list_for_each_entry(pol, &blkio_list, list)
 			if (pol->ops.blkio_reset_group_stats_fn)
 				pol->ops.blkio_reset_group_stats_fn(blkg);
-		}
 	}
 
 	spin_unlock_irq(&blkcg->lock);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 791570394e87..e368dd00b8cf 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -64,14 +64,6 @@ struct blkg_rwstat {
 	uint64_t			cnt[BLKG_RWSTAT_NR];
 };
 
-/* Per cpu blkio group stats */
-struct blkio_group_stats_cpu {
-	/* total bytes transferred */
-	struct blkg_rwstat		service_bytes;
-	/* total IOs serviced, post merge */
-	struct blkg_rwstat		serviced;
-};
-
 struct blkio_group_conf {
 	unsigned int			weight;
 	u64				iops[2];
@@ -86,9 +78,6 @@ struct blkg_policy_data {
 	/* Configuration */
 	struct blkio_group_conf conf;
 
-	/* Per cpu stats pointer */
-	struct blkio_group_stats_cpu __percpu *stats_cpu;
-
 	/* pol->pdata_size bytes of private data used by policy impl */
 	char pdata[] __aligned(__alignof__(unsigned long long));
 };
@@ -106,8 +95,6 @@ struct blkio_group {
 
 	struct blkg_policy_data *pd[BLKIO_NR_POLICIES];
 
-	/* List of blkg waiting for per cpu stats memory to be allocated */
-	struct list_head alloc_node;
 	struct rcu_head rcu_head;
 };
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index cb259bc46f43..27f7960dd421 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -40,6 +40,14 @@ struct throtl_rb_root {
 
 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
 
+/* Per-cpu group stats */
+struct tg_stats_cpu {
+	/* total bytes transferred */
+	struct blkg_rwstat		service_bytes;
+	/* total IOs serviced, post merge */
+	struct blkg_rwstat		serviced;
+};
+
 struct throtl_grp {
 	/* active throtl group service_tree member */
 	struct rb_node rb_node;
@@ -76,6 +84,12 @@ struct throtl_grp {
 
 	/* Some throttle limits got updated for the group */
 	int limits_changed;
+
+	/* Per cpu stats pointer */
+	struct tg_stats_cpu __percpu *stats_cpu;
+
+	/* List of tgs waiting for per cpu stats memory to be allocated */
+	struct list_head stats_alloc_node;
 };
 
 struct throtl_data
@@ -100,6 +114,13 @@ struct throtl_data
 	int limits_changed;
 };
 
+/* list and work item to allocate percpu group stats */
+static DEFINE_SPINLOCK(tg_stats_alloc_lock);
+static LIST_HEAD(tg_stats_alloc_list);
+
+static void tg_stats_alloc_fn(struct work_struct *);
+static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
+
 static inline struct throtl_grp *blkg_to_tg(struct blkio_group *blkg)
 {
 	return blkg_to_pdata(blkg, &blkio_policy_throtl);
@@ -142,6 +163,44 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 	return td->nr_queued[0] + td->nr_queued[1];
 }
 
+/*
+ * Worker for allocating per cpu stat for tgs. This is scheduled on the
+ * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * allocation.
+ */
+static void tg_stats_alloc_fn(struct work_struct *work)
+{
+	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
+	struct delayed_work *dwork = to_delayed_work(work);
+	bool empty = false;
+
+alloc_stats:
+	if (!stats_cpu) {
+		stats_cpu = alloc_percpu(struct tg_stats_cpu);
+		if (!stats_cpu) {
+			/* allocation failed, try again after some time */
+			queue_delayed_work(system_nrt_wq, dwork,
+					   msecs_to_jiffies(10));
+			return;
+		}
+	}
+
+	spin_lock_irq(&tg_stats_alloc_lock);
+
+	if (!list_empty(&tg_stats_alloc_list)) {
+		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
+							 struct throtl_grp,
+							 stats_alloc_node);
+		swap(tg->stats_cpu, stats_cpu);
+		list_del_init(&tg->stats_alloc_node);
+	}
+
+	empty = list_empty(&tg_stats_alloc_list);
+	spin_unlock_irq(&tg_stats_alloc_lock);
+	if (!empty)
+		goto alloc_stats;
+}
+
 static void throtl_init_blkio_group(struct blkio_group *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
@@ -155,6 +214,43 @@ static void throtl_init_blkio_group(struct blkio_group *blkg)
 	tg->bps[WRITE] = -1;
 	tg->iops[READ] = -1;
 	tg->iops[WRITE] = -1;
+
+	/*
+	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
+	 * but percpu allocator can't be called from IO path.  Queue tg on
+	 * tg_stats_alloc_list and allocate from work item.
+	 */
+	spin_lock(&tg_stats_alloc_lock);
+	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
+	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+	spin_unlock(&tg_stats_alloc_lock);
+}
+
+static void throtl_exit_blkio_group(struct blkio_group *blkg)
+{
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+
+	spin_lock(&tg_stats_alloc_lock);
+	list_del_init(&tg->stats_alloc_node);
+	spin_unlock(&tg_stats_alloc_lock);
+
+	free_percpu(tg->stats_cpu);
+}
+
+static void throtl_reset_group_stats(struct blkio_group *blkg)
+{
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+	int cpu;
+
+	if (tg->stats_cpu == NULL)
+		return;
+
+	for_each_possible_cpu(cpu) {
+		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
+
+		blkg_rwstat_reset(&sc->service_bytes);
+		blkg_rwstat_reset(&sc->serviced);
+	}
 }
 
 static struct
@@ -565,12 +661,12 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
 					 int rw)
 {
-	struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_THROTL];
-	struct blkio_group_stats_cpu *stats_cpu;
+	struct throtl_grp *tg = blkg_to_tg(blkg);
+	struct tg_stats_cpu *stats_cpu;
 	unsigned long flags;
 
 	/* If per cpu stats are not allocated yet, don't do any accounting. */
-	if (pd->stats_cpu == NULL)
+	if (tg->stats_cpu == NULL)
 		return;
 
 	/*
@@ -580,7 +676,7 @@ static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
 	 */
 	local_irq_save(flags);
 
-	stats_cpu = this_cpu_ptr(pd->stats_cpu);
+	stats_cpu = this_cpu_ptr(tg->stats_cpu);
 
 	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
 	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
@@ -842,15 +938,15 @@ static void throtl_update_blkio_group_common(struct throtl_data *td,
 	throtl_schedule_delayed_work(td, 0);
 }
 
-static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
+static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
 				  struct blkg_policy_data *pd, int off)
 {
+	struct throtl_grp *tg = (void *)pd->pdata;
 	struct blkg_rwstat rwstat = { }, tmp;
 	int i, cpu;
 
 	for_each_possible_cpu(cpu) {
-		struct blkio_group_stats_cpu *sc =
-			per_cpu_ptr(pd->stats_cpu, cpu);
+		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 
 		tmp = blkg_rwstat_read((void *)sc + off);
 		for (i = 0; i < BLKG_RWSTAT_NR; i++)
@@ -861,12 +957,12 @@ static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
 }
 
 /* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
-static int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
+static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
 				  struct seq_file *sf)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 
-	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
+	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat,
 			  BLKCG_STAT_POL(cft->private),
 			  BLKCG_STAT_OFF(cft->private), true);
 	return 0;
@@ -1012,14 +1108,14 @@ static struct cftype throtl_files[] = {
 	{
 		.name = "throttle.io_service_bytes",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
-				offsetof(struct blkio_group_stats_cpu, service_bytes)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
+				offsetof(struct tg_stats_cpu, service_bytes)),
+		.read_seq_string = tg_print_cpu_rwstat,
 	},
 	{
 		.name = "throttle.io_serviced",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
-				offsetof(struct blkio_group_stats_cpu, serviced)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
+				offsetof(struct tg_stats_cpu, serviced)),
+		.read_seq_string = tg_print_cpu_rwstat,
 	},
 	{ }	/* terminate */
 };
@@ -1034,6 +1130,8 @@ static void throtl_shutdown_wq(struct request_queue *q)
 static struct blkio_policy_type blkio_policy_throtl = {
 	.ops = {
 		.blkio_init_group_fn = throtl_init_blkio_group,
+		.blkio_exit_group_fn = throtl_exit_blkio_group,
+		.blkio_reset_group_stats_fn = throtl_reset_group_stats,
 	},
 	.plid = BLKIO_POLICY_THROTL,
 	.pdata_size = sizeof(struct throtl_grp),