 block/blk-cgroup.c   |  86
 block/blk-cgroup.h   |  53
 block/blk-throttle.c |  79
 block/cfq-iosched.c  | 102
 4 files changed, 209 insertions(+), 111 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 266c0707d588..14367499cfed 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -422,6 +422,70 @@ void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
 
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkio_group *blkg)
+{
+	if (blkg) {
+		free_percpu(blkg->stats_cpu);
+		kfree(blkg->pd);
+		kfree(blkg);
+	}
+}
+
+/**
+ * blkg_alloc - allocate a blkg
+ * @blkcg: block cgroup the new blkg is associated with
+ * @q: request_queue the new blkg is associated with
+ * @pol: policy the new blkg is associated with
+ *
+ * Allocate a new blkg associating @blkcg and @q for @pol.
+ *
+ * FIXME: Should be called with queue locked but currently isn't due to
+ * percpu stat breakage.
+ */
+static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
+				      struct request_queue *q,
+				      struct blkio_policy_type *pol)
+{
+	struct blkio_group *blkg;
+
+	/* alloc and init base part */
+	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+	if (!blkg)
+		return NULL;
+
+	spin_lock_init(&blkg->stats_lock);
+	rcu_assign_pointer(blkg->q, q);
+	blkg->blkcg = blkcg;
+	blkg->plid = pol->plid;
+	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
+
+	/* alloc per-policy data */
+	blkg->pd = kzalloc_node(sizeof(*blkg->pd) + pol->pdata_size, GFP_ATOMIC,
+				q->node);
+	if (!blkg->pd) {
+		blkg_free(blkg);
+		return NULL;
+	}
+
+	/* broken, read comment in the callsite */
+	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
+	if (!blkg->stats_cpu) {
+		blkg_free(blkg);
+		return NULL;
+	}
+
+	/* attach pd to blkg and invoke per-policy init */
+	blkg->pd->blkg = blkg;
+	pol->ops.blkio_init_group_fn(blkg);
+	return blkg;
+}
+
 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 				       struct request_queue *q,
 				       enum blkio_policy_id plid,
@@ -463,19 +527,7 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
 
-	new_blkg = pol->ops.blkio_alloc_group_fn(q, blkcg);
-	if (new_blkg) {
-		new_blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
-
-		spin_lock_init(&new_blkg->stats_lock);
-		rcu_assign_pointer(new_blkg->q, q);
-		new_blkg->blkcg = blkcg;
-		new_blkg->plid = plid;
-		cgroup_path(blkcg->css.cgroup, new_blkg->path,
-			    sizeof(new_blkg->path));
-	} else {
-		css_put(&blkcg->css);
-	}
+	new_blkg = blkg_alloc(blkcg, q, pol);
 
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
@@ -492,7 +544,7 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 		goto out;
 
 	/* did alloc fail? */
-	if (unlikely(!new_blkg || !new_blkg->stats_cpu)) {
+	if (unlikely(!new_blkg)) {
 		blkg = ERR_PTR(-ENOMEM);
 		goto out;
 	}
@@ -504,11 +556,7 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 	pol->ops.blkio_link_group_fn(q, blkg);
 	spin_unlock(&blkcg->lock);
 out:
-	if (new_blkg) {
-		free_percpu(new_blkg->stats_cpu);
-		kfree(new_blkg);
-		css_put(&blkcg->css);
-	}
+	blkg_free(new_blkg);
 	return blkg;
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
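
The blkg_free()/blkg_alloc() pair added above follows a common kernel idiom: every allocation failure funnels into a single free routine that tolerates a partially constructed object, which works because kfree() and free_percpu() are both no-ops on NULL. A minimal userspace sketch of the same idiom (obj/obj_alloc/obj_free are illustrative names, not code from this patch):

#include <stdlib.h>

struct obj {
	int *stats;			/* stands in for blkg->stats_cpu */
	char *pd;			/* stands in for blkg->pd */
};

static void obj_free(struct obj *o)
{
	if (o) {			/* NULL-safe, like blkg_free() */
		free(o->stats);		/* free(NULL) is a no-op */
		free(o->pd);
		free(o);
	}
}

static struct obj *obj_alloc(size_t pdata_size)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;

	o->pd = calloc(1, pdata_size);
	o->stats = calloc(1, sizeof(*o->stats));
	if (!o->pd || !o->stats) {
		obj_free(o);		/* one exit path for every failure */
		return NULL;
	}
	return o;
}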
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 3bc171080e93..9537819c29c6 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -159,6 +159,15 @@ struct blkio_group_conf {
 	u64 bps[2];
 };
 
+/* per-blkg per-policy data */
+struct blkg_policy_data {
+	/* the blkg this per-policy data belongs to */
+	struct blkio_group *blkg;
+
+	/* pol->pdata_size bytes of private data used by policy impl */
+	char pdata[] __aligned(__alignof__(unsigned long long));
+};
+
 struct blkio_group {
 	/* Pointer to the associated request_queue, RCU protected */
 	struct request_queue __rcu *q;
@@ -177,10 +186,11 @@ struct blkio_group {
 	struct blkio_group_stats stats;
 	/* Per cpu stats pointer */
 	struct blkio_group_stats_cpu __percpu *stats_cpu;
+
+	struct blkg_policy_data *pd;
 };
 
-typedef struct blkio_group *(blkio_alloc_group_fn)(struct request_queue *q,
-						   struct blkio_cgroup *blkcg);
+typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
 typedef void (blkio_link_group_fn)(struct request_queue *q,
 				   struct blkio_group *blkg);
 typedef void (blkio_unlink_group_fn)(struct request_queue *q,
@@ -198,7 +208,7 @@ typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
 		struct blkio_group *blkg, unsigned int write_iops);
 
 struct blkio_policy_ops {
-	blkio_alloc_group_fn *blkio_alloc_group_fn;
+	blkio_init_group_fn *blkio_init_group_fn;
 	blkio_link_group_fn *blkio_link_group_fn;
 	blkio_unlink_group_fn *blkio_unlink_group_fn;
 	blkio_clear_queue_fn *blkio_clear_queue_fn;
@@ -213,6 +223,7 @@ struct blkio_policy_type {
 	struct list_head list;
 	struct blkio_policy_ops ops;
 	enum blkio_policy_id plid;
+	size_t pdata_size;		/* policy specific private data size */
 };
 
 extern int blkcg_init_queue(struct request_queue *q);
@@ -224,6 +235,38 @@ extern void blkio_policy_register(struct blkio_policy_type *);
 extern void blkio_policy_unregister(struct blkio_policy_type *);
 extern void blkg_destroy_all(struct request_queue *q);
 
+/**
+ * blkg_to_pdata - get policy private data
+ * @blkg: blkg of interest
+ * @pol: policy of interest
+ *
+ * Return pointer to private data associated with the @blkg-@pol pair.
+ */
+static inline void *blkg_to_pdata(struct blkio_group *blkg,
+				  struct blkio_policy_type *pol)
+{
+	return blkg ? blkg->pd->pdata : NULL;
+}
+
+/**
+ * pdata_to_blkg - get blkg associated with policy private data
+ * @pdata: policy private data of interest
+ * @pol: policy @pdata is for
+ *
+ * @pdata is policy private data for @pol.  Determine the blkg it's
+ * associated with.
+ */
+static inline struct blkio_group *pdata_to_blkg(void *pdata,
+						struct blkio_policy_type *pol)
+{
+	if (pdata) {
+		struct blkg_policy_data *pd =
+			container_of(pdata, struct blkg_policy_data, pdata);
+		return pd->blkg;
+	}
+	return NULL;
+}
+
 static inline char *blkg_path(struct blkio_group *blkg)
 {
 	return blkg->path;
@@ -244,6 +287,10 @@ static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
 static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
 static inline void blkg_destroy_all(struct request_queue *q) { }
 
+static inline void *blkg_to_pdata(struct blkio_group *blkg,
+				  struct blkio_policy_type *pol) { return NULL; }
+static inline struct blkio_group *pdata_to_blkg(void *pdata,
+				  struct blkio_policy_type *pol) { return NULL; }
 static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
 
 #endif
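
blkg_to_pdata() and pdata_to_blkg() above are exact inverses built on the flexible array member: the former hands out a pointer to pd->pdata, the latter uses container_of() to walk back from that pointer to the enclosing blkg_policy_data and returns its blkg back-pointer. A compilable userspace sketch of the round trip (struct pd and its fields are illustrative stand-ins; assumes GCC/Clang for the alignment attribute):

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pd {
	void *owner;					/* back-pointer, like pd->blkg */
	char pdata[] __attribute__((aligned(8)));	/* policy-private area */
};

int main(void)
{
	/* one allocation covers header + private area, like blkg_alloc() */
	struct pd *pd = calloc(1, sizeof(*pd) + 64);

	if (!pd)
		return 1;

	void *pdata = pd->pdata;	/* what blkg_to_pdata() would return */

	/* pdata_to_blkg() recovers the enclosing object from the pdata pointer */
	assert(container_of(pdata, struct pd, pdata) == pd);
	free(pd);
	return 0;
}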
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index ac6d0fe6e4ee..9c8a12477e13 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -21,6 +21,8 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;	/* 100 ms */
 
+static struct blkio_policy_type blkio_policy_throtl;
+
 /* A workqueue to queue throttle related work */
 static struct workqueue_struct *kthrotld_workqueue;
 static void throtl_schedule_delayed_work(struct throtl_data *td,
@@ -52,7 +54,6 @@ struct throtl_grp {
 	 */
 	unsigned long disptime;
 
-	struct blkio_group blkg;
 	atomic_t ref;
 	unsigned int flags;
 
@@ -108,6 +109,16 @@ struct throtl_data
 	int limits_changed;
 };
 
+static inline struct throtl_grp *blkg_to_tg(struct blkio_group *blkg)
+{
+	return blkg_to_pdata(blkg, &blkio_policy_throtl);
+}
+
+static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg)
+{
+	return pdata_to_blkg(tg, &blkio_policy_throtl);
+}
+
 enum tg_state_flags {
 	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
 };
@@ -130,19 +141,11 @@ THROTL_TG_FNS(on_rr);
 
 #define throtl_log_tg(td, tg, fmt, args...)				\
 	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
-			  blkg_path(&(tg)->blkg), ##args);		\
+			  blkg_path(tg_to_blkg(tg)), ##args);		\
 
 #define throtl_log(td, fmt, args...)	\
 	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
 
-static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
-{
-	if (blkg)
-		return container_of(blkg, struct throtl_grp, blkg);
-
-	return NULL;
-}
-
 static inline unsigned int total_nr_queued(struct throtl_data *td)
 {
 	return td->nr_queued[0] + td->nr_queued[1];
@@ -156,21 +159,24 @@ static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
 
 static void throtl_free_tg(struct rcu_head *head)
 {
-	struct throtl_grp *tg;
+	struct throtl_grp *tg = container_of(head, struct throtl_grp, rcu_head);
+	struct blkio_group *blkg = tg_to_blkg(tg);
 
-	tg = container_of(head, struct throtl_grp, rcu_head);
-	free_percpu(tg->blkg.stats_cpu);
-	kfree(tg);
+	free_percpu(blkg->stats_cpu);
+	kfree(blkg->pd);
+	kfree(blkg);
 }
 
 static void throtl_put_tg(struct throtl_grp *tg)
 {
+	struct blkio_group *blkg = tg_to_blkg(tg);
+
 	BUG_ON(atomic_read(&tg->ref) <= 0);
 	if (!atomic_dec_and_test(&tg->ref))
 		return;
 
 	/* release the extra blkcg reference this blkg has been holding */
-	css_put(&tg->blkg.blkcg->css);
+	css_put(&blkg->blkcg->css);
 
 	/*
 	 * A group is freed in rcu manner. But having an rcu lock does not
@@ -184,14 +190,9 @@ static void throtl_put_tg(struct throtl_grp *tg)
 	call_rcu(&tg->rcu_head, throtl_free_tg);
 }
 
-static struct blkio_group *throtl_alloc_blkio_group(struct request_queue *q,
-						    struct blkio_cgroup *blkcg)
+static void throtl_init_blkio_group(struct blkio_group *blkg)
 {
-	struct throtl_grp *tg;
-
-	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, q->node);
-	if (!tg)
-		return NULL;
+	struct throtl_grp *tg = blkg_to_tg(blkg);
 
 	INIT_HLIST_NODE(&tg->tg_node);
 	RB_CLEAR_NODE(&tg->rb_node);
@@ -211,15 +212,13 @@ static struct blkio_group *throtl_alloc_blkio_group(struct request_queue *q,
 	 * exit or cgroup deletion path depending on who is exiting first.
 	 */
 	atomic_set(&tg->ref, 1);
-
-	return &tg->blkg;
 }
 
 static void throtl_link_blkio_group(struct request_queue *q,
 				    struct blkio_group *blkg)
 {
 	struct throtl_data *td = q->td;
-	struct throtl_grp *tg = tg_of_blkg(blkg);
+	struct throtl_grp *tg = blkg_to_tg(blkg);
 
 	hlist_add_head(&tg->tg_node, &td->tg_list);
 	td->nr_undestroyed_grps++;
@@ -235,7 +234,7 @@ throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	if (blkcg == &blkio_root_cgroup)
 		return td->root_tg;
 
-	return tg_of_blkg(blkg_lookup(blkcg, td->queue, BLKIO_POLICY_THROTL));
+	return blkg_to_tg(blkg_lookup(blkcg, td->queue, BLKIO_POLICY_THROTL));
 }
 
 static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
@@ -257,7 +256,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 
 		/* if %NULL and @q is alive, fall back to root_tg */
 		if (!IS_ERR(blkg))
-			tg = tg_of_blkg(blkg);
+			tg = blkg_to_tg(blkg);
 		else if (!blk_queue_dead(q))
 			tg = td->root_tg;
 	}
@@ -639,7 +638,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	tg->bytes_disp[rw] += bio->bi_size;
 	tg->io_disp[rw]++;
 
-	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
+	blkiocg_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, rw, sync);
 }
 
 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -901,7 +900,7 @@ static bool throtl_release_tgs(struct throtl_data *td, bool release_root)
 		 * it from cgroup list, then it will take care of destroying
 		 * cfqg also.
 		 */
-		if (!blkiocg_del_blkio_group(&tg->blkg))
+		if (!blkiocg_del_blkio_group(tg_to_blkg(tg)))
 			throtl_destroy_tg(td, tg);
 		else
 			empty = false;
@@ -929,7 +928,7 @@ void throtl_unlink_blkio_group(struct request_queue *q,
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	throtl_destroy_tg(q->td, tg_of_blkg(blkg));
+	throtl_destroy_tg(q->td, blkg_to_tg(blkg));
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -968,7 +967,7 @@ static void throtl_update_blkio_group_common(struct throtl_data *td,
 static void throtl_update_blkio_group_read_bps(struct request_queue *q,
 				struct blkio_group *blkg, u64 read_bps)
 {
-	struct throtl_grp *tg = tg_of_blkg(blkg);
+	struct throtl_grp *tg = blkg_to_tg(blkg);
 
 	tg->bps[READ] = read_bps;
 	throtl_update_blkio_group_common(q->td, tg);
@@ -977,7 +976,7 @@ static void throtl_update_blkio_group_read_bps(struct request_queue *q,
 static void throtl_update_blkio_group_write_bps(struct request_queue *q,
 				struct blkio_group *blkg, u64 write_bps)
 {
-	struct throtl_grp *tg = tg_of_blkg(blkg);
+	struct throtl_grp *tg = blkg_to_tg(blkg);
 
 	tg->bps[WRITE] = write_bps;
 	throtl_update_blkio_group_common(q->td, tg);
@@ -986,7 +985,7 @@ static void throtl_update_blkio_group_write_bps(struct request_queue *q,
 static void throtl_update_blkio_group_read_iops(struct request_queue *q,
 				struct blkio_group *blkg, unsigned int read_iops)
 {
-	struct throtl_grp *tg = tg_of_blkg(blkg);
+	struct throtl_grp *tg = blkg_to_tg(blkg);
 
 	tg->iops[READ] = read_iops;
 	throtl_update_blkio_group_common(q->td, tg);
@@ -995,7 +994,7 @@ static void throtl_update_blkio_group_read_iops(struct request_queue *q,
 static void throtl_update_blkio_group_write_iops(struct request_queue *q,
 				struct blkio_group *blkg, unsigned int write_iops)
 {
-	struct throtl_grp *tg = tg_of_blkg(blkg);
+	struct throtl_grp *tg = blkg_to_tg(blkg);
 
 	tg->iops[WRITE] = write_iops;
 	throtl_update_blkio_group_common(q->td, tg);
@@ -1010,7 +1009,7 @@ static void throtl_shutdown_wq(struct request_queue *q)
 
 static struct blkio_policy_type blkio_policy_throtl = {
 	.ops = {
-		.blkio_alloc_group_fn = throtl_alloc_blkio_group,
+		.blkio_init_group_fn = throtl_init_blkio_group,
 		.blkio_link_group_fn = throtl_link_blkio_group,
 		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
 		.blkio_clear_queue_fn = throtl_clear_queue,
@@ -1024,6 +1023,7 @@ static struct blkio_policy_type blkio_policy_throtl = {
 				throtl_update_blkio_group_write_iops,
 	},
 	.plid = BLKIO_POLICY_THROTL,
+	.pdata_size = sizeof(struct throtl_grp),
 };
 
 bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
@@ -1049,8 +1049,9 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	tg = throtl_lookup_tg(td, blkcg);
 	if (tg) {
 		if (tg_no_rule_group(tg, rw)) {
-			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
-					rw, rw_is_sync(bio->bi_rw));
+			blkiocg_update_dispatch_stats(tg_to_blkg(tg),
+						      bio->bi_size, rw,
+						      rw_is_sync(bio->bi_rw));
 			goto out_unlock_rcu;
 		}
 	}
@@ -1176,7 +1177,7 @@ int blk_throtl_init(struct request_queue *q)
 	blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_THROTL,
 				  true);
 	if (!IS_ERR(blkg))
-		td->root_tg = tg_of_blkg(blkg);
+		td->root_tg = blkg_to_tg(blkg);
 
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
@@ -1207,7 +1208,7 @@ void blk_throtl_exit(struct request_queue *q)
 	spin_unlock_irq(q->queue_lock);
 
 	/*
-	 * Wait for tg->blkg->q accessors to exit their grace periods.
+	 * Wait for tg_to_blkg(tg)->q accessors to exit their grace periods.
 	 * Do this wait only if there are other undestroyed groups out
 	 * there (other than root group). This can happen if cgroup deletion
 	 * path claimed the responsibility of cleaning up a group before
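
throtl_put_tg()/throtl_free_tg() above keep the usual refcount-plus-call_rcu() shape: the final put schedules the free for after all in-flight RCU readers have finished, which is why the blkg, its pd, and the percpu stats are released in the RCU callback rather than inline. A skeletal sketch of that shape, assuming kernel context (foo and its helpers are illustrative names):

struct foo {
	atomic_t ref;
	struct rcu_head rcu_head;
};

static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *foo = container_of(head, struct foo, rcu_head);

	kfree(foo);		/* runs once current RCU readers are done */
}

static void foo_put(struct foo *foo)
{
	if (atomic_dec_and_test(&foo->ref))
		call_rcu(&foo->rcu_head, foo_free_rcu);
}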
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9ef86fbfc9ae..c7449db52a86 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -17,6 +17,8 @@
 #include "blk.h"
 #include "cfq.h"
 
+static struct blkio_policy_type blkio_policy_cfq;
+
 /*
  * tunables
  */
@@ -206,7 +208,6 @@ struct cfq_group {
 	unsigned long saved_workload_slice;
 	enum wl_type_t saved_workload;
 	enum wl_prio_t saved_serving_prio;
-	struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	struct hlist_node cfqd_node;
 	int ref;
@@ -310,6 +311,16 @@ struct cfq_data {
 	unsigned int nr_blkcg_linked_grps;
 };
 
+static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
+{
+	return blkg_to_pdata(blkg, &blkio_policy_cfq);
+}
+
+static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg)
+{
+	return pdata_to_blkg(cfqg, &blkio_policy_cfq);
+}
+
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 
 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
@@ -374,11 +385,11 @@ CFQ_CFQQ_FNS(wait_busy);
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
 			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
-			blkg_path(&(cfqq)->cfqg->blkg), ##args)
+			blkg_path(cfqg_to_blkg((cfqq)->cfqg)), ##args)
 
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
 	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
-				blkg_path(&(cfqg)->blkg), ##args)	\
+				blkg_path(cfqg_to_blkg((cfqg))), ##args)	\
 
 #else
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
@@ -935,7 +946,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
 	cfq_group_service_tree_del(st, cfqg);
 	cfqg->saved_workload_slice = 0;
-	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+	cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg), 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -1007,9 +1018,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
 		     used_sl, cfqq->slice_dispatch, charge,
 		     iops_mode(cfqd), cfqq->nr_sectors);
-	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
+	cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), used_sl,
 					  unaccounted_sl);
-	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
+	cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg));
 }
 
 /**
@@ -1032,18 +1043,12 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg)
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
-{
-	if (blkg)
-		return container_of(blkg, struct cfq_group, blkg);
-	return NULL;
-}
-
 static void cfq_update_blkio_group_weight(struct request_queue *q,
 					  struct blkio_group *blkg,
 					  unsigned int weight)
 {
-	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
 	cfqg->new_weight = weight;
 	cfqg->needs_update = true;
 }
@@ -1052,7 +1057,7 @@ static void cfq_link_blkio_group(struct request_queue *q,
 					struct blkio_group *blkg)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
 	cfqd->nr_blkcg_linked_grps++;
 
@@ -1060,17 +1065,12 @@ static void cfq_link_blkio_group(struct request_queue *q,
 	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
 }
 
-static struct blkio_group *cfq_alloc_blkio_group(struct request_queue *q,
-						struct blkio_cgroup *blkcg)
+static void cfq_init_blkio_group(struct blkio_group *blkg)
 {
-	struct cfq_group *cfqg;
-
-	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, q->node);
-	if (!cfqg)
-		return NULL;
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
 	cfq_init_cfqg_base(cfqg);
-	cfqg->weight = blkcg->weight;
+	cfqg->weight = blkg->blkcg->weight;
 
 	/*
 	 * Take the initial reference that will be released on destroy
@@ -1079,8 +1079,6 @@ static struct blkio_group *cfq_alloc_blkio_group(struct request_queue *q,
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
 	cfqg->ref = 1;
-
-	return &cfqg->blkg;
 }
 
 /*
@@ -1101,7 +1099,7 @@ static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
 
 		blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
 		if (!IS_ERR(blkg))
-			cfqg = cfqg_of_blkg(blkg);
+			cfqg = blkg_to_cfqg(blkg);
 	}
 
 	return cfqg;
@@ -1126,6 +1124,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
 {
+	struct blkio_group *blkg = cfqg_to_blkg(cfqg);
 	struct cfq_rb_root *st;
 	int i, j;
 
@@ -1135,12 +1134,13 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
 		return;
 
 	/* release the extra blkcg reference this blkg has been holding */
-	css_put(&cfqg->blkg.blkcg->css);
+	css_put(&blkg->blkcg->css);
 
 	for_each_cfqg_st(cfqg, i, j, st)
 		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
-	free_percpu(cfqg->blkg.stats_cpu);
-	kfree(cfqg);
+	free_percpu(blkg->stats_cpu);
+	kfree(blkg->pd);
+	kfree(blkg);
 }
 
 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
@@ -1172,7 +1172,7 @@ static bool cfq_release_cfq_groups(struct cfq_data *cfqd)
 	 * it from cgroup list, then it will take care of destroying
 	 * cfqg also.
 	 */
-	if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
+	if (!cfq_blkiocg_del_blkio_group(cfqg_to_blkg(cfqg)))
 		cfq_destroy_cfqg(cfqd, cfqg);
 	else
 		empty = false;
@@ -1201,7 +1201,7 @@ static void cfq_unlink_blkio_group(struct request_queue *q,
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
+	cfq_destroy_cfqg(cfqd, blkg_to_cfqg(blkg));
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -1504,12 +1504,12 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
 					   rq_data_dir(rq), rq_is_sync(rq));
 	cfq_add_rq_rb(rq);
-	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
-			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
+					cfqg_to_blkg(cfqq->cfqd->serving_group),
+					rq_data_dir(rq), rq_is_sync(rq));
 }
 
 static struct request *
@@ -1565,7 +1565,7 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
 					   rq_data_dir(rq), rq_is_sync(rq));
 	if (rq->cmd_flags & REQ_PRIO) {
 		WARN_ON(!cfqq->prio_pending);
@@ -1601,7 +1601,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			   struct bio *bio)
 {
-	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
+	cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(req)),
 					   bio_data_dir(bio), cfq_bio_sync(bio));
 }
 
@@ -1624,7 +1624,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(rq)),
 					   rq_data_dir(next), rq_is_sync(next));
 
 	cfqq = RQ_CFQQ(next);
@@ -1666,7 +1666,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	del_timer(&cfqd->idle_slice_timer);
-	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg));
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1675,7 +1675,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
-		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+		cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg));
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -2023,7 +2023,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg));
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
 			group_idle ? 1 : 0);
 }
@@ -2046,8 +2046,9 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
 	cfqq->nr_sectors += blk_rq_sectors(rq);
-	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
-					rq_data_dir(rq), rq_is_sync(rq));
+	cfq_blkiocg_update_dispatch_stats(cfqg_to_blkg(cfqq->cfqg),
+					  blk_rq_bytes(rq), rq_data_dir(rq),
+					  rq_is_sync(rq));
 }
 
 /*
@@ -3135,7 +3136,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			__blk_run_queue(cfqd->queue);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
-						&cfqq->cfqg->blkg);
+						cfqg_to_blkg(cfqq->cfqg));
 			cfq_mark_cfqq_must_dispatch(cfqq);
 		}
 	}
@@ -3162,9 +3163,9 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
-			&cfqd->serving_group->blkg, rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
+					cfqg_to_blkg(cfqd->serving_group),
+					rq_data_dir(rq), rq_is_sync(rq));
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
@@ -3260,7 +3261,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
 	(RQ_CFQG(rq))->dispatched--;
-	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
+	cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
 			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
 			rq_data_dir(rq), rq_is_sync(rq));
 
@@ -3641,7 +3642,7 @@ static int cfq_init_queue(struct request_queue *q)
 	blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_PROP,
 				  true);
 	if (!IS_ERR(blkg))
-		cfqd->root_group = cfqg_of_blkg(blkg);
+		cfqd->root_group = blkg_to_cfqg(blkg);
 
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
@@ -3827,13 +3828,14 @@ static struct elevator_type iosched_cfq = {
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 static struct blkio_policy_type blkio_policy_cfq = {
 	.ops = {
-		.blkio_alloc_group_fn = cfq_alloc_blkio_group,
+		.blkio_init_group_fn = cfq_init_blkio_group,
 		.blkio_link_group_fn = cfq_link_blkio_group,
 		.blkio_unlink_group_fn = cfq_unlink_blkio_group,
 		.blkio_clear_queue_fn = cfq_clear_queue,
 		.blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
 	},
 	.plid = BLKIO_POLICY_PROP,
+	.pdata_size = sizeof(struct cfq_group),
 };
 #endif
 
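
Taken together, the throttle and cfq conversions show the contract a policy now has to meet: stop embedding a blkio_group, declare .pdata_size, supply an init callback in place of the old alloc callback, and translate between a blkg and its private data through the pdata helpers. A hedged sketch of a minimal policy skeleton under that contract (blkio_policy_mine and the my_* names are illustrative, not part of this patch):

/* forward declaration, same trick the two conversions above use */
static struct blkio_policy_type blkio_policy_mine;

struct my_grp {
	u64 some_counter;		/* policy-private per-group state */
};

static inline struct my_grp *blkg_to_mygrp(struct blkio_group *blkg)
{
	return blkg_to_pdata(blkg, &blkio_policy_mine);
}

static inline struct blkio_group *mygrp_to_blkg(struct my_grp *mg)
{
	return pdata_to_blkg(mg, &blkio_policy_mine);
}

static void my_init_blkio_group(struct blkio_group *blkg)
{
	struct my_grp *mg = blkg_to_mygrp(blkg);

	mg->some_counter = 0;	/* pdata arrives zeroed from kzalloc_node() */
}

static struct blkio_policy_type blkio_policy_mine = {
	.ops = {
		.blkio_init_group_fn	= my_init_blkio_group,
	},
	/* .plid would name a real policy id here */
	.pdata_size	= sizeof(struct my_grp),
};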