 block/blk-cgroup.c   | 32
 block/blk-cgroup.h   | 22
 block/blk-throttle.c | 50
 block/cfq-iosched.c  | 30
 block/cfq.h          |  7
 5 files changed, 70 insertions(+), 71 deletions(-)
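In brief: the patch replaces the opaque void *key that tied a blkio_group to its creating policy's private data (throtl_data or cfq_data) with an explicit request_queue pointer plus a policy ID. The sketch below is a simplified userspace model of the new lookup rule, not code from the patch; the names mirror the kernel structures, but the RCU hlist is reduced to a plain singly linked list.

#include <stddef.h>

enum blkio_policy_id { BLKIO_POLICY_PROP, BLKIO_POLICY_THROTL };

struct request_queue;                   /* opaque in this model */

struct blkio_group {
        struct request_queue *q;        /* was: void *key */
        enum blkio_policy_id plid;
        struct blkio_group *next;       /* stands in for the blkcg hlist */
};

/* Both fields must match: with the old scheme, throttle and cfq groups on
 * the same device were distinguished by their differing private-data
 * pointers; now both pass the same queue pointer, so plid tells their
 * groups apart. */
static struct blkio_group *lookup_group(struct blkio_group *head,
                                        struct request_queue *q,
                                        enum blkio_policy_id plid)
{
        struct blkio_group *blkg;

        for (blkg = head; blkg; blkg = blkg->next)
                if (blkg->q == q && blkg->plid == plid)
                        return blkg;
        return NULL;
}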
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 159aef59589f..5e50ca1f5b47 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -129,7 +129,7 @@ blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
                 if (blkiop->plid != blkg->plid)
                         continue;
                 if (blkiop->ops.blkio_update_group_weight_fn)
-                        blkiop->ops.blkio_update_group_weight_fn(blkg->key,
+                        blkiop->ops.blkio_update_group_weight_fn(blkg->q,
                                                         blkg, weight);
         }
 }
@@ -147,12 +147,12 @@ static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
 
                 if (fileid == BLKIO_THROTL_read_bps_device
                     && blkiop->ops.blkio_update_group_read_bps_fn)
-                        blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
+                        blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
                                                                 blkg, bps);
 
                 if (fileid == BLKIO_THROTL_write_bps_device
                     && blkiop->ops.blkio_update_group_write_bps_fn)
-                        blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
+                        blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
                                                                 blkg, bps);
         }
 }
@@ -170,12 +170,12 @@ static inline void blkio_update_group_iops(struct blkio_group *blkg,
 
                 if (fileid == BLKIO_THROTL_read_iops_device
                     && blkiop->ops.blkio_update_group_read_iops_fn)
-                        blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
+                        blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
                                                                 blkg, iops);
 
                 if (fileid == BLKIO_THROTL_write_iops_device
                     && blkiop->ops.blkio_update_group_write_iops_fn)
-                        blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
+                        blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
                                                                 blkg,iops);
         }
 }
@@ -478,14 +478,14 @@ int blkio_alloc_blkg_stats(struct blkio_group *blkg)
 EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
 
 void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-                struct blkio_group *blkg, void *key, dev_t dev,
+                struct blkio_group *blkg, struct request_queue *q, dev_t dev,
                 enum blkio_policy_id plid)
 {
         unsigned long flags;
 
         spin_lock_irqsave(&blkcg->lock, flags);
         spin_lock_init(&blkg->stats_lock);
-        rcu_assign_pointer(blkg->key, key);
+        rcu_assign_pointer(blkg->q, q);
         blkg->blkcg_id = css_id(&blkcg->css);
         hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
         blkg->plid = plid;
@@ -531,18 +531,16 @@ int blkiocg_del_blkio_group(struct blkio_group *blkg)
 EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
 
 /* called under rcu_read_lock(). */
-struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
+struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
+                                         struct request_queue *q,
+                                         enum blkio_policy_id plid)
 {
         struct blkio_group *blkg;
         struct hlist_node *n;
-        void *__key;
 
-        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
-                __key = blkg->key;
-                if (__key == key)
+        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
+                if (blkg->q == q && blkg->plid == plid)
                         return blkg;
-        }
-
         return NULL;
 }
 EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
@@ -1582,7 +1580,7 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
         unsigned long flags;
         struct blkio_group *blkg;
-        void *key;
+        struct request_queue *q;
         struct blkio_policy_type *blkiop;
         struct blkio_policy_node *pn, *pntmp;
 
@@ -1597,7 +1595,7 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 
                 blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                                         blkcg_node);
-                key = rcu_dereference(blkg->key);
+                q = rcu_dereference(blkg->q);
                 __blkiocg_del_blkio_group(blkg);
 
                 spin_unlock_irqrestore(&blkcg->lock, flags);
@@ -1611,7 +1609,7 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
                 list_for_each_entry(blkiop, &blkio_list, list) {
                         if (blkiop->plid != blkg->plid)
                                 continue;
-                        blkiop->ops.blkio_unlink_group_fn(key, blkg);
+                        blkiop->ops.blkio_unlink_group_fn(q, blkg);
                 }
                 spin_unlock(&blkio_list_lock);
         } while (1);
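For readers skimming the hunks above, the lifetime rule the destroy path depends on condenses to the fragment below. This is a paraphrase of the add and destroy sides shown in this file, not new API; it is kernel-style and not compilable on its own.

/* link side, under blkcg->lock: publish the queue pointer */
spin_lock_irqsave(&blkcg->lock, flags);
rcu_assign_pointer(blkg->q, q);
hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
spin_unlock_irqrestore(&blkcg->lock, flags);

/* cgroup-destroy side, also under blkcg->lock: read the pointer back */
spin_lock_irqsave(&blkcg->lock, flags);
q = rcu_dereference(blkg->q);
__blkiocg_del_blkio_group(blkg);
spin_unlock_irqrestore(&blkcg->lock, flags);

/* later, under blkio_list_lock: hand it to each matching policy */
blkiop->ops.blkio_unlink_group_fn(q, blkg);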
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index e5cfcbd4d2f4..41c960b99c57 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -153,8 +153,8 @@ struct blkio_group_stats_cpu {
 };
 
 struct blkio_group {
-        /* An rcu protected unique identifier for the group */
-        void *key;
+        /* Pointer to the associated request_queue, RCU protected */
+        struct request_queue __rcu *q;
         struct hlist_node blkcg_node;
         unsigned short blkcg_id;
         /* Store cgroup path */
@@ -202,17 +202,18 @@ extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
 extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
                                                 dev_t dev);
 
-typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
+typedef void (blkio_unlink_group_fn)(struct request_queue *q,
+                        struct blkio_group *blkg);
 typedef bool (blkio_clear_queue_fn)(struct request_queue *q);
-typedef void (blkio_update_group_weight_fn) (void *key,
+typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
                         struct blkio_group *blkg, unsigned int weight);
-typedef void (blkio_update_group_read_bps_fn) (void * key,
+typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
                         struct blkio_group *blkg, u64 read_bps);
-typedef void (blkio_update_group_write_bps_fn) (void *key,
+typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
                         struct blkio_group *blkg, u64 write_bps);
-typedef void (blkio_update_group_read_iops_fn) (void *key,
+typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
                         struct blkio_group *blkg, unsigned int read_iops);
-typedef void (blkio_update_group_write_iops_fn) (void *key,
+typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
                         struct blkio_group *blkg, unsigned int write_iops);
 
 struct blkio_policy_ops {
@@ -305,12 +306,13 @@ extern struct blkio_cgroup blkio_root_cgroup;
 extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
 extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
 extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-        struct blkio_group *blkg, void *key, dev_t dev,
+        struct blkio_group *blkg, struct request_queue *q, dev_t dev,
         enum blkio_policy_id plid);
 extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
 extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
-                                                void *key);
+                                                struct request_queue *q,
+                                                enum blkio_policy_id plid);
 void blkiocg_update_timeslice_used(struct blkio_group *blkg,
                                    unsigned long time,
                                    unsigned long unaccounted_time);
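To show what implementers now provide against the updated typedefs, here is a hypothetical policy wiring; "foo" and its callbacks are invented for illustration and are not part of this patch.

static void foo_unlink_group(struct request_queue *q,
                             struct blkio_group *blkg)
{
        /* private data is recovered from the typed queue, not by
         * casting an opaque key */
}

static void foo_update_weight(struct request_queue *q,
                              struct blkio_group *blkg, unsigned int weight)
{
        /* react to a weight change for @blkg on @q */
}

static struct blkio_policy_type foo_policy = {
        .ops = {
                .blkio_unlink_group_fn        = foo_unlink_group,
                .blkio_update_group_weight_fn = foo_update_weight,
        },
        .plid = BLKIO_POLICY_PROP,
};

Registration would go through blkio_policy_register(&foo_policy) as before; only the callback signatures change.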
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c252df9169db..6613de78e364 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -252,7 +252,7 @@ static void throtl_init_add_tg_lists(struct throtl_data *td,
         __throtl_tg_fill_dev_details(td, tg);
 
         /* Add group onto cgroup list */
-        blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
+        blkiocg_add_blkio_group(blkcg, &tg->blkg, td->queue,
                                 tg->blkg.dev, BLKIO_POLICY_THROTL);
 
         tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
@@ -288,7 +288,6 @@ static struct
 throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 {
         struct throtl_grp *tg = NULL;
-        void *key = td;
 
         /*
          * This is the common case when there are no blkio cgroups.
@@ -297,7 +296,8 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
         if (blkcg == &blkio_root_cgroup)
                 tg = td->root_tg;
         else
-                tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+                tg = tg_of_blkg(blkiocg_lookup_group(blkcg, td->queue,
+                                                        BLKIO_POLICY_THROTL));
 
         __throtl_tg_fill_dev_details(td, tg);
         return tg;
@@ -1012,22 +1012,22 @@ static bool throtl_release_tgs(struct throtl_data *td, bool release_root)
  * no new IO will come in this group. So get rid of this group as soon as
  * any pending IO in the group is finished.
  *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid throtl_data pointer as long as we are
- * rcu read lock.
+ * This function is called under rcu_read_lock(). @q is the rcu protected
+ * pointer. That means @q is a valid request_queue pointer as long as we
+ * are under the rcu read lock.
 *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
+ * @q was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if the queue was going away, the cgroup
 * deletion path got to it first.
 */
-void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
+void throtl_unlink_blkio_group(struct request_queue *q,
+                               struct blkio_group *blkg)
 {
         unsigned long flags;
-        struct throtl_data *td = key;
 
-        spin_lock_irqsave(td->queue->queue_lock, flags);
-        throtl_destroy_tg(td, tg_of_blkg(blkg));
-        spin_unlock_irqrestore(td->queue->queue_lock, flags);
+        spin_lock_irqsave(q->queue_lock, flags);
+        throtl_destroy_tg(q->td, tg_of_blkg(blkg));
+        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 static bool throtl_clear_queue(struct request_queue *q)
@@ -1054,52 +1054,48 @@ static void throtl_update_blkio_group_common(struct throtl_data *td,
 }
 
 /*
- * For all update functions, key should be a valid pointer because these
+ * For all update functions, @q should be a valid pointer because these
  * update functions are called under blkcg_lock, that means, blkg is
- * valid and in turn key is valid. queue exit path can not race because
+ * valid and in turn @q is valid. queue exit path can not race because
  * of blkcg_lock
  *
  * Can not take queue lock in update functions as queue lock under blkcg_lock
  * is not allowed. Under other paths we take blkcg_lock under queue_lock.
  */
-static void throtl_update_blkio_group_read_bps(void *key,
+static void throtl_update_blkio_group_read_bps(struct request_queue *q,
                                 struct blkio_group *blkg, u64 read_bps)
 {
-        struct throtl_data *td = key;
         struct throtl_grp *tg = tg_of_blkg(blkg);
 
         tg->bps[READ] = read_bps;
-        throtl_update_blkio_group_common(td, tg);
+        throtl_update_blkio_group_common(q->td, tg);
 }
 
-static void throtl_update_blkio_group_write_bps(void *key,
+static void throtl_update_blkio_group_write_bps(struct request_queue *q,
                                 struct blkio_group *blkg, u64 write_bps)
 {
-        struct throtl_data *td = key;
         struct throtl_grp *tg = tg_of_blkg(blkg);
 
         tg->bps[WRITE] = write_bps;
-        throtl_update_blkio_group_common(td, tg);
+        throtl_update_blkio_group_common(q->td, tg);
 }
 
-static void throtl_update_blkio_group_read_iops(void *key,
+static void throtl_update_blkio_group_read_iops(struct request_queue *q,
                                 struct blkio_group *blkg, unsigned int read_iops)
 {
-        struct throtl_data *td = key;
         struct throtl_grp *tg = tg_of_blkg(blkg);
 
         tg->iops[READ] = read_iops;
-        throtl_update_blkio_group_common(td, tg);
+        throtl_update_blkio_group_common(q->td, tg);
 }
 
-static void throtl_update_blkio_group_write_iops(void *key,
+static void throtl_update_blkio_group_write_iops(struct request_queue *q,
                                 struct blkio_group *blkg, unsigned int write_iops)
 {
-        struct throtl_data *td = key;
         struct throtl_grp *tg = tg_of_blkg(blkg);
 
         tg->iops[WRITE] = write_iops;
-        throtl_update_blkio_group_common(td, tg);
+        throtl_update_blkio_group_common(q->td, tg);
 }
 
 static void throtl_shutdown_wq(struct request_queue *q)
@@ -1306,7 +1302,7 @@ void blk_throtl_exit(struct request_queue *q)
         spin_unlock_irq(q->queue_lock);
 
         /*
-         * Wait for tg->blkg->key accessors to exit their grace periods.
+         * Wait for tg->blkg->q accessors to exit their grace periods.
          * Do this wait only if there are other undestroyed groups out
          * there (other than root group). This can happen if cgroup deletion
          * path claimed the responsibility of cleaning up a group before
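The pattern running through the throttle conversions above: the callbacks used to cast the opaque key, and now read their private data off the typed queue. A condensed before/after of one update function, with comments standing in for the unchanged body:

/* before: the caller had to be trusted to pass a throtl_data pointer */
static void throtl_update_blkio_group_read_bps(void *key,
                struct blkio_group *blkg, u64 read_bps)
{
        struct throtl_data *td = key;   /* unchecked cast */
        /* ... set tg->bps[READ], then throtl_update_blkio_group_common(td, tg) ... */
}

/* after: the queue is typed and carries its throttle data in q->td */
static void throtl_update_blkio_group_read_bps(struct request_queue *q,
                struct blkio_group *blkg, u64 read_bps)
{
        /* ... same body, ending in throtl_update_blkio_group_common(q->td, tg) ... */
}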
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 0f7a81fc7c73..37e2da9cbb09 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1020,7 +1020,8 @@ static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
         return NULL;
 }
 
-static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
+static void cfq_update_blkio_group_weight(struct request_queue *q,
+                                          struct blkio_group *blkg,
                                           unsigned int weight)
 {
         struct cfq_group *cfqg = cfqg_of_blkg(blkg);
@@ -1043,10 +1044,10 @@ static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
         if (bdi->dev) {
                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
                 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
-                                        (void *)cfqd, MKDEV(major, minor));
+                                        cfqd->queue, MKDEV(major, minor));
         } else
                 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
-                                        (void *)cfqd, 0);
+                                        cfqd->queue, 0);
 
         cfqd->nr_blkcg_linked_grps++;
         cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
@@ -1097,7 +1098,6 @@ static struct cfq_group *
 cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
 {
         struct cfq_group *cfqg = NULL;
-        void *key = cfqd;
         struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
         unsigned int major, minor;
 
@@ -1108,7 +1108,8 @@ cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
         if (blkcg == &blkio_root_cgroup)
                 cfqg = &cfqd->root_group;
         else
-                cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
+                cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, cfqd->queue,
+                                                         BLKIO_POLICY_PROP));
 
         if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
                 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
@@ -1247,21 +1248,22 @@ static bool cfq_release_cfq_groups(struct cfq_data *cfqd)
  * any pending IO in the group is finished.
  *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
- * read lock.
+ * This function is called under rcu_read_lock(). @q is the rcu protected
+ * pointer. That means @q is a valid request_queue pointer as long as we
+ * are under the rcu read lock.
 *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
+ * @q was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if the elevator was exiting, the cgroup
 * deletion path got to it first.
 */
-static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static void cfq_unlink_blkio_group(struct request_queue *q,
+                                   struct blkio_group *blkg)
 {
-        unsigned long flags;
-        struct cfq_data *cfqd = key;
+        struct cfq_data *cfqd = q->elevator->elevator_data;
+        unsigned long flags;
 
-        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+        spin_lock_irqsave(q->queue_lock, flags);
         cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
-        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 static struct elevator_type iosched_cfq;
@@ -3718,7 +3720,7 @@ static int cfq_init_queue(struct request_queue *q)
         rcu_read_lock();
 
         cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
-                                        (void *)cfqd, 0);
+                                        cfqd->queue, 0);
         rcu_read_unlock();
         cfqd->nr_blkcg_linked_grps++;
 
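Unlike blk-throttle, cfq has no dedicated field on the queue; its private data is reached through the elevator, which is why cfq_unlink_blkio_group above rederives cfqd from @q. The two recovery paths used by this patch, side by side:

/* blk-throttle: the queue carries its throttle data directly */
struct throtl_data *td = q->td;

/* cfq: io-scheduler private data hangs off the queue's elevator */
struct cfq_data *cfqd = q->elevator->elevator_data;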
diff --git a/block/cfq.h b/block/cfq.h
index 2a155927e37c..343b78a61df3 100644
--- a/block/cfq.h
+++ b/block/cfq.h
@@ -68,8 +68,9 @@ static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
 }
 
 static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-                struct blkio_group *blkg, void *key, dev_t dev) {
-        blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
+                struct blkio_group *blkg, struct request_queue *q, dev_t dev)
+{
+        blkiocg_add_blkio_group(blkcg, blkg, q, dev, BLKIO_POLICY_PROP);
 }
 
 static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
@@ -105,7 +106,7 @@ static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
 
 static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
-                struct blkio_group *blkg, void *key, dev_t dev) {}
+                struct blkio_group *blkg, struct request_queue *q, dev_t dev) {}
 static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
 {
         return 0;