-rw-r--r--  block/blk-cgroup.c    61
-rw-r--r--  block/blk-cgroup.h     6
-rw-r--r--  block/blk-throttle.c   2
-rw-r--r--  block/cfq-iosched.c    2
4 files changed, 23 insertions, 48 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d6d59ad105b..10f0d2fc0b2 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -25,8 +25,6 @@
 #define MAX_KEY_LEN 100
 
 static DEFINE_MUTEX(blkcg_pol_mutex);
-static DEFINE_MUTEX(all_q_mutex);
-static LIST_HEAD(all_q_list);
 
 struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
@@ -179,9 +177,8 @@ struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
-struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
-                                       struct request_queue *q,
-                                       bool for_root)
+static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg,
+                                                struct request_queue *q)
         __releases(q->queue_lock) __acquires(q->queue_lock)
 {
         struct blkio_group *blkg;
@@ -189,13 +186,6 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
         WARN_ON_ONCE(!rcu_read_lock_held());
         lockdep_assert_held(q->queue_lock);
 
-        /*
-         * This could be the first entry point of blkcg implementation and
-         * we shouldn't allow anything to go through for a bypassing queue.
-         */
-        if (unlikely(blk_queue_bypass(q)) && !for_root)
-                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-
         blkg = __blkg_lookup(blkcg, q);
         if (blkg)
                 return blkg;
@@ -223,6 +213,18 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 out:
         return blkg;
 }
+
+struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
+                                       struct request_queue *q)
+{
+        /*
+         * This could be the first entry point of blkcg implementation and
+         * we shouldn't allow anything to go through for a bypassing queue.
+         */
+        if (unlikely(blk_queue_bypass(q)))
+                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+        return __blkg_lookup_create(blkcg, q);
+}
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkio_group *blkg)
@@ -249,12 +251,10 @@ static void blkg_destroy(struct blkio_group *blkg)
 /**
  * blkg_destroy_all - destroy all blkgs associated with a request_queue
  * @q: request_queue of interest
- * @destroy_root: whether to destroy root blkg or not
  *
- * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
- * destroyed; otherwise, root blkg is left alone.
+ * Destroy all blkgs associated with @q.
  */
-void blkg_destroy_all(struct request_queue *q, bool destroy_root)
+static void blkg_destroy_all(struct request_queue *q)
 {
         struct blkio_group *blkg, *n;
 
@@ -263,10 +263,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
         list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                 struct blkio_cgroup *blkcg = blkg->blkcg;
 
-                /* skip root? */
-                if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
-                        continue;
-
                 spin_lock(&blkcg->lock);
                 blkg_destroy(blkg);
                 spin_unlock(&blkcg->lock);
@@ -274,7 +270,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
 
         spin_unlock_irq(q->queue_lock);
 }
-EXPORT_SYMBOL_GPL(blkg_destroy_all);
 
 static void blkg_rcu_free(struct rcu_head *rcu_head)
 {
@@ -492,7 +487,7 @@ int blkg_conf_prep(struct blkio_cgroup *blkcg,
         spin_lock_irq(disk->queue->queue_lock);
 
         if (blkcg_policy_enabled(disk->queue, pol))
-                blkg = blkg_lookup_create(blkcg, disk->queue, false);
+                blkg = blkg_lookup_create(blkcg, disk->queue);
         else
                 blkg = ERR_PTR(-EINVAL);
 
@@ -625,20 +620,9 @@ done:
  */
 int blkcg_init_queue(struct request_queue *q)
 {
-        int ret;
-
         might_sleep();
 
-        ret = blk_throtl_init(q);
-        if (ret)
-                return ret;
-
-        mutex_lock(&all_q_mutex);
-        INIT_LIST_HEAD(&q->all_q_node);
-        list_add_tail(&q->all_q_node, &all_q_list);
-        mutex_unlock(&all_q_mutex);
-
-        return 0;
+        return blk_throtl_init(q);
 }
 
 /**
@@ -662,12 +646,7 @@ void blkcg_drain_queue(struct request_queue *q)
  */
 void blkcg_exit_queue(struct request_queue *q)
 {
-        mutex_lock(&all_q_mutex);
-        list_del_init(&q->all_q_node);
-        mutex_unlock(&all_q_mutex);
-
-        blkg_destroy_all(q, true);
-
+        blkg_destroy_all(q);
         blk_throtl_exit(q);
 }
 
@@ -741,7 +720,7 @@ int blkcg_activate_policy(struct request_queue *q,
         spin_lock_irq(q->queue_lock);
 
         rcu_read_lock();
-        blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
+        blkg = __blkg_lookup_create(&blkio_root_cgroup, q);
         rcu_read_unlock();
 
         if (IS_ERR(blkg)) {
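
The net effect in blk-cgroup.c: the bypass/dead-queue check moves out of the creation path into a thin exported wrapper, so blkcg_activate_policy() can call the static __blkg_lookup_create() directly for the root blkg even while the queue is bypassing, and the old @for_root flag disappears from every signature. Below is a minimal userspace sketch of that wrapper-plus-helper split; the struct names, the ERR_PTR()/IS_ERR() emulation, and the helper body are illustrative stand-ins, not the kernel code:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for struct request_queue / struct blkio_group. */
struct queue { bool bypass; bool dead; };
struct group { int id; };

static struct group root_group;

/* Minimal ERR_PTR()/IS_ERR() emulation so the sketch runs in userspace. */
static void *ERR_PTR(long err) { return (void *)err; }
static bool IS_ERR(const void *p) { return (unsigned long)p >= (unsigned long)-4095; }

/* Worker: does the lookup/creation only, no queue-state policing
 * (plays the role of the new static __blkg_lookup_create()). */
static struct group *helper_lookup_create(struct queue *q)
{
        return &root_group;             /* pretend lookup/creation succeeded */
}

/* Public entry point: reject bypassing queues up front, then delegate
 * (plays the role of the reworked blkg_lookup_create()). */
static struct group *lookup_create(struct queue *q)
{
        if (q->bypass)
                return ERR_PTR(q->dead ? -EINVAL : -EBUSY);
        return helper_lookup_create(q);
}

int main(void)
{
        struct queue live = { 0 }, bypassing = { .bypass = true };

        printf("live queue:      %s\n", IS_ERR(lookup_create(&live)) ? "error" : "group");
        printf("bypassing queue: %s\n", IS_ERR(lookup_create(&bypassing)) ? "error" : "group");
        return 0;
}

A caller that must run on a bypassing queue (the root-blkg setup in blkcg_activate_policy()) skips the wrapper and calls the helper directly, which is exactly the split this patch makes.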
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 66253a7c8ff..222063d3635 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -115,7 +115,6 @@ extern int blkcg_activate_policy(struct request_queue *q,
                                  const struct blkio_policy_type *pol);
 extern void blkcg_deactivate_policy(struct request_queue *q,
                                     const struct blkio_policy_type *pol);
-extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
 
 void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
                        u64 (*prfill)(struct seq_file *, void *, int),
@@ -334,8 +333,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
                                 const struct blkio_policy_type *pol) { return 0; }
 static inline void blkcg_deactivate_policy(struct request_queue *q,
                                            const struct blkio_policy_type *pol) { }
-static inline void blkg_destroy_all(struct request_queue *q,
-                                    bool destory_root) { }
 
 static inline void *blkg_to_pdata(struct blkio_group *blkg,
                                   struct blkio_policy_type *pol) { return NULL; }
@@ -354,8 +351,7 @@ extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
 extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                        struct request_queue *q);
 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
-                                       struct request_queue *q,
-                                       bool for_root);
+                                       struct request_queue *q);
 #else
 struct cgroup;
 static inline struct blkio_cgroup *
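
blk-cgroup.h keeps its usual dual-declaration layout: real prototypes under CONFIG_BLK_CGROUP and empty static-inline stubs in the #else branch, which is why blkg_destroy_all() had to vanish from both halves once it became static to blk-cgroup.c. A generic sketch of that header idiom, with CONFIG_FOO and foo_init() as placeholder names:

struct request_queue;

#ifdef CONFIG_FOO
/* Real prototype; the definition lives in foo.c. */
int foo_init(struct request_queue *q);
#else
/* Empty stub so call sites compile unchanged when the feature is off. */
static inline int foo_init(struct request_queue *q) { return 0; }
#endif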
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2fc964e06ea..e2aaf27e1f1 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -285,7 +285,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
         } else {
                 struct blkio_group *blkg;
 
-                blkg = blkg_lookup_create(blkcg, q, false);
+                blkg = blkg_lookup_create(blkcg, q);
 
                 /* if %NULL and @q is alive, fall back to root_tg */
                 if (!IS_ERR(blkg))
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 0203652e1f3..eb07eb64e85 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1348,7 +1348,7 @@ static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
         } else {
                 struct blkio_group *blkg;
 
-                blkg = blkg_lookup_create(blkcg, q, false);
+                blkg = blkg_lookup_create(blkcg, q);
                 if (!IS_ERR(blkg))
                         cfqg = blkg_to_cfqg(blkg);
         }
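
With @for_root gone, the throttle and CFQ call sites above collapse to the same idiom: attempt the lookup/creation and fall back to the root group when it fails while the queue is still alive. A short fragment of that caller pattern, reusing the toy types from the sketch after the blk-cgroup.c diff (lookup_or_root() and its fallback policy are illustrative, not kernel API):

/* Mirrors throtl_lookup_create_tg()/cfq_lookup_create_cfqg() after the
 * change: no for_root argument, and an error from a bypassing or dying
 * queue degrades to the root group rather than failing the request. */
static struct group *lookup_or_root(struct queue *q)
{
        struct group *g = lookup_create(q);     /* for_root flag is gone */

        if (IS_ERR(g))
                g = &root_group;                /* fall back to root */
        return g;
}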