Diffstat (limited to 'block/blk-cgroup.c')
 block/blk-cgroup.c | 61 ++++++++++++++++++++-----------------------------------------
 1 file changed, 20 insertions(+), 41 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d6d59ad105b4..10f0d2fc0b23 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -25,8 +25,6 @@
 #define MAX_KEY_LEN 100
 
 static DEFINE_MUTEX(blkcg_pol_mutex);
-static DEFINE_MUTEX(all_q_mutex);
-static LIST_HEAD(all_q_list);
 
 struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
@@ -179,9 +177,8 @@ struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
-struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
-				       struct request_queue *q,
-				       bool for_root)
+static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg,
+						struct request_queue *q)
 	__releases(q->queue_lock) __acquires(q->queue_lock)
 {
 	struct blkio_group *blkg;
@@ -189,13 +186,6 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 	lockdep_assert_held(q->queue_lock);
 
-	/*
-	 * This could be the first entry point of blkcg implementation and
-	 * we shouldn't allow anything to go through for a bypassing queue.
-	 */
-	if (unlikely(blk_queue_bypass(q)) && !for_root)
-		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-
 	blkg = __blkg_lookup(blkcg, q);
 	if (blkg)
 		return blkg;
@@ -223,6 +213,18 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 out:
 	return blkg;
 }
+
+struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
+				       struct request_queue *q)
+{
+	/*
+	 * This could be the first entry point of blkcg implementation and
+	 * we shouldn't allow anything to go through for a bypassing queue.
+	 */
+	if (unlikely(blk_queue_bypass(q)))
+		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+	return __blkg_lookup_create(blkcg, q);
+}
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkio_group *blkg)
@@ -249,12 +251,10 @@ static void blkg_destroy(struct blkio_group *blkg)
 /**
  * blkg_destroy_all - destroy all blkgs associated with a request_queue
  * @q: request_queue of interest
- * @destroy_root: whether to destroy root blkg or not
  *
- * Destroy blkgs associated with @q. If @destroy_root is %true, all are
- * destroyed; otherwise, root blkg is left alone.
+ * Destroy all blkgs associated with @q.
  */
-void blkg_destroy_all(struct request_queue *q, bool destroy_root)
+static void blkg_destroy_all(struct request_queue *q)
 {
 	struct blkio_group *blkg, *n;
 
@@ -263,10 +263,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkio_cgroup *blkcg = blkg->blkcg;
 
-		/* skip root? */
-		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
-			continue;
-
 		spin_lock(&blkcg->lock);
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
@@ -274,7 +270,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
 
 	spin_unlock_irq(q->queue_lock);
 }
-EXPORT_SYMBOL_GPL(blkg_destroy_all);
 
 static void blkg_rcu_free(struct rcu_head *rcu_head)
 {
@@ -492,7 +487,7 @@ int blkg_conf_prep(struct blkio_cgroup *blkcg,
 	spin_lock_irq(disk->queue->queue_lock);
 
 	if (blkcg_policy_enabled(disk->queue, pol))
-		blkg = blkg_lookup_create(blkcg, disk->queue, false);
+		blkg = blkg_lookup_create(blkcg, disk->queue);
 	else
 		blkg = ERR_PTR(-EINVAL);
 
@@ -625,20 +620,9 @@ done:
  */
 int blkcg_init_queue(struct request_queue *q)
 {
-	int ret;
-
 	might_sleep();
 
-	ret = blk_throtl_init(q);
-	if (ret)
-		return ret;
-
-	mutex_lock(&all_q_mutex);
-	INIT_LIST_HEAD(&q->all_q_node);
-	list_add_tail(&q->all_q_node, &all_q_list);
-	mutex_unlock(&all_q_mutex);
-
-	return 0;
+	return blk_throtl_init(q);
 }
 
 /**
@@ -662,12 +646,7 @@ void blkcg_drain_queue(struct request_queue *q)
  */
 void blkcg_exit_queue(struct request_queue *q)
 {
-	mutex_lock(&all_q_mutex);
-	list_del_init(&q->all_q_node);
-	mutex_unlock(&all_q_mutex);
-
-	blkg_destroy_all(q, true);
-
+	blkg_destroy_all(q);
 	blk_throtl_exit(q);
 }
 
@@ -741,7 +720,7 @@ int blkcg_activate_policy(struct request_queue *q,
 	spin_lock_irq(q->queue_lock);
 
 	rcu_read_lock();
-	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
+	blkg = __blkg_lookup_create(&blkio_root_cgroup, q);
 	rcu_read_unlock();
 
 	if (IS_ERR(blkg)) {
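
With this change, external callers use blkg_lookup_create(blkcg, q), which itself rejects bypassing queues, while policy activation creates the root blkg through the internal __blkg_lookup_create(). A minimal caller sketch of the new convention is below; the wrapper function is illustrative only (not part of this patch), and the locking mirrors what blkg_conf_prep() does in the hunk above:

	/*
	 * Illustrative only: obtain a blkg under the new two-argument API.
	 * Both the RCU read lock and q->queue_lock must be held, as the
	 * lookup path asserts via WARN_ON_ONCE() and lockdep_assert_held().
	 */
	static int example_get_blkg(struct blkio_cgroup *blkcg,
				    struct request_queue *q)
	{
		struct blkio_group *blkg;
		int ret = 0;

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);

		/* bypass/dead rejection now happens inside blkg_lookup_create() */
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			ret = PTR_ERR(blkg);	/* -EBUSY while bypassing, -EINVAL if dead */

		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();
		return ret;
	}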