author		Tejun Heo <tj@kernel.org>	2012-04-13 16:11:34 -0400
committer	Jens Axboe <axboe@kernel.dk>	2012-04-20 04:06:06 -0400
commit		3c96cb32d318f323c1bf972a4c66821f8499e34d
tree		e76d0437f487405d8a22a727b8085696b949179d
parent		a2b1693bac45ea3fe3ba612fd22c45f17449f610
blkcg: drop stuff unused after per-queue policy activation update
* all_q_list is unused. Drop all_q_{mutex|list}.
* @for_root of blkg_lookup_create() is always %false when called from
  outside blk-cgroup.c proper. Factor out __blkg_lookup_create() so
  that it doesn't check whether @q is bypassing, and use the
  underscored version for the @for_root callsite (a brief sketch of
  this wrapper pattern follows the sign-offs below).
* blkg_destroy_all() is used only from blkcg proper and @destroy_root
is always %true. Make it static and drop @destroy_root.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
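[Editor's note] The factor-out in the second bullet is a common kernel idiom: the exported entry point stays a thin wrapper that enforces a precondition, while trusted in-file callers use a double-underscored variant that skips it. Below is a minimal standalone sketch of that shape; the types and names (struct queue, group_lookup_create(), etc.) are simplified stand-ins for illustration, not the kernel's own.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	bool bypassing;		/* stand-in for blk_queue_bypass(q) */
};

struct group {
	struct queue *q;
};

/*
 * Internal variant: no bypass check.  Only in-file callers that know
 * the call is safe (e.g. root-group setup during policy activation)
 * use this directly.
 */
static struct group *__group_lookup_create(struct queue *q)
{
	struct group *g = calloc(1, sizeof(*g));

	if (g)
		g->q = q;
	return g;
}

/* Exported variant: every outside caller goes through the check. */
struct group *group_lookup_create(struct queue *q)
{
	if (q->bypassing)
		return NULL;	/* the kernel returns ERR_PTR(-EBUSY/-EINVAL) */
	return __group_lookup_create(q);
}

int main(void)
{
	struct queue q = { .bypassing = true };
	struct group *g;

	/* The external path refuses a bypassing queue... */
	printf("external: %p\n", (void *)group_lookup_create(&q));

	/* ...while the internal path still works for trusted setup. */
	g = __group_lookup_create(&q);
	printf("internal: %p\n", (void *)g);
	free(g);
	return 0;
}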
Diffstat (limited to 'block')
 block/blk-cgroup.c   | 61
 block/blk-cgroup.h   |  6
 block/blk-throttle.c |  2
 block/cfq-iosched.c  |  2
 4 files changed, 23 insertions(+), 48 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d6d59ad105b4..10f0d2fc0b23 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -25,8 +25,6 @@
 #define MAX_KEY_LEN 100
 
 static DEFINE_MUTEX(blkcg_pol_mutex);
-static DEFINE_MUTEX(all_q_mutex);
-static LIST_HEAD(all_q_list);
 
 struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
@@ -179,9 +177,8 @@ struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
-struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
-				       struct request_queue *q,
-				       bool for_root)
+static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg,
+						struct request_queue *q)
 	__releases(q->queue_lock) __acquires(q->queue_lock)
 {
 	struct blkio_group *blkg;
@@ -189,13 +186,6 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 	lockdep_assert_held(q->queue_lock);
 
-	/*
-	 * This could be the first entry point of blkcg implementation and
-	 * we shouldn't allow anything to go through for a bypassing queue.
-	 */
-	if (unlikely(blk_queue_bypass(q)) && !for_root)
-		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-
 	blkg = __blkg_lookup(blkcg, q);
 	if (blkg)
 		return blkg;
@@ -223,6 +213,18 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 out:
 	return blkg;
 }
+
+struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
+				       struct request_queue *q)
+{
+	/*
+	 * This could be the first entry point of blkcg implementation and
+	 * we shouldn't allow anything to go through for a bypassing queue.
+	 */
+	if (unlikely(blk_queue_bypass(q)))
+		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+	return __blkg_lookup_create(blkcg, q);
+}
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkio_group *blkg)
@@ -249,12 +251,10 @@ static void blkg_destroy(struct blkio_group *blkg)
 /**
  * blkg_destroy_all - destroy all blkgs associated with a request_queue
  * @q: request_queue of interest
- * @destroy_root: whether to destroy root blkg or not
  *
- * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
- * destroyed; otherwise, root blkg is left alone.
+ * Destroy all blkgs associated with @q.
  */
-void blkg_destroy_all(struct request_queue *q, bool destroy_root)
+static void blkg_destroy_all(struct request_queue *q)
 {
 	struct blkio_group *blkg, *n;
 
@@ -263,10 +263,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkio_cgroup *blkcg = blkg->blkcg;
 
-		/* skip root? */
-		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
-			continue;
-
 		spin_lock(&blkcg->lock);
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
@@ -274,7 +270,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
 
 	spin_unlock_irq(q->queue_lock);
 }
-EXPORT_SYMBOL_GPL(blkg_destroy_all);
 
 static void blkg_rcu_free(struct rcu_head *rcu_head)
 {
@@ -492,7 +487,7 @@ int blkg_conf_prep(struct blkio_cgroup *blkcg,
 	spin_lock_irq(disk->queue->queue_lock);
 
 	if (blkcg_policy_enabled(disk->queue, pol))
-		blkg = blkg_lookup_create(blkcg, disk->queue, false);
+		blkg = blkg_lookup_create(blkcg, disk->queue);
 	else
 		blkg = ERR_PTR(-EINVAL);
 
@@ -625,20 +620,9 @@ done:
  */
 int blkcg_init_queue(struct request_queue *q)
 {
-	int ret;
-
 	might_sleep();
 
-	ret = blk_throtl_init(q);
-	if (ret)
-		return ret;
-
-	mutex_lock(&all_q_mutex);
-	INIT_LIST_HEAD(&q->all_q_node);
-	list_add_tail(&q->all_q_node, &all_q_list);
-	mutex_unlock(&all_q_mutex);
-
-	return 0;
+	return blk_throtl_init(q);
 }
 
 /**
@@ -662,12 +646,7 @@ void blkcg_drain_queue(struct request_queue *q)
  */
 void blkcg_exit_queue(struct request_queue *q)
 {
-	mutex_lock(&all_q_mutex);
-	list_del_init(&q->all_q_node);
-	mutex_unlock(&all_q_mutex);
-
-	blkg_destroy_all(q, true);
-
+	blkg_destroy_all(q);
 	blk_throtl_exit(q);
 }
 
@@ -741,7 +720,7 @@ int blkcg_activate_policy(struct request_queue *q,
 	spin_lock_irq(q->queue_lock);
 
 	rcu_read_lock();
-	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
+	blkg = __blkg_lookup_create(&blkio_root_cgroup, q);
 	rcu_read_unlock();
 
 	if (IS_ERR(blkg)) {
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 66253a7c8ff4..222063d36355 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -115,7 +115,6 @@ extern int blkcg_activate_policy(struct request_queue *q,
 				 const struct blkio_policy_type *pol);
 extern void blkcg_deactivate_policy(struct request_queue *q,
 				    const struct blkio_policy_type *pol);
-extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
 
 void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
 		       u64 (*prfill)(struct seq_file *, void *, int),
@@ -334,8 +333,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
 					const struct blkio_policy_type *pol) { return 0; }
 static inline void blkcg_deactivate_policy(struct request_queue *q,
 					   const struct blkio_policy_type *pol) { }
-static inline void blkg_destroy_all(struct request_queue *q,
-				    bool destory_root) { }
 
 static inline void *blkg_to_pdata(struct blkio_group *blkg,
 				  struct blkio_policy_type *pol) { return NULL; }
@@ -354,8 +351,7 @@ extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
 extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 				       struct request_queue *q);
 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
-				       struct request_queue *q,
-				       bool for_root);
+				       struct request_queue *q);
 #else
 struct cgroup;
 static inline struct blkio_cgroup *
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2fc964e06ea4..e2aaf27e1f10 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -285,7 +285,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 	} else {
 		struct blkio_group *blkg;
 
-		blkg = blkg_lookup_create(blkcg, q, false);
+		blkg = blkg_lookup_create(blkcg, q);
 
 		/* if %NULL and @q is alive, fall back to root_tg */
 		if (!IS_ERR(blkg))
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 0203652e1f34..eb07eb64e85b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1348,7 +1348,7 @@ static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
 	} else {
 		struct blkio_group *blkg;
 
-		blkg = blkg_lookup_create(blkcg, q, false);
+		blkg = blkg_lookup_create(blkcg, q);
 		if (!IS_ERR(blkg))
 			cfqg = blkg_to_cfqg(blkg);
 	}