author     Christoph Hellwig <hch@lst.de>    2018-11-15 14:17:28 -0500
committer  Jens Axboe <axboe@kernel.dk>      2018-11-15 14:17:28 -0500
commit     0d945c1f966b2bcb67bb12be749da0a7fb00201b
tree       05a4bf8f0d43cf95878316a0d1e93f018c0bbec9 /block/blk-cgroup.c
parent     6d46964230d182c4b6097379738849a809d791dc
block: remove the queue_lock indirection
With the legacy request path gone there is no good reason to keep
queue_lock as a pointer; we can always use the embedded lock now.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>

Fixed missing conversions and half-done edits in floppy and blk-cgroup.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
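The pattern applied throughout this patch is mechanical: the queue's lock used to be reached through a pointer member, and every user now takes the address of the embedded lock instead. Below is a minimal, user-space analogue of that conversion using POSIX spinlocks; the struct and function names (demo_queue, demo_set_root) are illustrative stand-ins, not the kernel's definitions.

	/* Build with: cc -pthread demo.c */
	#include <pthread.h>
	#include <stdio.h>

	struct demo_queue {
		/* before: pthread_spinlock_t *queue_lock;  (indirection, possibly shared) */
		pthread_spinlock_t queue_lock;             /* after: the lock is embedded */
		int root_present;
	};

	static void demo_queue_init(struct demo_queue *q)
	{
		pthread_spin_init(&q->queue_lock, PTHREAD_PROCESS_PRIVATE);
		q->root_present = 0;
	}

	static void demo_set_root(struct demo_queue *q)
	{
		/* before: pthread_spin_lock(q->queue_lock); */
		pthread_spin_lock(&q->queue_lock);         /* after: take the address */
		q->root_present = 1;
		pthread_spin_unlock(&q->queue_lock);
	}

	int main(void)
	{
		struct demo_queue q;

		demo_queue_init(&q);
		demo_set_root(&q);
		printf("root_present=%d\n", q.root_present);
		pthread_spin_destroy(&q.queue_lock);
		return 0;
	}

Every hunk in the diff below is an instance of this change: q->queue_lock becomes &q->queue_lock in spin_lock_irq(), spin_unlock_irq(), spin_trylock(), lockdep_assert_held() and the sparse __acquires()/__releases() annotations.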
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c | 62
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3ba23b9bfeb9..0f6b44614165 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -147,7 +147,7 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 	if (blkg && blkg->q == q) {
 		if (update_hint) {
-			lockdep_assert_held(q->queue_lock);
+			lockdep_assert_held(&q->queue_lock);
 			rcu_assign_pointer(blkcg->blkg_hint, blkg);
 		}
 		return blkg;
@@ -170,7 +170,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 	int i, ret;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/* blkg holds a reference to blkcg */
 	if (!css_tryget_online(&blkcg->css)) {
@@ -268,7 +268,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	struct blkcg_gq *blkg;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	blkg = __blkg_lookup(blkcg, q, true);
 	if (blkg)
@@ -299,7 +299,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	struct blkcg_gq *parent = blkg->parent;
 	int i;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 	lockdep_assert_held(&blkcg->lock);
 
 	/* Something wrong if we are trying to remove same group twice */
@@ -349,7 +349,7 @@ static void blkg_destroy_all(struct request_queue *q)
 {
 	struct blkcg_gq *blkg, *n;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
@@ -359,7 +359,7 @@ static void blkg_destroy_all(struct request_queue *q)
 	}
 
 	q->root_blkg = NULL;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 
 /*
@@ -454,10 +454,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-		spin_lock_irq(blkg->q->queue_lock);
+		spin_lock_irq(&blkg->q->queue_lock);
 		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
-		spin_unlock_irq(blkg->q->queue_lock);
+		spin_unlock_irq(&blkg->q->queue_lock);
 	}
 	rcu_read_unlock();
 
@@ -655,7 +655,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
 	struct cgroup_subsys_state *pos_css;
 	u64 sum = 0;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -698,7 +698,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
 	struct blkg_rwstat sum = { };
 	int i;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -729,7 +729,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 					  struct request_queue *q)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	if (!blkcg_policy_enabled(q, pol))
 		return ERR_PTR(-EOPNOTSUPP);
@@ -750,7 +750,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		   char *input, struct blkg_conf_ctx *ctx)
-	__acquires(rcu) __acquires(disk->queue->queue_lock)
+	__acquires(rcu) __acquires(&disk->queue->queue_lock)
 {
 	struct gendisk *disk;
 	struct request_queue *q;
@@ -778,7 +778,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	q = disk->queue;
 
 	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	blkg = blkg_lookup_check(blkcg, pol, q);
 	if (IS_ERR(blkg)) {
@@ -805,7 +805,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		}
 
 		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		rcu_read_unlock();
 
 		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
@@ -815,7 +815,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		}
 
 		rcu_read_lock();
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 
 		blkg = blkg_lookup_check(pos, pol, q);
 		if (IS_ERR(blkg)) {
@@ -843,7 +843,7 @@ success:
 	return 0;
 
 fail_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 fail:
 	put_disk_and_module(disk);
@@ -868,9 +868,9 @@ fail:
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
+	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
 {
-	spin_unlock_irq(ctx->disk->queue->queue_lock);
+	spin_unlock_irq(&ctx->disk->queue->queue_lock);
 	rcu_read_unlock();
 	put_disk_and_module(ctx->disk);
 }
@@ -903,7 +903,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		 */
 		off += scnprintf(buf+off, size-off, "%s ", dname);
 
-		spin_lock_irq(blkg->q->queue_lock);
+		spin_lock_irq(&blkg->q->queue_lock);
 
 		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
 					offsetof(struct blkcg_gq, stat_bytes));
@@ -917,7 +917,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
 		dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
 
-		spin_unlock_irq(blkg->q->queue_lock);
+		spin_unlock_irq(&blkg->q->queue_lock);
 
 		if (rbytes || wbytes || rios || wios) {
 			has_stats = true;
@@ -1038,9 +1038,9 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
 						struct blkcg_gq, blkcg_node);
 		struct request_queue *q = blkg->q;
 
-		if (spin_trylock(q->queue_lock)) {
+		if (spin_trylock(&q->queue_lock)) {
 			blkg_destroy(blkg);
-			spin_unlock(q->queue_lock);
+			spin_unlock(&q->queue_lock);
 		} else {
 			spin_unlock_irq(&blkcg->lock);
 			cpu_relax();
@@ -1161,12 +1161,12 @@ int blkcg_init_queue(struct request_queue *q)
 
 	/* Make sure the root blkg exists. */
 	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	blkg = blkg_create(&blkcg_root, q, new_blkg);
 	if (IS_ERR(blkg))
 		goto err_unlock;
 	q->root_blkg = blkg;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 
 	if (preloaded)
@@ -1185,7 +1185,7 @@ err_destroy_all:
 	blkg_destroy_all(q);
 	return ret;
 err_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 	if (preloaded)
 		radix_tree_preload_end();
@@ -1200,7 +1200,7 @@ err_unlock:
  */
 void blkcg_drain_queue(struct request_queue *q)
 {
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/*
 	 * @q could be exiting and already have destroyed all blkgs as
@@ -1335,7 +1335,7 @@ pd_prealloc:
 		}
 	}
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkg_policy_data *pd;
@@ -1347,7 +1347,7 @@ pd_prealloc:
 		if (!pd)
 			swap(pd, pd_prealloc);
 		if (!pd) {
-			spin_unlock_irq(q->queue_lock);
+			spin_unlock_irq(&q->queue_lock);
 			goto pd_prealloc;
 		}
 
@@ -1361,7 +1361,7 @@ pd_prealloc:
 	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 out_bypass_end:
 	if (q->mq_ops)
 		blk_mq_unfreeze_queue(q);
@@ -1390,7 +1390,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (q->mq_ops)
 		blk_mq_freeze_queue(q);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	__clear_bit(pol->plid, q->blkcg_pols);
 
@@ -1403,7 +1403,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 		}
 	}
 
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (q->mq_ops)
 		blk_mq_unfreeze_queue(q);