 block/bfq-cgroup.c         |  2 +-
 block/bfq-iosched.c        | 16 ++++----
 block/blk-cgroup.c         | 62 ++++++++++++++++----------------
 block/blk-core.c           | 10 +-----
 block/blk-ioc.c            | 14 ++++----
 block/blk-iolatency.c      |  4 +-
 block/blk-mq-sched.c       |  4 +-
 block/blk-pm.c             | 20 +++++------
 block/blk-pm.h             |  6 ++--
 block/blk-sysfs.c          |  4 +-
 block/blk-throttle.c       | 22 ++++++------
 drivers/block/floppy.c     |  8 ++---
 drivers/block/pktcdvd.c    |  4 +-
 drivers/ide/ide-pm.c       | 10 +++---
 include/linux/blk-cgroup.h |  4 +-
 include/linux/blkdev.h     |  8 +----
 16 files changed, 92 insertions(+), 106 deletions(-)
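Every hunk below applies the same mechanical conversion: `queue_lock` changes from a `spinlock_t *` in `struct request_queue`, which legacy drivers could point at a lock of their own, to a `spinlock_t` embedded directly in the structure, so each call site gains a `&`. A minimal sketch of the shape of the change; the two structs here are illustrative stand-ins, not the real kernel definitions:

```c
#include <linux/spinlock.h>

/* Illustrative stand-ins only; the real definitions live in
 * include/linux/blkdev.h before and after this patch. */
struct old_queue {
	spinlock_t	__queue_lock;	/* queue-private storage */
	spinlock_t	*queue_lock;	/* what call sites dereferenced */
};

struct new_queue {
	spinlock_t	queue_lock;	/* one embedded lock, no aliasing */
};

static void old_style(struct old_queue *q)
{
	spin_lock_irq(q->queue_lock);	/* pointer: no '&' */
	spin_unlock_irq(q->queue_lock);
}

static void new_style(struct new_queue *q)
{
	spin_lock_irq(&q->queue_lock);	/* member: every caller adds '&' */
	spin_unlock_irq(&q->queue_lock);
}
```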
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 9fe5952d117d..a7a1712632b0 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -334,7 +334,7 @@ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
 
 	parent = bfqg_parent(bfqg);
 
-	lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
+	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
 
 	if (unlikely(!parent))
 		return;
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index c7636cbefc85..67b22c924aee 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -399,9 +399,9 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
 	unsigned long flags;
 	struct bfq_io_cq *icq;
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&q->queue_lock, flags);
 	icq = icq_to_bic(ioc_lookup_icq(ioc, q));
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&q->queue_lock, flags);
 
 	return icq;
 }
@@ -4034,7 +4034,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
 	 * In addition, the following queue lock guarantees that
 	 * bfqq_group(bfqq) exists as well.
 	 */
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (idle_timer_disabled)
 		/*
 		 * Since the idle timer has been disabled,
@@ -4053,7 +4053,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
 		bfqg_stats_set_start_empty_time(bfqg);
 		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 #else
 static inline void bfq_update_dispatch_stats(struct request_queue *q,
@@ -4637,11 +4637,11 @@ static void bfq_update_insert_stats(struct request_queue *q,
 	 * In addition, the following queue lock guarantees that
 	 * bfqq_group(bfqq) exists as well.
 	 */
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
 	if (idle_timer_disabled)
 		bfqg_stats_update_idle_time(bfqq_group(bfqq));
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 #else
 static inline void bfq_update_insert_stats(struct request_queue *q,
@@ -5382,9 +5382,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 	}
 	eq->elevator_data = bfqd;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	q->elevator = eq;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	/*
 	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3ba23b9bfeb9..0f6b44614165 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -147,7 +147,7 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 	if (blkg && blkg->q == q) {
 		if (update_hint) {
-			lockdep_assert_held(q->queue_lock);
+			lockdep_assert_held(&q->queue_lock);
 			rcu_assign_pointer(blkcg->blkg_hint, blkg);
 		}
 		return blkg;
@@ -170,7 +170,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 	int i, ret;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/* blkg holds a reference to blkcg */
 	if (!css_tryget_online(&blkcg->css)) {
@@ -268,7 +268,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	struct blkcg_gq *blkg;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	blkg = __blkg_lookup(blkcg, q, true);
 	if (blkg)
@@ -299,7 +299,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	struct blkcg_gq *parent = blkg->parent;
 	int i;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 	lockdep_assert_held(&blkcg->lock);
 
 	/* Something wrong if we are trying to remove same group twice */
@@ -349,7 +349,7 @@ static void blkg_destroy_all(struct request_queue *q)
 {
 	struct blkcg_gq *blkg, *n;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
@@ -359,7 +359,7 @@ static void blkg_destroy_all(struct request_queue *q)
 	}
 
 	q->root_blkg = NULL;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 
 /*
@@ -454,10 +454,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-		spin_lock_irq(blkg->q->queue_lock);
+		spin_lock_irq(&blkg->q->queue_lock);
 		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
-		spin_unlock_irq(blkg->q->queue_lock);
+		spin_unlock_irq(&blkg->q->queue_lock);
 	}
 	rcu_read_unlock();
 
@@ -655,7 +655,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
 	struct cgroup_subsys_state *pos_css;
 	u64 sum = 0;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -698,7 +698,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
 	struct blkg_rwstat sum = { };
 	int i;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -729,7 +729,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 					  struct request_queue *q)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	if (!blkcg_policy_enabled(q, pol))
 		return ERR_PTR(-EOPNOTSUPP);
@@ -750,7 +750,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		   char *input, struct blkg_conf_ctx *ctx)
-	__acquires(rcu) __acquires(disk->queue->queue_lock)
+	__acquires(rcu) __acquires(&disk->queue->queue_lock)
 {
 	struct gendisk *disk;
 	struct request_queue *q;
@@ -778,7 +778,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	q = disk->queue;
 
 	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	blkg = blkg_lookup_check(blkcg, pol, q);
 	if (IS_ERR(blkg)) {
@@ -805,7 +805,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		}
 
 		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		rcu_read_unlock();
 
 		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
@@ -815,7 +815,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		}
 
 		rcu_read_lock();
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 
 		blkg = blkg_lookup_check(pos, pol, q);
 		if (IS_ERR(blkg)) {
@@ -843,7 +843,7 @@ success:
 	return 0;
 
 fail_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
fail:
 	put_disk_and_module(disk);
@@ -868,9 +868,9 @@ fail:
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
+	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
 {
-	spin_unlock_irq(ctx->disk->queue->queue_lock);
+	spin_unlock_irq(&ctx->disk->queue->queue_lock);
 	rcu_read_unlock();
 	put_disk_and_module(ctx->disk);
 }
@@ -903,7 +903,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		 */
 		off += scnprintf(buf+off, size-off, "%s ", dname);
 
-		spin_lock_irq(blkg->q->queue_lock);
+		spin_lock_irq(&blkg->q->queue_lock);
 
 		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
 					offsetof(struct blkcg_gq, stat_bytes));
@@ -917,7 +917,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
 		dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
 
-		spin_unlock_irq(blkg->q->queue_lock);
+		spin_unlock_irq(&blkg->q->queue_lock);
 
 		if (rbytes || wbytes || rios || wios) {
 			has_stats = true;
@@ -1038,9 +1038,9 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
 						struct blkcg_gq, blkcg_node);
 		struct request_queue *q = blkg->q;
 
-		if (spin_trylock(q->queue_lock)) {
+		if (spin_trylock(&q->queue_lock)) {
 			blkg_destroy(blkg);
-			spin_unlock(q->queue_lock);
+			spin_unlock(&q->queue_lock);
 		} else {
 			spin_unlock_irq(&blkcg->lock);
 			cpu_relax();
@@ -1161,12 +1161,12 @@ int blkcg_init_queue(struct request_queue *q)
 
 	/* Make sure the root blkg exists. */
 	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	blkg = blkg_create(&blkcg_root, q, new_blkg);
 	if (IS_ERR(blkg))
 		goto err_unlock;
 	q->root_blkg = blkg;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 
 	if (preloaded)
@@ -1185,7 +1185,7 @@ err_destroy_all:
 	blkg_destroy_all(q);
 	return ret;
err_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 	if (preloaded)
 		radix_tree_preload_end();
@@ -1200,7 +1200,7 @@ err_unlock:
  */
 void blkcg_drain_queue(struct request_queue *q)
 {
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/*
 	 * @q could be exiting and already have destroyed all blkgs as
@@ -1335,7 +1335,7 @@ pd_prealloc:
 		}
 	}
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkg_policy_data *pd;
@@ -1347,7 +1347,7 @@ pd_prealloc:
 		if (!pd)
 			swap(pd, pd_prealloc);
 		if (!pd) {
-			spin_unlock_irq(q->queue_lock);
+			spin_unlock_irq(&q->queue_lock);
 			goto pd_prealloc;
 		}
 
@@ -1361,7 +1361,7 @@ pd_prealloc:
 	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
out_bypass_end:
 	if (q->mq_ops)
 		blk_mq_unfreeze_queue(q);
@@ -1390,7 +1390,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (q->mq_ops)
 		blk_mq_freeze_queue(q);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	__clear_bit(pol->plid, q->blkcg_pols);
 
@@ -1403,7 +1403,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 		}
 	}
 
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (q->mq_ops)
 		blk_mq_unfreeze_queue(q);
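The `__acquires`/`__releases` annotations fixed above encode a contract: `blkg_conf_prep()` returns with both the RCU read lock and `&q->queue_lock` held, and `blkg_conf_finish()` drops them. A sketch of how a policy's write handler is expected to pair the two helpers; the handler name, simplified signature, and body are hypothetical, loosely modeled on the throttle policy:

```c
/* Hypothetical handler sketch; assumes <linux/blk-cgroup.h> context. */
static ssize_t sketch_set_conf(struct kernfs_open_file *of, char *buf)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	int ret;

	/* On success this returns with rcu_read_lock() and
	 * &ctx.disk->queue->queue_lock held. */
	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg is pinned by the queue lock; update the per-blkg
	 * policy data here. */

	blkg_conf_finish(&ctx);	/* drops the queue lock and RCU */
	return 0;
}
```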
diff --git a/block/blk-core.c b/block/blk-core.c
index 3f94c9de0252..92b6b200e9fb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -327,8 +327,6 @@ void blk_exit_queue(struct request_queue *q)
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
-	spinlock_t *lock = q->queue_lock;
-
 	/* mark @q DYING, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	blk_set_queue_dying(q);
@@ -381,11 +379,6 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	percpu_ref_exit(&q->q_usage_counter);
 
-	spin_lock_irq(lock);
-	if (q->queue_lock != &q->__queue_lock)
-		q->queue_lock = &q->__queue_lock;
-	spin_unlock_irq(lock);
-
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
@@ -524,8 +517,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	mutex_init(&q->blk_trace_mutex);
 #endif
 	mutex_init(&q->sysfs_lock);
-	spin_lock_init(&q->__queue_lock);
-	q->queue_lock = &q->__queue_lock;
+	spin_lock_init(&q->queue_lock);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
 
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index f91ca6b70d6a..5ed59ac6ae58 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -110,9 +110,9 @@ static void ioc_release_fn(struct work_struct *work)
 						struct io_cq, ioc_node);
 		struct request_queue *q = icq->q;
 
-		if (spin_trylock(q->queue_lock)) {
+		if (spin_trylock(&q->queue_lock)) {
 			ioc_destroy_icq(icq);
-			spin_unlock(q->queue_lock);
+			spin_unlock(&q->queue_lock);
 		} else {
 			spin_unlock_irqrestore(&ioc->lock, flags);
 			cpu_relax();
@@ -233,9 +233,9 @@ void ioc_clear_queue(struct request_queue *q)
 {
 	LIST_HEAD(icq_list);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	list_splice_init(&q->icq_list, &icq_list);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	__ioc_clear_queue(&icq_list);
 }
@@ -326,7 +326,7 @@ struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
 {
 	struct io_cq *icq;
 
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/*
 	 * icq's are indexed from @ioc using radix tree and hint pointer,
@@ -385,7 +385,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	INIT_HLIST_NODE(&icq->ioc_node);
 
 	/* lock both q and ioc and try to link @icq */
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	spin_lock(&ioc->lock);
 
 	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
@@ -401,7 +401,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	}
 
 	spin_unlock(&ioc->lock);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	radix_tree_preload_end();
 	return icq;
 }
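Both `ioc_release_fn()` here and `blkcg_destroy_blkgs()` in blk-cgroup.c take an outer lock first and then reach for the queue lock in the reverse of the normal nesting order, which is why they may only *try* the queue lock and must back off on contention. A self-contained sketch of that deadlock-avoidance idiom; the types are trimmed stand-ins, not the real `io_context`/`io_cq`:

```c
#include <linux/spinlock.h>
#include <linux/list.h>

/* Minimal illustrative types. */
struct sketch_queue {
	spinlock_t		queue_lock;
};

struct sketch_icq {
	struct sketch_queue	*q;
	struct list_head	node;
};

struct sketch_ioc {
	spinlock_t		lock;
	struct list_head	icq_list;
};

/*
 * Nesting order elsewhere is queue_lock -> ioc->lock.  This release
 * path already holds ioc->lock, so it may only trylock the queue
 * lock; on failure it drops everything, lets the other CPU finish,
 * and starts over.
 */
static void sketch_release_all(struct sketch_ioc *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	while (!list_empty(&ioc->icq_list)) {
		struct sketch_icq *icq = list_first_entry(&ioc->icq_list,
						struct sketch_icq, node);
		struct sketch_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			list_del_init(&icq->node); /* stand-in for destroy */
			spin_unlock(&q->queue_lock);
		} else {
			/* contended: back off and retry */
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave(&ioc->lock, flags);
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);
}
```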
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 8edf1b353ad1..5f7f1773be61 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -485,11 +485,11 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
 	bio_associate_blkcg(bio, &blkcg->css);
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 		blkg = blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 	}
 	if (!blkg)
 		goto out;
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 66fda19be5a3..d084f731d104 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -37,9 +37,9 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	icq = ioc_lookup_icq(ioc, q);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (!icq) {
 		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
diff --git a/block/blk-pm.c b/block/blk-pm.c
index f8fdae01bea2..0a028c189897 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -89,12 +89,12 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 	/* Switch q_usage_counter back to per-cpu mode. */
 	blk_mq_unfreeze_queue(q);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (ret < 0)
 		pm_runtime_mark_last_busy(q->dev);
 	else
 		q->rpm_status = RPM_SUSPENDING;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (ret)
 		blk_clear_pm_only(q);
@@ -121,14 +121,14 @@ void blk_post_runtime_suspend(struct request_queue *q, int err)
 	if (!q->dev)
 		return;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_SUSPENDED;
 	} else {
 		q->rpm_status = RPM_ACTIVE;
 		pm_runtime_mark_last_busy(q->dev);
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (err)
 		blk_clear_pm_only(q);
@@ -151,9 +151,9 @@ void blk_pre_runtime_resume(struct request_queue *q)
 	if (!q->dev)
 		return;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	q->rpm_status = RPM_RESUMING;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 EXPORT_SYMBOL(blk_pre_runtime_resume);
 
@@ -176,7 +176,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
 	if (!q->dev)
 		return;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_ACTIVE;
 		pm_runtime_mark_last_busy(q->dev);
@@ -184,7 +184,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
 	} else {
 		q->rpm_status = RPM_SUSPENDED;
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (!err)
 		blk_clear_pm_only(q);
@@ -207,10 +207,10 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
  */
 void blk_set_runtime_active(struct request_queue *q)
 {
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	q->rpm_status = RPM_ACTIVE;
 	pm_runtime_mark_last_busy(q->dev);
 	pm_request_autosuspend(q->dev);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 EXPORT_SYMBOL(blk_set_runtime_active);
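All of the helpers above follow one rule: `q->rpm_status` is only read or written under `&q->queue_lock`, which is why every status transition is bracketed by `spin_lock_irq()`/`spin_unlock_irq()`. A small sketch of a submission-side check built on the same rule; the struct is a trimmed stand-in for the fields blk-pm.c actually touches:

```c
#include <linux/spinlock.h>
#include <linux/pm.h>		/* enum rpm_status values */

/* Trimmed stand-in for struct request_queue. */
struct sketch_queue {
	spinlock_t	queue_lock;
	struct device	*dev;
	int		rpm_status;	/* RPM_ACTIVE, RPM_SUSPENDED, ... */
};

/* Snapshot rpm_status under the embedded lock and decide whether the
 * device must be resumed before new requests can be issued. */
static bool sketch_queue_needs_resume(struct sketch_queue *q)
{
	bool resume;

	spin_lock_irq(&q->queue_lock);
	resume = q->dev && (q->rpm_status == RPM_SUSPENDED ||
			    q->rpm_status == RPM_SUSPENDING);
	spin_unlock_irq(&q->queue_lock);

	return resume;
}
```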
diff --git a/block/blk-pm.h b/block/blk-pm.h
index a8564ea72a41..ea5507d23e75 100644
--- a/block/blk-pm.h
+++ b/block/blk-pm.h
@@ -21,7 +21,7 @@ static inline void blk_pm_mark_last_busy(struct request *rq)
 
 static inline void blk_pm_requeue_request(struct request *rq)
 {
-	lockdep_assert_held(rq->q->queue_lock);
+	lockdep_assert_held(&rq->q->queue_lock);
 
 	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 		rq->q->nr_pending--;
@@ -30,7 +30,7 @@ static inline void blk_pm_requeue_request(struct request *rq)
 static inline void blk_pm_add_request(struct request_queue *q,
 				      struct request *rq)
 {
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	if (q->dev && !(rq->rq_flags & RQF_PM))
 		q->nr_pending++;
@@ -38,7 +38,7 @@ static inline void blk_pm_add_request(struct request_queue *q,
 
 static inline void blk_pm_put_request(struct request *rq)
 {
-	lockdep_assert_held(rq->q->queue_lock);
+	lockdep_assert_held(&rq->q->queue_lock);
 
 	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 		--rq->q->nr_pending;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 22fd086eba9f..1e370207a20e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -238,10 +238,10 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	q->limits.max_sectors = max_sectors_kb << 1;
 	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	return ret;
 }
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a665b0950369..d0a23f0bb3ed 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1243,7 +1243,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 	bool dispatched;
 	int ret;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (throtl_can_upgrade(td, NULL))
 		throtl_upgrade_state(td);
 
@@ -1266,9 +1266,9 @@ again:
 			break;
 
 		/* this dispatch windows is still open, relax and repeat */
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		cpu_relax();
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 	}
 
 	if (!dispatched)
@@ -1290,7 +1290,7 @@ again:
 		queue_work(kthrotld_workqueue, &td->dispatch_work);
 	}
out_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 
 /**
@@ -1314,11 +1314,11 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
 
 	bio_list_init(&bio_list_on_stack);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	for (rw = READ; rw <= WRITE; rw++)
 		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
 			bio_list_add(&bio_list_on_stack, bio);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (!bio_list_empty(&bio_list_on_stack)) {
 		blk_start_plug(&plug);
@@ -2141,7 +2141,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
 		goto out;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	throtl_update_latency_buckets(td);
 
@@ -2224,7 +2224,7 @@ again:
 	}
 
out_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
out:
 	bio_set_flag(bio, BIO_THROTTLED);
 
@@ -2345,7 +2345,7 @@ static void tg_drain_bios(struct throtl_service_queue *parent_sq)
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
 void blk_throtl_drain(struct request_queue *q)
-	__releases(q->queue_lock) __acquires(q->queue_lock)
+	__releases(&q->queue_lock) __acquires(&q->queue_lock)
 {
 	struct throtl_data *td = q->td;
 	struct blkcg_gq *blkg;
@@ -2368,7 +2368,7 @@ void blk_throtl_drain(struct request_queue *q)
 	tg_drain_bios(&td->service_queue);
 
 	rcu_read_unlock();
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	/* all bios now should be in td->service_queue, issue them */
 	for (rw = READ; rw <= WRITE; rw++)
@@ -2376,7 +2376,7 @@ void blk_throtl_drain(struct request_queue *q)
 						NULL)))
 			generic_make_request(bio);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 }
 
 int blk_throtl_init(struct request_queue *q)
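`blk_throtl_drain()` above carries the sparse annotations `__releases(&q->queue_lock) __acquires(&q->queue_lock)` because it is entered with the lock held, drops it to issue the collected bios (which may re-enter the queue), and retakes it before returning. A minimal sketch of that drop-and-retake shape; the type is a stand-in and the body is elided:

```c
#include <linux/spinlock.h>
#include <linux/compiler.h>	/* __releases/__acquires for sparse */

struct sketch_queue {
	spinlock_t queue_lock;
};

/* Entered with &q->queue_lock held; returns with it held again. */
static void sketch_drain(struct sketch_queue *q)
	__releases(&q->queue_lock) __acquires(&q->queue_lock)
{
	spin_unlock_irq(&q->queue_lock);

	/* ... issue the collected bios with the lock dropped, since
	 * submission may sleep or recurse into this queue ... */

	spin_lock_irq(&q->queue_lock);
}
```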
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a8cfa011c284..eeb4be8d000b 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2255,9 +2255,9 @@ static void request_done(int uptodate)
 		DRS->maxtrack = 1;
 
 		/* unlock chained buffers */
-		spin_lock_irqsave(q->queue_lock, flags);
+		spin_lock_irqsave(&q->queue_lock, flags);
 		floppy_end_request(req, 0);
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		spin_unlock_irqrestore(&q->queue_lock, flags);
 	} else {
 		if (rq_data_dir(req) == WRITE) {
 			/* record write error information */
@@ -2269,9 +2269,9 @@ static void request_done(int uptodate)
 			DRWE->last_error_sector = blk_rq_pos(req);
 			DRWE->last_error_generation = DRS->generation;
 		}
-		spin_lock_irqsave(q->queue_lock, flags);
+		spin_lock_irqsave(&q->queue_lock, flags);
 		floppy_end_request(req, BLK_STS_IOERR);
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		spin_unlock_irqrestore(&q->queue_lock, flags);
 	}
 }
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 9381f4e3b221..4adf4c8861cd 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2203,9 +2203,9 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
 		 * Some CDRW drives can not handle writes larger than one packet,
 		 * even if the size is a multiple of the packet size.
 		 */
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 		blk_queue_max_hw_sectors(q, pd->settings.size);
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		set_bit(PACKET_WRITABLE, &pd->flags);
 	} else {
 		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index a8c53c98252d..51fe10ac02fa 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -44,15 +44,15 @@ static int ide_pm_execute_rq(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (unlikely(blk_queue_dying(q))) {
 		rq->rq_flags |= RQF_QUIET;
 		scsi_req(rq)->result = -ENXIO;
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		blk_mq_end_request(rq, BLK_STS_OK);
 		return -ENXIO;
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	blk_execute_rq(q, NULL, rq, true);
 
 	return scsi_req(rq)->result ? -EIO : 0;
@@ -214,12 +214,12 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 	printk("%s: completing PM request, %s\n", drive->name,
 	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
 #endif
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irqsave(&q->queue_lock, flags);
 	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
 		blk_mq_stop_hw_queues(q);
 	else
 		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irqrestore(&q->queue_lock, flags);
 
 	drive->hwif->rq = NULL;
 
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 2c68efc603bd..a9e2e2037129 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -717,11 +717,11 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 		blkg = blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 	}
 
 	throtl = blk_throtl_bio(q, blkg, bio);
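This hunk and the blk-iolatency.c one above share the same lookup-then-create idiom: an RCU-protected lockless fast path, with `&q->queue_lock` taken only when the blkg does not exist yet. A sketch of that idiom using the same helpers; it assumes the caller already holds `rcu_read_lock()`, as `blkcg_bio_issue_check()` does:

```c
/* Sketch of the lazy-creation fast path; assumes rcu_read_lock()
 * is held by the caller and <linux/blk-cgroup.h> context. */
static struct blkcg_gq *sketch_blkg_get(struct blkcg *blkcg,
					struct request_queue *q)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, q);		/* lockless fast path */
	if (likely(blkg))
		return blkg;

	spin_lock_irq(&q->queue_lock);		/* slow path creates it */
	blkg = blkg_lookup_create(blkcg, q);
	if (IS_ERR(blkg))
		blkg = NULL;
	spin_unlock_irq(&q->queue_lock);

	return blkg;	/* may still be NULL on allocation failure */
}
```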
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c4a3a660e3f0..1d185f1fc333 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -446,13 +446,7 @@ struct request_queue {
 	 */
 	gfp_t			bounce_gfp;
 
-	/*
-	 * protects queue structures from reentrancy. ->__queue_lock should
-	 * _never_ be used directly, it is queue private. always use
-	 * ->queue_lock.
-	 */
-	spinlock_t		__queue_lock;
-	spinlock_t		*queue_lock;
+	spinlock_t		queue_lock;
 
 	/*
 	 * queue kobject