about summary refs log tree commit diff stats
path: root/include/linux/blkdev.h
diff options
context:
space:
mode:
authorBart Van Assche <bart.vanassche@wdc.com>2018-03-07 20:10:12 -0500
committerJens Axboe <axboe@kernel.dk>2018-03-08 16:13:48 -0500
commit8a0ac14b8da9b86cfbe7aace40c8d485ed5c5b97 (patch)
tree4c832e2015ec68156744fa4497c0a7531ab3cace /include/linux/blkdev.h
parent1db2008b79a32db2ad41338c6c74c4735cf74f6d (diff)
block: Move the queue_flag_*() functions from a public into a private header file
This patch helps to avoid that new code gets introduced in block drivers that manipulates queue flags without holding the queue lock when that lock should be held.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--include/linux/blkdev.h69
1 files changed, 0 insertions, 69 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 888c9b25cb8f..19eaf8d89368 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -712,75 +712,6 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
712 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
713 bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
714 714
715/*
716 * @q->queue_lock is set while a queue is being initialized. Since we know
717 * that no other threads access the queue object before @q->queue_lock has
718 * been set, it is safe to manipulate queue flags without holding the
719 * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
720 * blk_init_allocated_queue().
721 */
722static inline void queue_lockdep_assert_held(struct request_queue *q)
723{
724 if (q->queue_lock)
725 lockdep_assert_held(q->queue_lock);
726}
727
728static inline void queue_flag_set_unlocked(unsigned int flag,
729 struct request_queue *q)
730{
731 if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
732 kref_read(&q->kobj.kref))
733 lockdep_assert_held(q->queue_lock);
734 __set_bit(flag, &q->queue_flags);
735}
736
737static inline void queue_flag_clear_unlocked(unsigned int flag,
738 struct request_queue *q)
739{
740 if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
741 kref_read(&q->kobj.kref))
742 lockdep_assert_held(q->queue_lock);
743 __clear_bit(flag, &q->queue_flags);
744}
745
746static inline int queue_flag_test_and_clear(unsigned int flag,
747 struct request_queue *q)
748{
749 queue_lockdep_assert_held(q);
750
751 if (test_bit(flag, &q->queue_flags)) {
752 __clear_bit(flag, &q->queue_flags);
753 return 1;
754 }
755
756 return 0;
757}
758
759static inline int queue_flag_test_and_set(unsigned int flag,
760 struct request_queue *q)
761{
762 queue_lockdep_assert_held(q);
763
764 if (!test_bit(flag, &q->queue_flags)) {
765 __set_bit(flag, &q->queue_flags);
766 return 0;
767 }
768
769 return 1;
770}
771
772static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
773{
774 queue_lockdep_assert_held(q);
775 __set_bit(flag, &q->queue_flags);
776}
777
778static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
779{
780 queue_lockdep_assert_held(q);
781 __clear_bit(flag, &q->queue_flags);
782}
783
784 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
785 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
786 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)