author	Bart Van Assche <bart.vanassche@wdc.com>	2018-03-07 20:10:10 -0500
committer	Jens Axboe <axboe@kernel.dk>	2018-03-08 16:13:48 -0500
commit	8b904b5b6b58b9a29dcf3f82d936d9e7fd69fda6 (patch)
tree	c5ba5ff20820c410a422e7461f400b0546111916 /drivers/md
parent	bf3a2b310ea35ae2f641bb734892574bd820d4a5 (diff)
block: Use blk_queue_flag_*() in drivers instead of queue_flag_*()
This patch has been generated as follows:

for verb in set_unlocked clear_unlocked set clear; do
	replace-in-files queue_flag_${verb} blk_queue_flag_${verb%_unlocked} \
		$(git grep -lw queue_flag_${verb} drivers block/bsg*)
done

Except for protecting all queue flag changes with the queue lock this
patch does not change any functionality.

Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Shaohua Li <shli@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
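For context, the blk_queue_flag_set() and blk_queue_flag_clear() helpers that this patch converts drivers to were added by an earlier patch in the same series. A minimal sketch of their behavior, assuming the block-layer implementation of that era (the exact upstream code may differ in detail): unlike the queue_flag_*_unlocked() calls they replace, they serialize the bit manipulation with the queue lock.

void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(flag, q);	/* sets the bit in q->queue_flags */
	spin_unlock_irqrestore(q->queue_lock, flags);
}

void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_clear(flag, q);	/* clears the bit in q->queue_flags */
	spin_unlock_irqrestore(q->queue_lock, flags);
}

With these helpers the callers no longer have to reason about whether q->queue_lock is held, which is the only behavioral change the mechanical rename below introduces.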
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-table.c	16
-rw-r--r--	drivers/md/md-linear.c	4
-rw-r--r--	drivers/md/md.c	4
-rw-r--r--	drivers/md/raid0.c	4
-rw-r--r--	drivers/md/raid1.c	6
-rw-r--r--	drivers/md/raid10.c	6
-rw-r--r--	drivers/md/raid5.c	4
7 files changed, 22 insertions, 22 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5fe7ec356c33..54c39ad4ef01 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1861,7 +1861,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	q->limits = *limits;
 
 	if (!dm_table_supports_discards(t)) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		/* Must also clear discard limits... */
 		q->limits.max_discard_sectors = 0;
 		q->limits.max_hw_discard_sectors = 0;
@@ -1869,7 +1869,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		q->limits.discard_alignment = 0;
 		q->limits.discard_misaligned = 0;
 	} else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 
 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
@@ -1879,15 +1879,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	blk_queue_write_cache(q, wc, fua);
 
 	if (dm_table_supports_dax(t))
-		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	if (dm_table_supports_dax_write_cache(t))
 		dax_write_cache(t->md->dax_dev, true);
 
 	/* Ensure that all underlying devices are non-rotational. */
 	if (dm_table_all_devices_attribute(t, device_is_nonrot))
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	else
-		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 
 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
@@ -1895,9 +1895,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		q->limits.max_write_zeroes_sectors = 0;
 
 	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
-		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
 
 	dm_table_verify_integrity(t);
 
@@ -1908,7 +1908,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * have it set.
 	 */
 	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 773fc70dced7..4964323d936b 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -138,9 +138,9 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 	}
 
 	if (!discard_supported)
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 
 	/*
 	 * Here we calculate the device offsets.
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eba7fa2f0abb..de2b26fba5d8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5608,9 +5608,9 @@ int md_run(struct mddev *mddev)
 		if (mddev->degraded)
 			nonrot = false;
 		if (nonrot)
-			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
 		mddev->queue->backing_dev_info->congested_data = mddev;
 		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 5ecba9eef441..584c10347267 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev)
 				discard_supported = true;
 		}
 		if (!discard_supported)
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
 		else
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	}
 
 	/* calculate array device size */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index b2eae332e1a2..f1635eb9e95a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1760,7 +1760,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		}
 	}
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	print_conf(conf);
 	return err;
 }
@@ -3099,10 +3099,10 @@ static int raid1_run(struct mddev *mddev)
 
 	if (mddev->queue) {
 		if (discard_supported)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						  mddev->queue);
 	}
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 99c9207899a7..e9c409c5f344 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1845,7 +1845,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			break;
 	}
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 
 	print_conf(conf);
 	return err;
@@ -3844,10 +3844,10 @@ static int raid10_run(struct mddev *mddev)
 
 	if (mddev->queue) {
 		if (discard_supported)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						  mddev->queue);
 	}
 	/* need to check that every block has at least one working mirror */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 50d01144b805..14714b23a2fa 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7444,10 +7444,10 @@ static int raid5_run(struct mddev *mddev)
 		if (devices_handle_discard_safely &&
 		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
 		    mddev->queue->limits.discard_granularity >= stripe)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 
 		blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);