-rw-r--r--   Documentation/ABI/testing/sysfs-block   |  10
-rw-r--r--   Documentation/block/queue-sysfs.txt     |   5
-rw-r--r--   block/blk-lib.c                         |   7
-rw-r--r--   block/blk-settings.c                    |   3
-rw-r--r--   block/blk-sysfs.c                       |   2
-rw-r--r--   block/compat_ioctl.c                    |   2
-rw-r--r--   block/ioctl.c                           |   2
-rw-r--r--   drivers/block/drbd/drbd_main.c          |   2
-rw-r--r--   drivers/block/drbd/drbd_nl.c            |   7
-rw-r--r--   drivers/block/loop.c                    |   2
-rw-r--r--   drivers/block/mtip32xx/mtip32xx.c       |   1
-rw-r--r--   drivers/block/nbd.c                     |   1
-rw-r--r--   drivers/md/dm-cache-target.c            |   1
-rw-r--r--   drivers/md/dm-crypt.c                   |   1
-rw-r--r--   drivers/md/dm-raid.c                    |   6
-rw-r--r--   drivers/md/dm-raid1.c                   |   1
-rw-r--r--   drivers/md/dm-table.c                   |  19
-rw-r--r--   drivers/md/dm-thin.c                    |   2
-rw-r--r--   drivers/md/raid5.c                      |  50
-rw-r--r--   drivers/scsi/sd.c                       |   5
-rw-r--r--   drivers/target/target_core_device.c     |   2
-rw-r--r--   include/linux/blkdev.h                  |  15
-rw-r--r--   include/linux/device-mapper.h           |   5
23 files changed, 27 insertions, 124 deletions
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 2da04ce6aeef..dea212db9df3 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -213,14 +213,8 @@ What: /sys/block/<disk>/queue/discard_zeroes_data
 Date:		May 2011
 Contact:	Martin K. Petersen <martin.petersen@oracle.com>
 Description:
-		Devices that support discard functionality may return
-		stale or random data when a previously discarded block
-		is read back. This can cause problems if the filesystem
-		expects discarded blocks to be explicitly cleared. If a
-		device reports that it deterministically returns zeroes
-		when a discarded area is read the discard_zeroes_data
-		parameter will be set to one. Otherwise it will be 0 and
-		the result of reading a discarded area is undefined.
+		Will always return 0. Don't rely on any specific behavior
+		for discards, and don't read this file.
 
 What:		/sys/block/<disk>/queue/write_same_max_bytes
 Date:		January 2012
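
With discard_zeroes_data now documented as always reading 0, userspace that needs a range of a block device cleared has to request zeroing explicitly instead of relying on discard semantics. A minimal illustrative program follows; it is not part of this commit, the device path is hypothetical, and the offsets are assumed to be aligned to the device's logical block size:

/*
 * Sketch: explicitly zero a byte range on a block device with the
 * BLKZEROOUT ioctl ({start, length} in bytes) rather than assuming
 * that discarded blocks read back as zeroes.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        uint64_t range[2] = { 0, 1 << 20 };     /* zero the first 1 MiB */
        int fd = open("/dev/sdX", O_WRONLY);    /* hypothetical device */

        if (fd < 0 || ioctl(fd, BLKZEROOUT, range) < 0) {
                perror("BLKZEROOUT");
                return 1;
        }
        close(fd);
        return 0;
}
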
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index b7f6bdc96d73..2c1e67058fd3 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -43,11 +43,6 @@ large discards are issued, setting this value lower will make Linux issue
 smaller discards and potentially help reduce latencies induced by large
 discard operations.
 
-discard_zeroes_data (RO)
-------------------------
-When read, this file will show if the discarded block are zeroed by the
-device or not. If its value is '1' the blocks are zeroed otherwise not.
-
 hw_sector_size (RO)
 -------------------
 This is the hardware sector size of the device, in bytes.
diff --git a/block/blk-lib.c b/block/blk-lib.c
index b0c6c4bcf441..e8caecd71688 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -37,17 +37,12 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		return -ENXIO;
 
 	if (flags & BLKDEV_DISCARD_SECURE) {
-		if (flags & BLKDEV_DISCARD_ZERO)
-			return -EOPNOTSUPP;
 		if (!blk_queue_secure_erase(q))
 			return -EOPNOTSUPP;
 		op = REQ_OP_SECURE_ERASE;
 	} else {
 		if (!blk_queue_discard(q))
 			return -EOPNOTSUPP;
-		if ((flags & BLKDEV_DISCARD_ZERO) &&
-		    !q->limits.discard_zeroes_data)
-			return -EOPNOTSUPP;
 		op = REQ_OP_DISCARD;
 	}
 
@@ -126,7 +121,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			&bio);
 	if (!ret && bio) {
 		ret = submit_bio_wait(bio);
-		if (ret == -EOPNOTSUPP && !(flags & BLKDEV_DISCARD_ZERO))
+		if (ret == -EOPNOTSUPP)
 			ret = 0;
 		bio_put(bio);
 	}
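
With BLKDEV_DISCARD_ZERO removed, an in-kernel caller can no longer ask blkdev_issue_discard() for guaranteed zeroing. A hedged sketch of the kind of replacement call is below; it assumes the flags-based blkdev_issue_zeroout() from the same patch series, which zeroes the range by unmapping or by writing zeroes as the device allows, and it is not code from this diff:

/* Sketch only: zero a sector range instead of discarding with
 * BLKDEV_DISCARD_ZERO.  Assumes blkdev_issue_zeroout() takes a flags
 * argument (0 = allow unmap or explicit zero writes).
 */
#include <linux/blkdev.h>

static int clear_region(struct block_device *bdev, sector_t sector,
			sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, 0);
}
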
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 1e7174ffc9d4..4fa81ed383ca 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -103,7 +103,6 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->discard_granularity = 0;
 	lim->discard_alignment = 0;
 	lim->discard_misaligned = 0;
-	lim->discard_zeroes_data = 0;
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
 	lim->alignment_offset = 0;
@@ -127,7 +126,6 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	blk_set_default_limits(lim);
 
 	/* Inherit limits from component devices */
-	lim->discard_zeroes_data = 1;
 	lim->max_segments = USHRT_MAX;
 	lim->max_discard_segments = 1;
 	lim->max_hw_sectors = UINT_MAX;
@@ -609,7 +607,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
 	t->cluster &= b->cluster;
-	t->discard_zeroes_data &= b->discard_zeroes_data;
 
 	/* Physical block size a multiple of the logical block size? */
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index c47db43a40cc..fc20489f0d2b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -208,7 +208,7 @@ static ssize_t queue_discard_max_store(struct request_queue *q,
 
 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(queue_discard_zeroes_data(q), page);
+	return queue_var_show(0, page);
 }
 
 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 570021a0dc1c..04325b81c2b4 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -685,7 +685,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKALIGNOFF:
 		return compat_put_int(arg, bdev_alignment_offset(bdev));
 	case BLKDISCARDZEROES:
-		return compat_put_uint(arg, bdev_discard_zeroes_data(bdev));
+		return compat_put_uint(arg, 0);
 	case BLKFLSBUF:
 	case BLKROSET:
 	case BLKDISCARD:
diff --git a/block/ioctl.c b/block/ioctl.c
index 8ea00a41be01..0de02ee67eed 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -547,7 +547,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKALIGNOFF:
 		return put_int(arg, bdev_alignment_offset(bdev));
 	case BLKDISCARDZEROES:
-		return put_uint(arg, bdev_discard_zeroes_data(bdev));
+		return put_uint(arg, 0);
 	case BLKSECTGET:
 		max_sectors = min_t(unsigned int, USHRT_MAX,
 				    queue_max_sectors(bdev_get_queue(bdev)));
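
Both the native and the compat ioctl paths keep BLKDISCARDZEROES for compatibility, but after this change it always reports 0. A short userspace check, for illustration only (the device path is hypothetical):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        unsigned int zeroes = 0;
        int fd = open("/dev/sdX", O_RDONLY);    /* hypothetical device */

        if (fd < 0 || ioctl(fd, BLKDISCARDZEROES, &zeroes) < 0) {
                perror("BLKDISCARDZEROES");
                return 1;
        }
        printf("discard zeroes data: %u\n", zeroes);    /* now always 0 */
        close(fd);
        return 0;
}
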
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 8e62d9f65510..84455c365f57 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -931,7 +931,6 @@ void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct r
 		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
 		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
 		p->qlim->discard_enabled = blk_queue_discard(q);
-		p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q);
 		p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
 	} else {
 		q = device->rq_queue;
@@ -941,7 +940,6 @@ void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct r
 		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
 		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
 		p->qlim->discard_enabled = 0;
-		p->qlim->discard_zeroes_data = 0;
 		p->qlim->write_same_capable = 0;
 	}
 }
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e4516d3b971d..02255a0d68b9 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1199,10 +1199,6 @@ static void decide_on_discard_support(struct drbd_device *device,
 	struct drbd_connection *connection = first_peer_device(device)->connection;
 	bool can_do = b ? blk_queue_discard(b) : true;
 
-	if (can_do && b && !b->limits.discard_zeroes_data && !discard_zeroes_if_aligned) {
-		can_do = false;
-		drbd_info(device, "discard_zeroes_data=0 and discard_zeroes_if_aligned=no: disabling discards\n");
-	}
 	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
 		can_do = false;
 		drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
@@ -1484,8 +1480,7 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis
 	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
 		disk_conf->al_extents = drbd_al_extents_max(nbc);
 
-	if (!blk_queue_discard(q)
-	    || (!q->limits.discard_zeroes_data && !disk_conf->discard_zeroes_if_aligned)) {
+	if (!blk_queue_discard(q)) {
 		if (disk_conf->rs_discard_granularity) {
 			disk_conf->rs_discard_granularity = 0; /* disable feature */
 			drbd_info(device, "rs_discard_granularity feature disabled\n");
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3bb04c1a4ba1..3081d83d2ea3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -828,7 +828,6 @@ static void loop_config_discard(struct loop_device *lo)
 		q->limits.discard_alignment = 0;
 		blk_queue_max_discard_sectors(q, 0);
 		blk_queue_max_write_zeroes_sectors(q, 0);
-		q->limits.discard_zeroes_data = 0;
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
 		return;
 	}
@@ -837,7 +836,6 @@ static void loop_config_discard(struct loop_device *lo)
 	q->limits.discard_alignment = 0;
 	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
 	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-	q->limits.discard_zeroes_data = 1;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 30076e7753bc..05e3e664ea1b 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4025,7 +4025,6 @@ skip_create_disk:
 		dd->queue->limits.discard_granularity = 4096;
 		blk_queue_max_discard_sectors(dd->queue,
 			MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
-		dd->queue->limits.discard_zeroes_data = 0;
 	}
 
 	/* Set the capacity of the device in 512 byte sectors. */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 03ae72985c79..b02f2362fdf7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1110,7 +1110,6 @@ static int nbd_dev_add(int index)
 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	disk->queue->limits.discard_granularity = 512;
 	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
-	disk->queue->limits.discard_zeroes_data = 0;
 	blk_queue_max_hw_sectors(disk->queue, 65536);
 	disk->queue->limits.max_sectors = 256;
 
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9c689b34e6e7..975922c8f231 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2773,7 +2773,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	ti->num_discard_bios = 1;
 	ti->discards_supported = true;
-	ti->discard_zeroes_data_unsupported = true;
 	ti->split_discard_bios = false;
 
 	cache->features = ca->features;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 389a3637ffcc..ef1d836bd81b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2030,7 +2030,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	wake_up_process(cc->write_thread);
 
 	ti->num_flush_bios = 1;
-	ti->discard_zeroes_data_unsupported = true;
 
 	return 0;
 
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index f8564d63982f..468f1380de1d 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2813,7 +2813,9 @@ static void configure_discard_support(struct raid_set *rs)
 	/* Assume discards not supported until after checks below. */
 	ti->discards_supported = false;
 
-	/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
+	/*
+	 * XXX: RAID level 4,5,6 require zeroing for safety.
+	 */
 	raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
 
 	for (i = 0; i < rs->raid_disks; i++) {
@@ -2827,8 +2829,6 @@ static void configure_discard_support(struct raid_set *rs)
 			return;
 
 		if (raid456) {
-			if (!q->limits.discard_zeroes_data)
-				return;
 			if (!devices_handle_discard_safely) {
 				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
 				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 2ddc2d20e62d..a95cbb80fb34 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1124,7 +1124,6 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
 	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);
-	ti->discard_zeroes_data_unsupported = true;
 
 	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
 	if (!ms->kmirrord_wq) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5cd665c91ead..958275aca008 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1449,22 +1449,6 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 	return false;
 }
 
-static bool dm_table_discard_zeroes_data(struct dm_table *t)
-{
-	struct dm_target *ti;
-	unsigned i = 0;
-
-	/* Ensure that all targets supports discard_zeroes_data. */
-	while (i < dm_table_get_num_targets(t)) {
-		ti = dm_table_get_target(t, i++);
-
-		if (ti->discard_zeroes_data_unsupported)
-			return false;
-	}
-
-	return true;
-}
-
 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 			    sector_t start, sector_t len, void *data)
 {
@@ -1620,9 +1604,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	}
 	blk_queue_write_cache(q, wc, fua);
 
-	if (!dm_table_discard_zeroes_data(t))
-		q->limits.discard_zeroes_data = 0;
-
 	/* Ensure that all underlying devices are non-rotational. */
 	if (dm_table_all_devices_attribute(t, device_is_nonrot))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2b266a2b5035..a5f1916f621a 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3263,7 +3263,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	 * them down to the data device. The thin device's discard
 	 * processing will cause mappings to be removed from the btree.
 	 */
-	ti->discard_zeroes_data_unsupported = true;
 	if (pf.discard_enabled && pf.discard_passdown) {
 		ti->num_discard_bios = 1;
 
@@ -4119,7 +4118,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
 
 	/* In case the pool supports discards, pass them on. */
-	ti->discard_zeroes_data_unsupported = true;
 	if (tc->pool->pf.discard_enabled) {
 		ti->discards_supported = true;
 		ti->num_discard_bios = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1725a54042bb..2efdb0d67460 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7227,7 +7227,6 @@ static int raid5_run(struct mddev *mddev)
 
 	if (mddev->queue) {
 		int chunk_size;
-		bool discard_supported = true;
 		/* read-ahead size must cover two whole stripes, which
 		 * is 2 * (datadisks) * chunksize where 'n' is the
 		 * number of raid devices
@@ -7263,12 +7262,6 @@ static int raid5_run(struct mddev *mddev)
 		blk_queue_max_discard_sectors(mddev->queue,
 					      0xfffe * STRIPE_SECTORS);
 
-		/*
-		 * unaligned part of discard request will be ignored, so can't
-		 * guarantee discard_zeroes_data
-		 */
-		mddev->queue->limits.discard_zeroes_data = 0;
-
 		blk_queue_max_write_same_sectors(mddev->queue, 0);
 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
 
@@ -7277,35 +7270,24 @@ static int raid5_run(struct mddev *mddev)
 					  rdev->data_offset << 9);
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->new_data_offset << 9);
-			/*
-			 * discard_zeroes_data is required, otherwise data
-			 * could be lost. Consider a scenario: discard a stripe
-			 * (the stripe could be inconsistent if
-			 * discard_zeroes_data is 0); write one disk of the
-			 * stripe (the stripe could be inconsistent again
-			 * depending on which disks are used to calculate
-			 * parity); the disk is broken; The stripe data of this
-			 * disk is lost.
-			 */
-			if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
-			    !bdev_get_queue(rdev->bdev)->
-				limits.discard_zeroes_data)
-				discard_supported = false;
-			/* Unfortunately, discard_zeroes_data is not currently
-			 * a guarantee - just a hint.  So we only allow DISCARD
-			 * if the sysadmin has confirmed that only safe devices
-			 * are in use by setting a module parameter.
-			 */
-			if (!devices_handle_discard_safely) {
-				if (discard_supported) {
-					pr_info("md/raid456: discard support disabled due to uncertainty.\n");
-					pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
-				}
-				discard_supported = false;
-			}
 		}
 
-		if (discard_supported &&
+		/*
+		 * zeroing is required, otherwise data
+		 * could be lost. Consider a scenario: discard a stripe
+		 * (the stripe could be inconsistent if
+		 * discard_zeroes_data is 0); write one disk of the
+		 * stripe (the stripe could be inconsistent again
+		 * depending on which disks are used to calculate
+		 * parity); the disk is broken; The stripe data of this
+		 * disk is lost.
+		 *
+		 * We only allow DISCARD if the sysadmin has confirmed that
+		 * only safe devices are in use by setting a module parameter.
+		 * A better idea might be to turn DISCARD into WRITE_ZEROES
+		 * requests, as that is required to be safe.
+		 */
+		if (devices_handle_discard_safely &&
 		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
 		    mddev->queue->limits.discard_granularity >= stripe)
 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
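
After this hunk, the devices_handle_discard_safely module parameter is the only remaining gate for enabling DISCARD on raid456, per the comment above. A hedged sketch of flipping it at runtime is below; the sysfs path is an assumption based on the module_param declaration in raid5.c and is not part of this diff:

/* Sketch: enable raid456 discards by writing 'Y' to the (assumed)
 * /sys/module/raid456/parameters/devices_handle_discard_safely file.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/module/raid456/parameters/devices_handle_discard_safely",
                      O_WRONLY);

        if (fd < 0 || write(fd, "Y", 1) != 1) {
                perror("devices_handle_discard_safely");
                return 1;
        }
        close(fd);
        return 0;
}
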
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 001593ed0444..bcb0cb020fd2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -644,8 +644,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	unsigned int logical_block_size = sdkp->device->sector_size;
 	unsigned int max_blocks = 0;
 
-	q->limits.discard_zeroes_data = 0;
-
 	/*
 	 * When LBPRZ is reported, discard alignment and granularity
 	 * must be fixed to the logical block size. Otherwise the block
@@ -681,19 +679,16 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	case SD_LBP_WS16:
 		max_blocks = min_not_zero(sdkp->max_ws_blocks,
 					  (u32)SD_MAX_WS16_BLOCKS);
-		q->limits.discard_zeroes_data = sdkp->lbprz;
 		break;
 
 	case SD_LBP_WS10:
 		max_blocks = min_not_zero(sdkp->max_ws_blocks,
 					  (u32)SD_MAX_WS10_BLOCKS);
-		q->limits.discard_zeroes_data = sdkp->lbprz;
 		break;
 
 	case SD_LBP_ZERO:
 		max_blocks = min_not_zero(sdkp->max_ws_blocks,
 					  (u32)SD_MAX_WS10_BLOCKS);
-		q->limits.discard_zeroes_data = 1;
 		break;
 	}
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index c754ae33bf7b..d2f089cfa9ae 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -851,7 +851,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
 	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
 	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
 								block_size;
-	attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
+	attrib->unmap_zeroes_data = 0;
 	return true;
 }
 EXPORT_SYMBOL(target_configure_unmap_from_queue);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 21a30f011674..ec993573e0a8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -339,7 +339,6 @@ struct queue_limits {
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
 	unsigned char		cluster;
-	unsigned char		discard_zeroes_data;
 	unsigned char		raid_partial_stripes_expensive;
 	enum blk_zoned_model	zoned;
 };
@@ -1341,7 +1340,6 @@ extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 
 #define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
-#define BLKDEV_DISCARD_ZERO	(1 << 1)	/* must reliably zero data */
 
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
@@ -1541,19 +1539,6 @@ static inline int bdev_discard_alignment(struct block_device *bdev)
 	return q->limits.discard_alignment;
 }
 
-static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
-{
-	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
-		return 1;
-
-	return 0;
-}
-
-static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
-{
-	return queue_discard_zeroes_data(bdev_get_queue(bdev));
-}
-
 static inline unsigned int bdev_write_same(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 3829bee2302a..c7ea33e38fb9 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -296,11 +296,6 @@ struct dm_target {
 	 * on max_io_len boundary.
 	 */
 	bool split_discard_bios:1;
-
-	/*
-	 * Set if this target does not return zeroes on discarded blocks.
-	 */
-	bool discard_zeroes_data_unsupported:1;
 };
 
 /* Each target can link one of these into the table */