author     Christoph Hellwig <hch@lst.de>   2017-04-05 13:21:23 -0400
committer  Jens Axboe <axboe@fb.com>        2017-04-08 13:25:38 -0400
commit     48920ff2a5a940cd07d12cc79e4a2c75f1185aee (patch)
tree       0505a85fce20a0d34c5d4bbc71ba8771a037e209 /drivers
parent     45c21793a6601b29926d67f7a07fe4077a45072e (diff)
block: remove the discard_zeroes_data flag
Now that we use the proper REQ_OP_WRITE_ZEROES operation everywhere we can
kill this hack.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
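
For context, the replacement pattern is sketched below; it is illustrative only and not part of this patch. A driver that can reliably zero a range advertises REQ_OP_WRITE_ZEROES support on its queue (as the loop driver does in the hunk further down), and callers that need guaranteed zeroes test that limit instead of the removed discard_zeroes_data flag. The example_* helpers are hypothetical names.

#include <linux/blkdev.h>

/* Driver side: advertise write-zeroes support on the queue. */
static void example_setup_queue(struct request_queue *q)
{
        blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
}

/* Consumer side: ask whether the queue can zero data, instead of
 * relying on discards happening to return zeroes. */
static bool example_can_write_zeroes(struct request_queue *q)
{
        return q->limits.max_write_zeroes_sectors != 0;
}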
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/drbd/drbd_main.c       |  2
-rw-r--r--  drivers/block/drbd/drbd_nl.c         |  7
-rw-r--r--  drivers/block/loop.c                 |  2
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c    |  1
-rw-r--r--  drivers/block/nbd.c                  |  1
-rw-r--r--  drivers/md/dm-cache-target.c         |  1
-rw-r--r--  drivers/md/dm-crypt.c                |  1
-rw-r--r--  drivers/md/dm-raid.c                 |  6
-rw-r--r--  drivers/md/dm-raid1.c                |  1
-rw-r--r--  drivers/md/dm-table.c                | 19
-rw-r--r--  drivers/md/dm-thin.c                 |  2
-rw-r--r--  drivers/md/raid5.c                   | 50
-rw-r--r--  drivers/scsi/sd.c                    |  5
-rw-r--r--  drivers/target/target_core_device.c  |  2

14 files changed, 21 insertions(+), 79 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 8e62d9f65510..84455c365f57 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -931,7 +931,6 @@ void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct r
                 p->qlim->io_min = cpu_to_be32(queue_io_min(q));
                 p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
                 p->qlim->discard_enabled = blk_queue_discard(q);
-                p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q);
                 p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
         } else {
                 q = device->rq_queue;
@@ -941,7 +940,6 @@ void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct r
                 p->qlim->io_min = cpu_to_be32(queue_io_min(q));
                 p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
                 p->qlim->discard_enabled = 0;
-                p->qlim->discard_zeroes_data = 0;
                 p->qlim->write_same_capable = 0;
         }
 }
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e4516d3b971d..02255a0d68b9 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1199,10 +1199,6 @@ static void decide_on_discard_support(struct drbd_device *device,
         struct drbd_connection *connection = first_peer_device(device)->connection;
         bool can_do = b ? blk_queue_discard(b) : true;
 
-        if (can_do && b && !b->limits.discard_zeroes_data && !discard_zeroes_if_aligned) {
-                can_do = false;
-                drbd_info(device, "discard_zeroes_data=0 and discard_zeroes_if_aligned=no: disabling discards\n");
-        }
         if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
                 can_do = false;
                 drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
@@ -1484,8 +1480,7 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis
         if (disk_conf->al_extents > drbd_al_extents_max(nbc))
                 disk_conf->al_extents = drbd_al_extents_max(nbc);
 
-        if (!blk_queue_discard(q)
-            || (!q->limits.discard_zeroes_data && !disk_conf->discard_zeroes_if_aligned)) {
+        if (!blk_queue_discard(q)) {
                 if (disk_conf->rs_discard_granularity) {
                         disk_conf->rs_discard_granularity = 0; /* disable feature */
                         drbd_info(device, "rs_discard_granularity feature disabled\n");
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3bb04c1a4ba1..3081d83d2ea3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -828,7 +828,6 @@ static void loop_config_discard(struct loop_device *lo)
                 q->limits.discard_alignment = 0;
                 blk_queue_max_discard_sectors(q, 0);
                 blk_queue_max_write_zeroes_sectors(q, 0);
-                q->limits.discard_zeroes_data = 0;
                 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
                 return;
         }
@@ -837,7 +836,6 @@ static void loop_config_discard(struct loop_device *lo)
         q->limits.discard_alignment = 0;
         blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
         blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-        q->limits.discard_zeroes_data = 1;
         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 30076e7753bc..05e3e664ea1b 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4025,7 +4025,6 @@ skip_create_disk:
                 dd->queue->limits.discard_granularity = 4096;
                 blk_queue_max_discard_sectors(dd->queue,
                         MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
-                dd->queue->limits.discard_zeroes_data = 0;
         }
 
         /* Set the capacity of the device in 512 byte sectors. */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 03ae72985c79..b02f2362fdf7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1110,7 +1110,6 @@ static int nbd_dev_add(int index)
         queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
         disk->queue->limits.discard_granularity = 512;
         blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
-        disk->queue->limits.discard_zeroes_data = 0;
         blk_queue_max_hw_sectors(disk->queue, 65536);
         disk->queue->limits.max_sectors = 256;
 
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9c689b34e6e7..975922c8f231 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2773,7 +2773,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
         ti->num_discard_bios = 1;
         ti->discards_supported = true;
-        ti->discard_zeroes_data_unsupported = true;
         ti->split_discard_bios = false;
 
         cache->features = ca->features;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 389a3637ffcc..ef1d836bd81b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2030,7 +2030,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         wake_up_process(cc->write_thread);
 
         ti->num_flush_bios = 1;
-        ti->discard_zeroes_data_unsupported = true;
 
         return 0;
 
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index f8564d63982f..468f1380de1d 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2813,7 +2813,9 @@ static void configure_discard_support(struct raid_set *rs)
         /* Assume discards not supported until after checks below. */
         ti->discards_supported = false;
 
-        /* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
+        /*
+         * XXX: RAID level 4,5,6 require zeroing for safety.
+         */
         raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
 
         for (i = 0; i < rs->raid_disks; i++) {
@@ -2827,8 +2829,6 @@ static void configure_discard_support(struct raid_set *rs)
                         return;
 
                 if (raid456) {
-                        if (!q->limits.discard_zeroes_data)
-                                return;
                         if (!devices_handle_discard_safely) {
                                 DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
                                 DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 2ddc2d20e62d..a95cbb80fb34 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1124,7 +1124,6 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         ti->num_flush_bios = 1;
         ti->num_discard_bios = 1;
         ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);
-        ti->discard_zeroes_data_unsupported = true;
 
         ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
         if (!ms->kmirrord_wq) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5cd665c91ead..958275aca008 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1449,22 +1449,6 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
         return false;
 }
 
-static bool dm_table_discard_zeroes_data(struct dm_table *t)
-{
-        struct dm_target *ti;
-        unsigned i = 0;
-
-        /* Ensure that all targets supports discard_zeroes_data. */
-        while (i < dm_table_get_num_targets(t)) {
-                ti = dm_table_get_target(t, i++);
-
-                if (ti->discard_zeroes_data_unsupported)
-                        return false;
-        }
-
-        return true;
-}
-
 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
                             sector_t start, sector_t len, void *data)
 {
@@ -1620,9 +1604,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
         }
         blk_queue_write_cache(q, wc, fua);
 
-        if (!dm_table_discard_zeroes_data(t))
-                q->limits.discard_zeroes_data = 0;
-
         /* Ensure that all underlying devices are non-rotational. */
         if (dm_table_all_devices_attribute(t, device_is_nonrot))
                 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2b266a2b5035..a5f1916f621a 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3263,7 +3263,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
          * them down to the data device. The thin device's discard
          * processing will cause mappings to be removed from the btree.
          */
-        ti->discard_zeroes_data_unsupported = true;
         if (pf.discard_enabled && pf.discard_passdown) {
                 ti->num_discard_bios = 1;
 
@@ -4119,7 +4118,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
         ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
 
         /* In case the pool supports discards, pass them on. */
-        ti->discard_zeroes_data_unsupported = true;
         if (tc->pool->pf.discard_enabled) {
                 ti->discards_supported = true;
                 ti->num_discard_bios = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1725a54042bb..2efdb0d67460 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7227,7 +7227,6 @@ static int raid5_run(struct mddev *mddev)
 
         if (mddev->queue) {
                 int chunk_size;
-                bool discard_supported = true;
                 /* read-ahead size must cover two whole stripes, which
                  * is 2 * (datadisks) * chunksize where 'n' is the
                  * number of raid devices
@@ -7263,12 +7262,6 @@ static int raid5_run(struct mddev *mddev)
                 blk_queue_max_discard_sectors(mddev->queue,
                                               0xfffe * STRIPE_SECTORS);
 
-                /*
-                 * unaligned part of discard request will be ignored, so can't
-                 * guarantee discard_zeroes_data
-                 */
-                mddev->queue->limits.discard_zeroes_data = 0;
-
                 blk_queue_max_write_same_sectors(mddev->queue, 0);
                 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
 
@@ -7277,35 +7270,24 @@ static int raid5_run(struct mddev *mddev)
                                           rdev->data_offset << 9);
                         disk_stack_limits(mddev->gendisk, rdev->bdev,
                                           rdev->new_data_offset << 9);
-                        /*
-                         * discard_zeroes_data is required, otherwise data
-                         * could be lost. Consider a scenario: discard a stripe
-                         * (the stripe could be inconsistent if
-                         * discard_zeroes_data is 0); write one disk of the
-                         * stripe (the stripe could be inconsistent again
-                         * depending on which disks are used to calculate
-                         * parity); the disk is broken; The stripe data of this
-                         * disk is lost.
-                         */
-                        if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
-                            !bdev_get_queue(rdev->bdev)->
-                                                limits.discard_zeroes_data)
-                                discard_supported = false;
-                        /* Unfortunately, discard_zeroes_data is not currently
-                         * a guarantee - just a hint. So we only allow DISCARD
-                         * if the sysadmin has confirmed that only safe devices
-                         * are in use by setting a module parameter.
-                         */
-                        if (!devices_handle_discard_safely) {
-                                if (discard_supported) {
-                                        pr_info("md/raid456: discard support disabled due to uncertainty.\n");
-                                        pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
-                                }
-                                discard_supported = false;
-                        }
                 }
 
-                if (discard_supported &&
+                /*
+                 * zeroing is required, otherwise data
+                 * could be lost. Consider a scenario: discard a stripe
+                 * (the stripe could be inconsistent if
+                 * discard_zeroes_data is 0); write one disk of the
+                 * stripe (the stripe could be inconsistent again
+                 * depending on which disks are used to calculate
+                 * parity); the disk is broken; The stripe data of this
+                 * disk is lost.
+                 *
+                 * We only allow DISCARD if the sysadmin has confirmed that
+                 * only safe devices are in use by setting a module parameter.
+                 * A better idea might be to turn DISCARD into WRITE_ZEROES
+                 * requests, as that is required to be safe.
+                 */
+                if (devices_handle_discard_safely &&
                     mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
                     mddev->queue->limits.discard_granularity >= stripe)
                         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
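
The comment added in the raid5 hunk above floats the idea of turning DISCARD into WRITE_ZEROES requests. A minimal sketch of what a caller could do instead of trusting DISCARD to zero, assuming the flags-based blkdev_issue_zeroout() interface added around the same series (example_zero_stripe() is a hypothetical helper, not code from this patch):

#include <linux/blkdev.h>

/* Zero a range explicitly rather than relying on DISCARD to zero it.
 * With flags == 0 the block layer may use a deallocating write-zeroes
 * where the device guarantees zeroes, and otherwise falls back to
 * writing zero pages - either way the range reads back as zeroes,
 * which is what raid456 parity consistency needs. */
static int example_zero_stripe(struct block_device *bdev, sector_t start,
                               sector_t nr_sects)
{
        return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_NOIO, 0);
}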
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 001593ed0444..bcb0cb020fd2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -644,8 +644,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
         unsigned int logical_block_size = sdkp->device->sector_size;
         unsigned int max_blocks = 0;
 
-        q->limits.discard_zeroes_data = 0;
-
         /*
          * When LBPRZ is reported, discard alignment and granularity
          * must be fixed to the logical block size. Otherwise the block
@@ -681,19 +679,16 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
         case SD_LBP_WS16:
                 max_blocks = min_not_zero(sdkp->max_ws_blocks,
                                           (u32)SD_MAX_WS16_BLOCKS);
-                q->limits.discard_zeroes_data = sdkp->lbprz;
                 break;
 
         case SD_LBP_WS10:
                 max_blocks = min_not_zero(sdkp->max_ws_blocks,
                                           (u32)SD_MAX_WS10_BLOCKS);
-                q->limits.discard_zeroes_data = sdkp->lbprz;
                 break;
 
         case SD_LBP_ZERO:
                 max_blocks = min_not_zero(sdkp->max_ws_blocks,
                                           (u32)SD_MAX_WS10_BLOCKS);
-                q->limits.discard_zeroes_data = 1;
                 break;
         }
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index c754ae33bf7b..d2f089cfa9ae 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -851,7 +851,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
         attrib->unmap_granularity = q->limits.discard_granularity / block_size;
         attrib->unmap_granularity_alignment = q->limits.discard_alignment /
                                                                 block_size;
-        attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
+        attrib->unmap_zeroes_data = 0;
         return true;
 }
 EXPORT_SYMBOL(target_configure_unmap_from_queue);