Diffstat (limited to 'drivers/scsi/sd.c')
-rw-r--r-- | drivers/scsi/sd.c | 69
1 file changed, 48 insertions, 21 deletions
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 54519804c46a..3d22fc3e3c1a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -638,11 +638,24 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	unsigned int max_blocks = 0;
 
 	q->limits.discard_zeroes_data = 0;
-	q->limits.discard_alignment = sdkp->unmap_alignment *
-		logical_block_size;
-	q->limits.discard_granularity =
-		max(sdkp->physical_block_size,
-		    sdkp->unmap_granularity * logical_block_size);
+
+	/*
+	 * When LBPRZ is reported, discard alignment and granularity
+	 * must be fixed to the logical block size. Otherwise the block
+	 * layer will drop misaligned portions of the request which can
+	 * lead to data corruption. If LBPRZ is not set, we honor the
+	 * device preference.
+	 */
+	if (sdkp->lbprz) {
+		q->limits.discard_alignment = 0;
+		q->limits.discard_granularity = 1;
+	} else {
+		q->limits.discard_alignment = sdkp->unmap_alignment *
+			logical_block_size;
+		q->limits.discard_granularity =
+			max(sdkp->physical_block_size,
+			    sdkp->unmap_granularity * logical_block_size);
+	}
 
 	sdkp->provisioning_mode = mode;
 
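Why the LBPRZ special case matters: when the device reports LBPRZ=1, unmapped blocks are guaranteed to read back as zeros, so callers may depend on every block in a discarded range actually being cleared. If the queue instead keeps the device's preferred granularity, the block layer rounds misaligned discards inward and silently skips the head and tail. A minimal userspace sketch of that clipping effect (an illustration of the failure mode, not the kernel's actual splitting code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long start = 1003, nr = 2051;	/* request, in logical blocks */
		unsigned long long gran = 8;			/* device-preferred granule */

		/* round the request inward to the granule boundaries */
		unsigned long long first = (start + gran - 1) / gran * gran;
		unsigned long long last  = (start + nr) / gran * gran;
		unsigned long long clipped = nr - (last - first);

		printf("granularity %llu: %llu blocks of the request are never discarded\n",
		       gran, clipped);
		printf("granularity 1:   0 blocks are lost\n");
		return 0;
	}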
@@ -2321,11 +2334,8 @@ got_data:
 		}
 	}
 
-	if (sdkp->capacity > 0xffffffff) {
+	if (sdkp->capacity > 0xffffffff)
 		sdp->use_16_for_rw = 1;
-		sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
-	} else
-		sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
 
 	/* Rescale capacity to 512-byte units */
 	if (sector_size == 4096)
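Context for the capacity check: READ(10)/WRITE(10) CDBs carry a 32-bit logical block address and a 16-bit transfer length, so disks with more than 0xffffffff logical blocks must be driven with the 16-byte CDBs. The per-command block-count cap that used to be assigned here is now derived from use_16_for_rw in sd_revalidate_disk (see the last hunk). A quick back-of-the-envelope sketch of the 10-byte CDB address limit (values are illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long max_lba10 = 0xffffffffULL;	/* 32-bit LBA field */
		unsigned int sector = 512;

		printf("READ(10) addressable capacity: %llu bytes (~%llu GiB)\n",
		       (max_lba10 + 1) * sector,
		       ((max_lba10 + 1) * sector) >> 30);
		return 0;
	}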
@@ -2642,7 +2652,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
 	unsigned int sector_sz = sdkp->device->sector_size;
 	const int vpd_len = 64;
-	u32 max_xfer_length;
 	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
 
 	if (!buffer ||
@@ -2650,14 +2659,11 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
 		goto out;
 
-	max_xfer_length = get_unaligned_be32(&buffer[8]);
-	if (max_xfer_length)
-		sdkp->max_xfer_blocks = max_xfer_length;
-
 	blk_queue_io_min(sdkp->disk->queue,
 			 get_unaligned_be16(&buffer[6]) * sector_sz);
-	blk_queue_io_opt(sdkp->disk->queue,
-			 get_unaligned_be32(&buffer[12]) * sector_sz);
+
+	sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
+	sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
 
 	if (buffer[3] == 0x3c) {
 		unsigned int lba_count, desc_count;
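The two values stashed here come straight out of the Block Limits VPD page (0xB0): bytes 6-7 hold the optimal transfer length granularity, bytes 8-11 the maximum transfer length and bytes 12-15 the optimal transfer length, all expressed in logical blocks. Instead of applying them to the queue on the spot, the raw block counts are kept in sdkp so sd_revalidate_disk can sanity-check them. A self-contained userspace sketch of that decoding against a synthetic buffer (illustrative field values, not from a real device):

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t be16(const unsigned char *p) { return (p[0] << 8) | p[1]; }
	static uint32_t be32(const unsigned char *p)
	{
		return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
	}

	int main(void)
	{
		unsigned char vpd[64] = {0};

		vpd[1] = 0xb0;				/* page code */
		vpd[3] = 0x3c;				/* full-length page, as checked above */
		vpd[7] = 0x08;				/* io_min granule: 8 blocks */
		vpd[10] = 0x20;				/* max xfer: 0x2000 blocks */
		vpd[14] = 0x04;				/* opt xfer: 0x400 blocks */

		printf("io_min granularity: %u blocks\n", (unsigned)be16(&vpd[6]));
		printf("max_xfer_blocks:    %u\n", (unsigned)be32(&vpd[8]));
		printf("opt_xfer_blocks:    %u\n", (unsigned)be32(&vpd[12]));
		return 0;
	}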
@@ -2806,6 +2812,11 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
 	return 0;
 }
 
+static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
+{
+	return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
 /**
  *	sd_revalidate_disk - called the first time a new disk is seen,
  *	performs disk spin up, read_capacity, etc.
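The new helper rescales a count of device logical blocks into the 512-byte sectors used by the queue limits; for 4096-byte blocks the shift is ilog2(4096) - 9 = 3, i.e. one block equals eight sectors. A small standalone sketch of the same arithmetic (ilog2 is open-coded here since this is userspace):

	#include <stdio.h>

	static unsigned int logical_to_sectors(unsigned int sector_size, unsigned int blocks)
	{
		unsigned int shift = 0;

		while ((1u << (shift + 1)) <= sector_size)	/* ilog2(sector_size) */
			shift++;
		return blocks << (shift - 9);
	}

	int main(void)
	{
		printf("512-byte blocks:  %u -> %u sectors\n", 1024u, logical_to_sectors(512, 1024));
		printf("4096-byte blocks: %u -> %u sectors\n", 1024u, logical_to_sectors(4096, 1024));
		return 0;
	}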
@@ -2815,8 +2826,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
 {
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
+	struct request_queue *q = sdkp->disk->queue;
 	unsigned char *buffer;
-	unsigned int max_xfer;
+	unsigned int dev_max, rw_max;
 
 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
 				      "sd_revalidate_disk\n"));
@@ -2864,11 +2876,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	 */
 	sd_set_flush_flag(sdkp);
 
-	max_xfer = sdkp->max_xfer_blocks;
-	max_xfer <<= ilog2(sdp->sector_size) - 9;
+	/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
+	dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
+
+	/* Some devices report a maximum block count for READ/WRITE requests. */
+	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
+	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+
+	/*
+	 * Use the device's preferred I/O size for reads and writes
+	 * unless the reported value is unreasonably large (or garbage).
+	 */
+	if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max &&
+	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
+		rw_max = q->limits.io_opt =
+			logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+	else
+		rw_max = BLK_DEF_MAX_SECTORS;
 
-	sdkp->disk->queue->limits.max_sectors =
-		min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+	/* Combine with controller limits */
+	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
 
 	set_capacity(disk, sdkp->capacity);
 	sd_config_write_same(sdkp);
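Putting the last hunk together: dev_max (and hence max_dev_sectors) is the hard device cap derived from the CDB transfer-length field size and the VPD maximum transfer length, rw_max/io_opt is the preferred request size when the reported optimal transfer length looks sane, and max_sectors is that value clamped by the controller's max_hw_sectors. A userspace sketch of how the limits compose for a hypothetical disk with 4096-byte blocks; the three #defines are stand-ins for the kernel constants of the same names and may not match the real values:

	#include <stdio.h>

	#define SD_DEF_XFER_BLOCKS	0xffffu		/* stand-in values */
	#define SD_MAX_XFER_BLOCKS	0xffffffffu
	#define BLK_DEF_MAX_SECTORS	1024u

	static unsigned int min_not_zero(unsigned int a, unsigned int b)
	{
		return (a && b) ? (a < b ? a : b) : (a ? a : b);
	}

	int main(void)
	{
		unsigned int use_16_for_rw = 0, max_xfer_blocks = 0x2000, opt_xfer_blocks = 0x400;
		unsigned int max_hw_sectors = 2048, shift = 3;	/* 4096-byte blocks -> <<3 */

		unsigned int dev_max = use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
		unsigned int rw_max;

		dev_max = min_not_zero(dev_max, max_xfer_blocks);

		if (opt_xfer_blocks && opt_xfer_blocks <= dev_max &&
		    opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
			rw_max = opt_xfer_blocks << shift;
		else
			rw_max = BLK_DEF_MAX_SECTORS;

		printf("max_dev_sectors = %u\n", dev_max << shift);	/* hard device cap */
		printf("max_sectors     = %u\n", rw_max < max_hw_sectors ? rw_max : max_hw_sectors);
		return 0;
	}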