author:    Martin K. Petersen <martin.petersen@oracle.com>  2009-11-26 12:00:40 -0500
committer: James Bottomley <James.Bottomley@suse.de>  2009-12-10 09:54:15 -0500
commit:    e339c1a7c09ef736dca7b3a4353c7742557d9f8f
tree:      60f7bc7c1867f180262dab01cc8c047967d728cd /drivers/scsi/sd.c
parent:    cc9b2e9f6603190c009e5d2629ce8e3f99571346
[SCSI] sd: WRITE SAME(16) / UNMAP support
Implement a function for handling discard requests that sends either
WRITE SAME(16) or UNMAP(10) depending on parameters indicated by the
device in the block limits VPD.
Extract unmap constraints and report them to the block layer.
Based in part on a patch by Christoph Hellwig <hch@lst.de>.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/sd.c')
-rw-r--r--  drivers/scsi/sd.c  107
1 file changed, 107 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9093c7261f33..255da53e5a01 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -264,6 +264,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, 20, "%u\n", sdkp->ATO);
 }
 
+static ssize_t
+sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+	return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
+}
+
 static struct device_attribute sd_disk_attrs[] = {
 	__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
 	       sd_store_cache_type),
@@ -274,6 +283,7 @@ static struct device_attribute sd_disk_attrs[] = {
 	       sd_store_manage_start_stop),
 	__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
 	__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
+	__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
 	__ATTR_NULL,
 };
 
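The new attribute is read-only and simply surfaces sdkp->thin_provisioning to
userspace. As a rough sketch of how it might be consumed (the "0:0:0:0" device
address below is an assumption; the actual host:channel:target:lun tuple varies
per system):

/* Hypothetical userspace reader for the new sysfs attribute; the
 * device address in the path is illustrative only. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_disk/0:0:0:0/thin_provisioning", "r");
	int tp;

	if (f && fscanf(f, "%d", &tp) == 1)
		printf("thin provisioning: %s\n", tp ? "enabled" : "disabled");
	if (f)
		fclose(f);
	return 0;
}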
@@ -399,6 +409,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
 }
 
 /**
+ * sd_prepare_discard - unmap blocks on thinly provisioned device
+ * @rq: Request to prepare
+ *
+ * Will issue either UNMAP or WRITE SAME(16) depending on preference
+ * indicated by target device.
+ **/
+static int sd_prepare_discard(struct request *rq)
+{
+	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+	struct bio *bio = rq->bio;
+	sector_t sector = bio->bi_sector;
+	unsigned int num = bio_sectors(bio);
+
+	if (sdkp->device->sector_size == 4096) {
+		sector >>= 3;
+		num >>= 3;
+	}
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->timeout = SD_TIMEOUT;
+
+	memset(rq->cmd, 0, rq->cmd_len);
+
+	if (sdkp->unmap) {
+		char *buf = kmap_atomic(bio_page(bio), KM_USER0);
+
+		rq->cmd[0] = UNMAP;
+		rq->cmd[8] = 24;
+		rq->cmd_len = 10;
+
+		/* Ensure that data length matches payload */
+		rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
+
+		put_unaligned_be16(6 + 16, &buf[0]);
+		put_unaligned_be16(16, &buf[2]);
+		put_unaligned_be64(sector, &buf[8]);
+		put_unaligned_be32(num, &buf[16]);
+
+		kunmap_atomic(buf, KM_USER0);
+	} else {
+		rq->cmd[0] = WRITE_SAME_16;
+		rq->cmd[1] = 0x8; /* UNMAP */
+		put_unaligned_be64(sector, &rq->cmd[2]);
+		put_unaligned_be32(num, &rq->cmd[10]);
+		rq->cmd_len = 16;
+	}
+
+	return BLKPREP_OK;
+}
+
+/**
  * sd_init_command - build a scsi (read or write) command from
  * information in the request structure.
  * @SCpnt: pointer to mid-level's per scsi command structure that
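For reference, the 24 bytes staged into the bio page above form an UNMAP
parameter list with a single block descriptor. A minimal sketch of that layout,
using SBC-3 field names (the struct itself is illustrative, not part of this
patch, which writes the bytes directly with put_unaligned_be*()):

/* UNMAP parameter list as built by sd_prepare_discard(); all fields
 * are big-endian per SBC-3.  Illustrative only. */
struct unmap_parameter_list {
	__be16 data_length;		/* 6 + 16: bytes following this field */
	__be16 block_desc_length;	/* 16: one block descriptor */
	__u8   reserved[4];
	__be64 lba;			/* first logical block to unmap */
	__be32 num_blocks;		/* number of logical blocks */
	__u8   reserved2[4];
} __packed;				/* 24 bytes, matching rq->cmd[8] */

The 4096-byte branch exists because sector and num arrive from the block layer
in 512-byte units, while the CDB and descriptor are expressed in logical
blocks; shifting right by 3 converts to 4 KiB blocks.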
@@ -418,6 +479,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 	int ret, host_dif;
 	unsigned char protect;
 
+	/*
+	 * Discard requests come in as REQ_TYPE_FS but we turn them into
+	 * block PC requests to make life easier.
+	 */
+	if (blk_discard_rq(rq))
+		ret = sd_prepare_discard(rq);
+
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		ret = scsi_setup_blk_pc_cmnd(sdp, rq);
 		goto out;
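Note that sd_prepare_discard() flips rq->cmd_type to REQ_TYPE_BLOCK_PC, so a
converted discard immediately matches the REQ_TYPE_BLOCK_PC branch below and is
finished by scsi_setup_blk_pc_cmnd() like any other prebuilt command.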
@@ -1432,6 +1500,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 		sd_printk(KERN_NOTICE, sdkp,
 			  "physical block alignment offset: %u\n", alignment);
 
+	if (buffer[14] & 0x80) { /* TPE */
+		struct request_queue *q = sdp->request_queue;
+
+		sdkp->thin_provisioning = 1;
+		q->limits.discard_granularity = sdkp->hw_sector_size;
+		q->limits.max_discard_sectors = 0xffffffff;
+
+		if (buffer[14] & 0x40) /* TPRZ */
+			q->limits.discard_zeroes_data = 1;
+
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	}
+
 	sdkp->capacity = lba + 1;
 	return sector_size;
 }
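Byte 14 of the READ CAPACITY(16) response carries the two bits tested here:
TPE (0x80) indicates the logical unit is thinly provisioned, and TPRZ (0x40)
indicates that unmapped blocks read back as zeroes. The granularity and
max_discard_sectors set at this point are conservative defaults, refined later
by sd_read_block_limits(). A minimal decode sketch (helper names are mine, not
kernel API):

/* Minimal decode of READ CAPACITY(16) byte 14; illustrative helpers. */
static inline int rc16_thin_provisioned(const unsigned char *buf)
{
	return !!(buf[14] & 0x80);	/* TPE */
}

static inline int rc16_unmapped_reads_zero(const unsigned char *buf)
{
	return !!(buf[14] & 0x40);	/* TPRZ */
}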
@@ -1863,6 +1944,7 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
  */
 static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
+	struct request_queue *q = sdkp->disk->queue;
 	unsigned int sector_sz = sdkp->device->sector_size;
 	char *buffer;
 
@@ -1877,6 +1959,31 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 	blk_queue_io_opt(sdkp->disk->queue,
 			 get_unaligned_be32(&buffer[12]) * sector_sz);
 
+	/* Thin provisioning enabled and page length indicates TP support */
+	if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
+		unsigned int lba_count, desc_count, granularity;
+
+		lba_count = get_unaligned_be32(&buffer[20]);
+		desc_count = get_unaligned_be32(&buffer[24]);
+
+		if (lba_count) {
+			q->limits.max_discard_sectors =
+				lba_count * sector_sz >> 9;
+
+			if (desc_count)
+				sdkp->unmap = 1;
+		}
+
+		granularity = get_unaligned_be32(&buffer[28]);
+
+		if (granularity)
+			q->limits.discard_granularity = granularity * sector_sz;
+
+		if (buffer[32] & 0x80)
+			q->limits.discard_alignment =
+				get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+	}
+
 	kfree(buffer);
 }
 
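The offsets consulted above correspond to the thin provisioning fields of the
Block Limits VPD page (B0h) in SBC-3; the 0x3c page-length check ensures the
page is long enough to contain them. A sketch of those fields, assuming SBC-3
naming (the struct is illustrative only):

/* Thin provisioning fields of the Block Limits VPD page (B0h),
 * bytes 20-35; big-endian, names per SBC-3, struct illustrative. */
struct block_limits_tp_fields {
	__be32 max_unmap_lba_count;		/* byte 20 */
	__be32 max_unmap_block_desc_count;	/* byte 24 */
	__be32 optimal_unmap_granularity;	/* byte 28 */
	__be32 unmap_granularity_alignment;	/* byte 32; bit 31 = UGAVALID */
} __packed;

max_unmap_lba_count is expressed in logical blocks, hence the
"* sector_sz >> 9" conversion to the 512-byte units the block layer expects;
a non-zero block descriptor count is what selects UNMAP over WRITE SAME(16)
in sd_prepare_discard().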