Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/scsi/Kconfig  |   1
 -rw-r--r--  drivers/scsi/Makefile |   2
 -rw-r--r--  drivers/scsi/sd.c     | 121
 -rw-r--r--  drivers/scsi/sd.h     |  25
 -rw-r--r--  drivers/scsi/sd_dif.c | 538
 5 files changed, 647 insertions(+), 40 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 26be540d1dd3..c7f06298bd3c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -63,6 +63,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
 config BLK_DEV_SD
         tristate "SCSI disk support"
         depends on SCSI
+        select CRC_T10DIF
         ---help---
           If you want to use SCSI hard disks, Fibre Channel disks,
           Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
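
BLK_DEV_SD now selects CRC_T10DIF because the guard tag in every DIF tuple is the CRC16 defined by T10, provided by the kernel's crc-t10dif library. As a rough, hypothetical illustration (not part of this patch; function and buffer names are made up), the helper from <linux/crc-t10dif.h> is used roughly like this:

        #include <linux/crc-t10dif.h>
        #include <asm/byteorder.h>

        /* Illustration only: compute the big-endian guard tag for one
         * 512-byte sector, the same way sd_dif_crc_fn() does further down. */
        static __be16 example_guard_tag(const void *sector_data)
        {
                return cpu_to_be16(crc_t10dif(sector_data, 512));
        }
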
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a8149677de23..72fd5043cfa1 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -151,6 +151,8 @@ scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
 scsi_tgt-y                += scsi_tgt_lib.o scsi_tgt_if.o
 
 sd_mod-objs        := sd.o
+sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
+
 sr_mod-objs        := sr.o sr_ioctl.o sr_vendor.o
 ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
                 := -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 56b9501d12f3..8e08d51a0f05 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -373,6 +373,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
         struct scsi_cmnd *SCpnt;
         struct scsi_device *sdp = q->queuedata;
         struct gendisk *disk = rq->rq_disk;
+        struct scsi_disk *sdkp;
         sector_t block = rq->sector;
         unsigned int this_count = rq->nr_sectors;
         unsigned int timeout = sdp->timeout;
@@ -389,6 +390,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
         if (ret != BLKPREP_OK)
                 goto out;
         SCpnt = rq->special;
+        sdkp = scsi_disk(disk);
 
         /* from here on until we're complete, any goto out
          * is used for a killable error condition */
@@ -478,6 +480,11 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
                 }
                 SCpnt->cmnd[0] = WRITE_6;
                 SCpnt->sc_data_direction = DMA_TO_DEVICE;
+
+                if (blk_integrity_rq(rq) &&
+                    sd_dif_prepare(rq, block, sdp->sector_size) == -EIO)
+                        goto out;
+
         } else if (rq_data_dir(rq) == READ) {
                 SCpnt->cmnd[0] = READ_6;
                 SCpnt->sc_data_direction = DMA_FROM_DEVICE;
@@ -492,8 +499,12 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
                                         "writing" : "reading", this_count,
                                         rq->nr_sectors));
 
-        SCpnt->cmnd[1] = 0;
-
+        /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
+        if (scsi_host_dif_capable(sdp->host, sdkp->protection_type))
+                SCpnt->cmnd[1] = 1 << 5;
+        else
+                SCpnt->cmnd[1] = 0;
+
         if (block > 0xffffffff) {
                 SCpnt->cmnd[0] += READ_16 - READ_6;
                 SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
@@ -511,6 +522,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
                 SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
                 SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
         } else if ((this_count > 0xff) || (block > 0x1fffff) ||
+                   scsi_device_protection(SCpnt->device) ||
                    SCpnt->device->use_10_for_rw) {
                 if (this_count > 0xffff)
                         this_count = 0xffff;
@@ -545,6 +557,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
         }
         SCpnt->sdb.length = this_count * sdp->sector_size;
 
+        /* If DIF or DIX is enabled, tell HBA how to handle request */
+        if (sdkp->protection_type || scsi_prot_sg_count(SCpnt))
+                sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt));
+
         /*
          * We shouldn't disconnect in the middle of a sector, so with a dumb
          * host adapter, it's safe to assume that we can at least transfer
@@ -939,6 +955,48 @@ static struct block_device_operations sd_fops = {
         .revalidate_disk        = sd_revalidate_disk,
 };
 
+static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
+{
+        u64 start_lba = scmd->request->sector;
+        u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
+        u64 bad_lba;
+        int info_valid;
+
+        if (!blk_fs_request(scmd->request))
+                return 0;
+
+        info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
+                                             SCSI_SENSE_BUFFERSIZE,
+                                             &bad_lba);
+        if (!info_valid)
+                return 0;
+
+        if (scsi_bufflen(scmd) <= scmd->device->sector_size)
+                return 0;
+
+        if (scmd->device->sector_size < 512) {
+                /* only legitimate sector_size here is 256 */
+                start_lba <<= 1;
+                end_lba <<= 1;
+        } else {
+                /* be careful ... don't want any overflows */
+                u64 factor = scmd->device->sector_size / 512;
+                do_div(start_lba, factor);
+                do_div(end_lba, factor);
+        }
+
+        /* The bad lba was reported incorrectly, we have no idea where
+         * the error is.
+         */
+        if (bad_lba < start_lba || bad_lba >= end_lba)
+                return 0;
+
+        /* This computation should always be done in terms of
+         * the resolution of the device's medium.
+         */
+        return (bad_lba - start_lba) * scmd->device->sector_size;
+}
+
 /**
  *        sd_done - bottom half handler: called when the lower level
  *        driver has completed (successfully or otherwise) a scsi command.
@@ -949,15 +1007,10 @@ static struct block_device_operations sd_fops = {
 static int sd_done(struct scsi_cmnd *SCpnt)
 {
         int result = SCpnt->result;
-        unsigned int xfer_size = scsi_bufflen(SCpnt);
-        unsigned int good_bytes = result ? 0 : xfer_size;
-        u64 start_lba = SCpnt->request->sector;
-        u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
-        u64 bad_lba;
+        unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
         struct scsi_sense_hdr sshdr;
         int sense_valid = 0;
         int sense_deferred = 0;
-        int info_valid;
 
         if (result) {
                 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -982,36 +1035,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
         switch (sshdr.sense_key) {
         case HARDWARE_ERROR:
         case MEDIUM_ERROR:
-                if (!blk_fs_request(SCpnt->request))
-                        goto out;
-                info_valid = scsi_get_sense_info_fld(SCpnt->sense_buffer,
-                                                     SCSI_SENSE_BUFFERSIZE,
-                                                     &bad_lba);
-                if (!info_valid)
-                        goto out;
-                if (xfer_size <= SCpnt->device->sector_size)
-                        goto out;
-                if (SCpnt->device->sector_size < 512) {
-                        /* only legitimate sector_size here is 256 */
-                        start_lba <<= 1;
-                        end_lba <<= 1;
-                } else {
-                        /* be careful ... don't want any overflows */
-                        u64 factor = SCpnt->device->sector_size / 512;
-                        do_div(start_lba, factor);
-                        do_div(end_lba, factor);
-                }
-
-                if (bad_lba < start_lba || bad_lba >= end_lba)
-                        /* the bad lba was reported incorrectly, we have
-                         * no idea where the error is
-                         */
-                        goto out;
-
-                /* This computation should always be done in terms of
-                 * the resolution of the device's medium.
-                 */
-                good_bytes = (bad_lba - start_lba)*SCpnt->device->sector_size;
+                good_bytes = sd_completed_bytes(SCpnt);
                 break;
         case RECOVERED_ERROR:
         case NO_SENSE:
@@ -1021,10 +1045,23 @@ static int sd_done(struct scsi_cmnd *SCpnt)
                 scsi_print_sense("sd", SCpnt);
                 SCpnt->result = 0;
                 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-                good_bytes = xfer_size;
+                good_bytes = scsi_bufflen(SCpnt);
+                break;
+        case ABORTED_COMMAND:
+                if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
+                        scsi_print_result(SCpnt);
+                        scsi_print_sense("sd", SCpnt);
+                        good_bytes = sd_completed_bytes(SCpnt);
+                }
                 break;
         case ILLEGAL_REQUEST:
-                if (SCpnt->device->use_10_for_rw &&
+                if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */
+                        scsi_print_result(SCpnt);
+                        scsi_print_sense("sd", SCpnt);
+                        good_bytes = sd_completed_bytes(SCpnt);
+                }
+                if (!scsi_device_protection(SCpnt->device) &&
+                    SCpnt->device->use_10_for_rw &&
                     (SCpnt->cmnd[0] == READ_10 ||
                      SCpnt->cmnd[0] == WRITE_10))
                         SCpnt->device->use_10_for_rw = 0;
@@ -1037,6 +1074,9 @@ static int sd_done(struct scsi_cmnd *SCpnt)
                 break;
         }
  out:
+        if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
+                sd_dif_complete(SCpnt, good_bytes);
+
         return good_bytes;
 }
 
@@ -1826,6 +1866,7 @@ static int sd_probe(struct device *dev)
 
         dev_set_drvdata(dev, sdkp);
         add_disk(gd);
+        sd_dif_config_host(sdkp);
 
         sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
                   sdp->removable ? "removable " : "");
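
The error-recovery arithmetic in the new sd_completed_bytes() works in two units: the request start and length arrive from the block layer in 512-byte sectors, while the INFORMATION field in the sense data reports the bad LBA in the device's own logical block size. A hypothetical worked sketch of that scaling (illustrative numbers only, not part of the patch):

        #include <linux/types.h>
        #include <asm/div64.h>

        /* Illustration only: a 4096-byte-sector disk, a 64 KiB request
         * starting at 512-byte sector 80, and a reported bad_lba of 13,
         * mirroring the math in sd_completed_bytes(). */
        static unsigned int example_completed_bytes(void)
        {
                u64 start_lba = 80;                     /* request->sector */
                u64 end_lba = 80 + (65536 / 512);       /* 208 */
                u64 bad_lba = 13;                       /* in 4096-byte blocks */
                u64 factor = 4096 / 512;

                do_div(start_lba, factor);              /* 10 */
                do_div(end_lba, factor);                /* 26 */

                if (bad_lba < start_lba || bad_lba >= end_lba)
                        return 0;

                return (bad_lba - start_lba) * 4096;    /* 12288 good bytes */
        }
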
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 86b18d4170fe..550b2f70a1f8 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -82,4 +82,29 @@ enum sd_dif_target_protection_types {
         SD_DIF_TYPE3_PROTECTION = 0x3,
 };
 
+/*
+ * Data Integrity Field tuple.
+ */
+struct sd_dif_tuple {
+        __be16 guard_tag;        /* Checksum */
+        __be16 app_tag;          /* Opaque storage */
+        __be32 ref_tag;          /* Target LBA or indirect LBA */
+};
+
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int);
+extern void sd_dif_config_host(struct scsi_disk *);
+extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
+extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+#define sd_dif_op(a, b, c)              do { } while (0)
+#define sd_dif_config_host(a)           do { } while (0)
+#define sd_dif_prepare(a, b, c)         (0)
+#define sd_dif_complete(a, b)           (0)
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 #endif /* _SCSI_DISK_H */
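
Each logical block formatted with DIF carries one of these 8-byte tuples, so a 512-byte sector becomes a 520-byte protection interval on the wire, and the stub macros in the #else branch let sd.c build unchanged when CONFIG_BLK_DEV_INTEGRITY is off. A minimal, hypothetical compile-time check of that layout assumption (illustration only, not part of the patch):

        #include <linux/kernel.h>

        #include "sd.h"

        /* Illustration only: the DIF tuple must pack to exactly 8 bytes
         * (2 + 2 + 4), one tuple per logical block. */
        static inline void example_check_dif_tuple(void)
        {
                BUILD_BUG_ON(sizeof(struct sd_dif_tuple) != 8);
        }
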
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
new file mode 100644
index 000000000000..4d17f3d35aac
--- /dev/null
+++ b/drivers/scsi/sd_dif.c
@@ -0,0 +1,538 @@
+/*
+ * sd_dif.c - SCSI Data Integrity Field
+ *
+ * Copyright (C) 2007, 2008 Oracle Corporation
+ * Written by: Martin K. Petersen <martin.petersen@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/blkdev.h>
+#include <linux/crc-t10dif.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsicam.h>
+
+#include <net/checksum.h>
+
+#include "sd.h"
+
+typedef __u16 (csum_fn) (void *, unsigned int);
+
+static __u16 sd_dif_crc_fn(void *data, unsigned int len)
+{
+        return cpu_to_be16(crc_t10dif(data, len));
+}
+
+static __u16 sd_dif_ip_fn(void *data, unsigned int len)
+{
+        return ip_compute_csum(data, len);
+}
+
+/*
+ * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
+ * 16 bit app tag, 32 bit reference tag.
+ */
+static void sd_dif_type1_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+        void *buf = bix->data_buf;
+        struct sd_dif_tuple *sdt = bix->prot_buf;
+        sector_t sector = bix->sector;
+        unsigned int i;
+
+        for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+                sdt->guard_tag = fn(buf, bix->sector_size);
+                sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+                sdt->app_tag = 0;
+
+                buf += bix->sector_size;
+                sector++;
+        }
+}
+
+static void sd_dif_type1_generate_crc(struct blk_integrity_exchg *bix)
+{
+        sd_dif_type1_generate(bix, sd_dif_crc_fn);
+}
+
+static void sd_dif_type1_generate_ip(struct blk_integrity_exchg *bix)
+{
+        sd_dif_type1_generate(bix, sd_dif_ip_fn);
+}
+
+static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+        void *buf = bix->data_buf;
+        struct sd_dif_tuple *sdt = bix->prot_buf;
+        sector_t sector = bix->sector;
+        unsigned int i;
+        __u16 csum;
+
+        for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+                /* Unwritten sectors */
+                if (sdt->app_tag == 0xffff)
+                        return 0;
+
+                /* Bad ref tag received from disk */
+                if (sdt->ref_tag == 0xffffffff) {
+                        printk(KERN_ERR
+                               "%s: bad phys ref tag on sector %lu\n",
+                               bix->disk_name, (unsigned long)sector);
+                        return -EIO;
+                }
+
+                if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+                        printk(KERN_ERR
+                               "%s: ref tag error on sector %lu (rcvd %u)\n",
+                               bix->disk_name, (unsigned long)sector,
+                               be32_to_cpu(sdt->ref_tag));
+                        return -EIO;
+                }
+
+                csum = fn(buf, bix->sector_size);
+
+                if (sdt->guard_tag != csum) {
+                        printk(KERN_ERR "%s: guard tag error on sector %lu " \
+                               "(rcvd %04x, data %04x)\n", bix->disk_name,
+                               (unsigned long)sector,
+                               be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+                        return -EIO;
+                }
+
+                buf += bix->sector_size;
+                sector++;
+        }
+
+        return 0;
+}
+
+static int sd_dif_type1_verify_crc(struct blk_integrity_exchg *bix)
+{
+        return sd_dif_type1_verify(bix, sd_dif_crc_fn);
+}
+
+static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
+{
+        return sd_dif_type1_verify(bix, sd_dif_ip_fn);
+}
+
+/*
+ * Functions for interleaving and deinterleaving application tags
+ */
+static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+        struct sd_dif_tuple *sdt = prot;
+        char *tag = tag_buf;
+        unsigned int i, j;
+
+        for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
+                sdt->app_tag = tag[j] << 8 | tag[j+1];
+                BUG_ON(sdt->app_tag == 0xffff);
+        }
+}
+
+static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+        struct sd_dif_tuple *sdt = prot;
+        char *tag = tag_buf;
+        unsigned int i, j;
+
+        for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
+                tag[j] = (sdt->app_tag & 0xff00) >> 8;
+                tag[j+1] = sdt->app_tag & 0xff;
+        }
+}
+
+static struct blk_integrity dif_type1_integrity_crc = {
+        .name           = "T10-DIF-TYPE1-CRC",
+        .generate_fn    = sd_dif_type1_generate_crc,
+        .verify_fn      = sd_dif_type1_verify_crc,
+        .get_tag_fn     = sd_dif_type1_get_tag,
+        .set_tag_fn     = sd_dif_type1_set_tag,
+        .tuple_size     = sizeof(struct sd_dif_tuple),
+        .tag_size       = 0,
+};
+
+static struct blk_integrity dif_type1_integrity_ip = {
+        .name           = "T10-DIF-TYPE1-IP",
+        .generate_fn    = sd_dif_type1_generate_ip,
+        .verify_fn      = sd_dif_type1_verify_ip,
+        .get_tag_fn     = sd_dif_type1_get_tag,
+        .set_tag_fn     = sd_dif_type1_set_tag,
+        .tuple_size     = sizeof(struct sd_dif_tuple),
+        .tag_size       = 0,
+};
+
+
+/*
+ * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
+ * tag space.
+ */
+static void sd_dif_type3_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+        void *buf = bix->data_buf;
+        struct sd_dif_tuple *sdt = bix->prot_buf;
+        unsigned int i;
+
+        for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+                sdt->guard_tag = fn(buf, bix->sector_size);
+                sdt->ref_tag = 0;
+                sdt->app_tag = 0;
+
+                buf += bix->sector_size;
+        }
+}
+
+static void sd_dif_type3_generate_crc(struct blk_integrity_exchg *bix)
+{
+        sd_dif_type3_generate(bix, sd_dif_crc_fn);
+}
+
+static void sd_dif_type3_generate_ip(struct blk_integrity_exchg *bix)
+{
+        sd_dif_type3_generate(bix, sd_dif_ip_fn);
+}
+
+static int sd_dif_type3_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
+{
+        void *buf = bix->data_buf;
+        struct sd_dif_tuple *sdt = bix->prot_buf;
+        sector_t sector = bix->sector;
+        unsigned int i;
+        __u16 csum;
+
+        for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
+                /* Unwritten sectors */
+                if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
+                        return 0;
+
+                csum = fn(buf, bix->sector_size);
+
+                if (sdt->guard_tag != csum) {
+                        printk(KERN_ERR "%s: guard tag error on sector %lu " \
+                               "(rcvd %04x, data %04x)\n", bix->disk_name,
+                               (unsigned long)sector,
+                               be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+                        return -EIO;
+                }
+
+                buf += bix->sector_size;
+                sector++;
+        }
+
+        return 0;
+}
+
+static int sd_dif_type3_verify_crc(struct blk_integrity_exchg *bix)
+{
+        return sd_dif_type3_verify(bix, sd_dif_crc_fn);
+}
+
+static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
+{
+        return sd_dif_type3_verify(bix, sd_dif_ip_fn);
+}
+
+static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+        struct sd_dif_tuple *sdt = prot;
+        char *tag = tag_buf;
+        unsigned int i, j;
+
+        for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
+                sdt->app_tag = tag[j] << 8 | tag[j+1];
+                sdt->ref_tag = tag[j+2] << 24 | tag[j+3] << 16 |
+                               tag[j+4] << 8 | tag[j+5];
+        }
+}
+
+static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
+{
+        struct sd_dif_tuple *sdt = prot;
+        char *tag = tag_buf;
+        unsigned int i, j;
+
+        for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
+                tag[j] = (sdt->app_tag & 0xff00) >> 8;
+                tag[j+1] = sdt->app_tag & 0xff;
+                tag[j+2] = (sdt->ref_tag & 0xff000000) >> 24;
+                tag[j+3] = (sdt->ref_tag & 0xff0000) >> 16;
+                tag[j+4] = (sdt->ref_tag & 0xff00) >> 8;
+                tag[j+5] = sdt->ref_tag & 0xff;
+                BUG_ON(sdt->app_tag == 0xffff || sdt->ref_tag == 0xffffffff);
+        }
+}
+
+static struct blk_integrity dif_type3_integrity_crc = {
+        .name           = "T10-DIF-TYPE3-CRC",
+        .generate_fn    = sd_dif_type3_generate_crc,
+        .verify_fn      = sd_dif_type3_verify_crc,
+        .get_tag_fn     = sd_dif_type3_get_tag,
+        .set_tag_fn     = sd_dif_type3_set_tag,
+        .tuple_size     = sizeof(struct sd_dif_tuple),
+        .tag_size       = 0,
+};
+
+static struct blk_integrity dif_type3_integrity_ip = {
+        .name           = "T10-DIF-TYPE3-IP",
+        .generate_fn    = sd_dif_type3_generate_ip,
+        .verify_fn      = sd_dif_type3_verify_ip,
+        .get_tag_fn     = sd_dif_type3_get_tag,
+        .set_tag_fn     = sd_dif_type3_set_tag,
+        .tuple_size     = sizeof(struct sd_dif_tuple),
+        .tag_size       = 0,
+};
+
+/*
+ * Configure exchange of protection information between OS and HBA.
+ */
+void sd_dif_config_host(struct scsi_disk *sdkp)
+{
+        struct scsi_device *sdp = sdkp->device;
+        struct gendisk *disk = sdkp->disk;
+        u8 type = sdkp->protection_type;
+
+        /* If this HBA doesn't support DIX, resort to normal I/O or DIF */
+        if (scsi_host_dix_capable(sdp->host, type) == 0) {
+
+                if (type == SD_DIF_TYPE0_PROTECTION)
+                        return;
+
+                if (scsi_host_dif_capable(sdp->host, type) == 0) {
+                        sd_printk(KERN_INFO, sdkp, "Type %d protection " \
+                                  "unsupported by HBA. Disabling DIF.\n", type);
+                        sdkp->protection_type = 0;
+                        return;
+                }
+
+                sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n",
+                          type);
+
+                return;
+        }
+
+        /* Enable DMA of protection information */
+        if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
+                if (type == SD_DIF_TYPE3_PROTECTION)
+                        blk_integrity_register(disk, &dif_type3_integrity_ip);
+                else
+                        blk_integrity_register(disk, &dif_type1_integrity_ip);
+        else
+                if (type == SD_DIF_TYPE3_PROTECTION)
+                        blk_integrity_register(disk, &dif_type3_integrity_crc);
+                else
+                        blk_integrity_register(disk, &dif_type1_integrity_crc);
+
+        sd_printk(KERN_INFO, sdkp,
+                  "Enabling %s integrity protection\n", disk->integrity->name);
+
+        /* Signal to block layer that we support sector tagging */
+        if (type && sdkp->ATO) {
+                if (type == SD_DIF_TYPE3_PROTECTION)
+                        disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
+                else
+                        disk->integrity->tag_size = sizeof(u16);
+
+                sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n",
+                          disk->integrity->tag_size);
+        }
+}
+
+/*
+ * DIF DMA operation magic decoder ring.
+ */
+void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
+{
+        int csum_convert, prot_op;
+
+        prot_op = 0;
+
+        /* Convert checksum? */
+        if (scsi_host_get_guard(scmd->device->host) != SHOST_DIX_GUARD_CRC)
+                csum_convert = 1;
+        else
+                csum_convert = 0;
+
+        switch (scmd->cmnd[0]) {
+        case READ_10:
+        case READ_12:
+        case READ_16:
+                if (dif && dix)
+                        if (csum_convert)
+                                prot_op = SCSI_PROT_READ_CONVERT;
+                        else
+                                prot_op = SCSI_PROT_READ_PASS;
+                else if (dif && !dix)
+                        prot_op = SCSI_PROT_READ_STRIP;
+                else if (!dif && dix)
+                        prot_op = SCSI_PROT_READ_INSERT;
+
+                break;
+
+        case WRITE_10:
+        case WRITE_12:
+        case WRITE_16:
+                if (dif && dix)
+                        if (csum_convert)
+                                prot_op = SCSI_PROT_WRITE_CONVERT;
+                        else
+                                prot_op = SCSI_PROT_WRITE_PASS;
+                else if (dif && !dix)
+                        prot_op = SCSI_PROT_WRITE_INSERT;
+                else if (!dif && dix)
+                        prot_op = SCSI_PROT_WRITE_STRIP;
+
+                break;
+        }
+
+        scsi_set_prot_op(scmd, prot_op);
+        scsi_set_prot_type(scmd, dif);
+}
+
+/*
+ * The virtual start sector is the one that was originally submitted
+ * by the block layer.  Due to partitioning, MD/DM cloning, etc. the
+ * actual physical start sector is likely to be different.  Remap
+ * protection information to match the physical LBA.
+ *
+ * From a protocol perspective there's a slight difference between
+ * Type 1 and 2.  The latter uses 32-byte CDBs exclusively, and the
+ * reference tag is seeded in the CDB.  This gives us the potential to
+ * avoid virt->phys remapping during write.  However, at read time we
+ * don't know whether the virt sector is the same as when we wrote it
+ * (we could be reading from the real disk as opposed to the MD/DM
+ * device), so we always remap Type 2, making it identical to Type 1.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
+{
+        const int tuple_sz = sizeof(struct sd_dif_tuple);
+        struct bio *bio;
+        struct scsi_disk *sdkp;
+        struct sd_dif_tuple *sdt;
+        unsigned int i, j;
+        u32 phys, virt;
+
+        /* Already remapped? */
+        if (rq->cmd_flags & REQ_INTEGRITY)
+                return 0;
+
+        sdkp = rq->bio->bi_bdev->bd_disk->private_data;
+
+        if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
+                return 0;
+
+        rq->cmd_flags |= REQ_INTEGRITY;
+        phys = hw_sector & 0xffffffff;
+
+        __rq_for_each_bio(bio, rq) {
+                struct bio_vec *iv;
+
+                virt = bio->bi_integrity->bip_sector & 0xffffffff;
+
+                bip_for_each_vec(iv, bio->bi_integrity, i) {
+                        sdt = kmap_atomic(iv->bv_page, KM_USER0)
+                                + iv->bv_offset;
+
+                        for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+
+                                if (be32_to_cpu(sdt->ref_tag) != virt)
+                                        goto error;
+
+                                sdt->ref_tag = cpu_to_be32(phys);
+                                virt++;
+                                phys++;
+                        }
+
+                        kunmap_atomic(sdt, KM_USER0);
+                }
+        }
+
+        return 0;
+
+error:
+        kunmap_atomic(sdt, KM_USER0);
+        sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u\n",
+                  __func__, virt, phys, be32_to_cpu(sdt->ref_tag));
+
+        return -EIO;
+}
+
+/*
+ * Remap physical sector values in the reference tag to the virtual
+ * values expected by the block layer.
+ */
+void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
+{
+        const int tuple_sz = sizeof(struct sd_dif_tuple);
+        struct scsi_disk *sdkp;
+        struct bio *bio;
+        struct sd_dif_tuple *sdt;
+        unsigned int i, j, sectors, sector_sz;
+        u32 phys, virt;
+
+        sdkp = scsi_disk(scmd->request->rq_disk);
+
+        if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
+                return;
+
+        sector_sz = scmd->device->sector_size;
+        sectors = good_bytes / sector_sz;
+
+        phys = scmd->request->sector & 0xffffffff;
+        if (sector_sz == 4096)
+                phys >>= 3;
+
+        __rq_for_each_bio(bio, scmd->request) {
+                struct bio_vec *iv;
+
+                virt = bio->bi_integrity->bip_sector & 0xffffffff;
+
+                bip_for_each_vec(iv, bio->bi_integrity, i) {
+                        sdt = kmap_atomic(iv->bv_page, KM_USER0)
+                                + iv->bv_offset;
+
+                        for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+
+                                if (sectors == 0) {
+                                        kunmap_atomic(sdt, KM_USER0);
+                                        return;
+                                }
+
+                                if (be32_to_cpu(sdt->ref_tag) != phys &&
+                                    sdt->app_tag != 0xffff)
+                                        sdt->ref_tag = 0xffffffff; /* Bad ref */
+                                else
+                                        sdt->ref_tag = cpu_to_be32(virt);
+
+                                virt++;
+                                phys++;
+                                sectors--;
+                        }
+
+                        kunmap_atomic(sdt, KM_USER0);
+                }
+        }
+}
+
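
sd_dif_prepare() and sd_dif_complete() exist because the reference tag travels with the data: the block layer seeds it with the virtual (submitted) sector, while the disk checks it against the physical LBA after partition or MD/DM remapping. A hypothetical per-tuple sketch of the write-side remap (illustration only, not part of the patch; the real code walks every bio and bio_vec as above):

        /* Illustration only: remap one tuple from the virtual sector the
         * block layer stamped into it to the physical sector the disk
         * will verify on the write. */
        static int example_remap_tuple(struct sd_dif_tuple *sdt, u32 virt, u32 phys)
        {
                if (be32_to_cpu(sdt->ref_tag) != virt)
                        return -EIO;    /* protection data no longer matches */

                sdt->ref_tag = cpu_to_be32(phys);
                return 0;
        }

On read completion, sd_dif_complete() applies the inverse mapping (physical back to virtual) so the tags line up with what the block layer's integrity verification expects.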