author    Scott Teel <scott.teel@hp.com>              2014-02-18 14:57:31 -0500
committer James Bottomley <JBottomley@Parallels.com>  2014-03-15 13:19:08 -0400
commit    dd0e19f3ceb87a768d09ea4726ee33961665bfbb (patch)
tree      6ff9bba8283523741b1df621f3a3b5ed011259ba /drivers/scsi
parent    51c35139e1f93f3b33e6598afb972b720d7e898c (diff)
[SCSI] hpsa: add controller-based data-at-rest encryption compatibility to ioaccel2

Add controller-based data-at-rest encryption compatibility to the
ioaccel2 path (HP SSD Smart Path).

The encryption feature requires the driver to supply additional fields
in the ioaccel2 request structure: an encryption enable flag, a tweak
value, and a data encryption key index.

The encryption enable flag and the data encryption key index come from
the raid_map data structure returned by the RAID offload command.
During ioaccel2 submission, check the device structure's raid map to
see whether encryption is enabled for the device; if so, call the new
function below.

Add function set_encrypt_ioaccel2 to set the encryption flag and data
encryption key index, and to calculate the tweak value from the
request's logical block address.

Signed-off-by: Scott Teel <scott.teel@hp.com>
Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
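[Editor's note: to make the tweak derivation concrete before the patch itself, here is a minimal standalone sketch of the calculation described above and implemented in set_encrypt_ioaccel2 below. compute_tweak() is a hypothetical helper written for illustration; it is not a function in the driver.]

    #include <stdint.h>

    /*
     * Illustrative helper (not part of hpsa): derive the encryption
     * tweak from the request's starting LBA. The tweak is the LBA
     * expressed in 512-byte units: for a 512-byte volume block size
     * it is the LBA itself; otherwise it is (LBA * block size) / 512,
     * split into the lower and upper 32-bit halves carried by the
     * ioaccel2 request.
     */
    static void compute_tweak(uint64_t first_block, uint32_t volume_blk_size,
                              uint32_t *tweak_lower, uint32_t *tweak_upper)
    {
            uint64_t tweak = (first_block * volume_blk_size) / 512;

            *tweak_lower = (uint32_t)tweak;
            *tweak_upper = (uint32_t)(tweak >> 32);
    }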
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/hpsa.c      | 179
-rw-r--r--  drivers/scsi/hpsa_cmd.h  |  19
2 files changed, 184 insertions(+), 14 deletions(-)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 092d06d4b9c3..442fef90ffb0 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2027,6 +2027,14 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
 			le16_to_cpu(map_buff->row_cnt));
 	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
 			le16_to_cpu(map_buff->layout_map_count));
+	dev_info(&h->pdev->dev, "flags = %u\n",
+			le16_to_cpu(map_buff->flags));
+	if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
+		dev_info(&h->pdev->dev, "encryption = ON\n");
+	else
+		dev_info(&h->pdev->dev, "encryption = OFF\n");
+	dev_info(&h->pdev->dev, "dekindex = %u\n",
+			le16_to_cpu(map_buff->dekindex));
 
 	map_cnt = le16_to_cpu(map_buff->layout_map_count);
 	for (map = 0; map < map_cnt; map++) {
@@ -2967,6 +2975,128 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
 		cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
 }
 
+/*
+ * Set encryption parameters for the ioaccel2 request
+ */
+static void set_encrypt_ioaccel2(struct ctlr_info *h,
+	struct CommandList *c, struct io_accel2_cmd *cp)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+	struct raid_map_data *map = &dev->raid_map;
+	u64 first_block;
+
+	BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+	/* Are we doing encryption on this device? */
+	if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
+		return;
+	/* Set the data encryption key index. */
+	cp->dekindex = map->dekindex;
+
+	/* Set the encryption enable flag, encoded into direction field. */
+	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
+
+	/* Set encryption tweak values based on logical block address.
+	 * If block size is 512, tweak value is LBA.
+	 * For other block sizes, tweak is (LBA * block size) / 512.
+	 */
+	switch (cmd->cmnd[0]) {
+	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
+	case WRITE_6:
+	case READ_6:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 8) |
+					cmd->cmnd[3];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 8) |
+					cmd->cmnd[3];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	case WRITE_10:
+	case READ_10:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 24) |
+				(((u64) cmd->cmnd[3]) << 16) |
+				(((u64) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
+	case WRITE_12:
+	case READ_12:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 24) |
+				(((u64) cmd->cmnd[3]) << 16) |
+				(((u64) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	case WRITE_16:
+	case READ_16:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[6]) << 24) |
+				(((u32) cmd->cmnd[7]) << 16) |
+				(((u32) cmd->cmnd[8]) << 8) |
+					cmd->cmnd[9];
+			cp->tweak_upper =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 56) |
+				(((u64) cmd->cmnd[3]) << 48) |
+				(((u64) cmd->cmnd[4]) << 40) |
+				(((u64) cmd->cmnd[5]) << 32) |
+				(((u64) cmd->cmnd[6]) << 24) |
+				(((u64) cmd->cmnd[7]) << 16) |
+				(((u64) cmd->cmnd[8]) << 8) |
+					cmd->cmnd[9];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	default:
+		dev_err(&h->pdev->dev,
+			"ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
+			__func__);
+		BUG();
+		break;
+	}
+}
+
 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
 	u8 *scsi3addr)
@@ -3016,13 +3146,16 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
 
 	switch (cmd->sc_data_direction) {
 	case DMA_TO_DEVICE:
-		cp->direction = IOACCEL2_DIR_DATA_OUT;
+		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+		cp->direction |= IOACCEL2_DIR_DATA_OUT;
 		break;
 	case DMA_FROM_DEVICE:
-		cp->direction = IOACCEL2_DIR_DATA_IN;
+		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+		cp->direction |= IOACCEL2_DIR_DATA_IN;
 		break;
 	case DMA_NONE:
-		cp->direction = IOACCEL2_DIR_NO_DATA;
+		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+		cp->direction |= IOACCEL2_DIR_NO_DATA;
 		break;
 	default:
 		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
@@ -3031,10 +3164,15 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
 			break;
 		}
 	} else {
-		cp->direction = IOACCEL2_DIR_NO_DATA;
+		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+		cp->direction |= IOACCEL2_DIR_NO_DATA;
 	}
+
+	/* Set encryption parameters, if necessary */
+	set_encrypt_ioaccel2(h, c, cp);
+
 	cp->scsi_nexus = ioaccel_handle;
-	cp->Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
+	cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
 				DIRECT_LOOKUP_BIT;
 	memcpy(cp->cdb, cdb, sizeof(cp->cdb));
 	memset(cp->cciss_lun, 0, sizeof(cp->cciss_lun));
@@ -3792,8 +3930,9 @@ static void hpsa_get_tag(struct ctlr_info *h,
 	if (c->cmd_type == CMD_IOACCEL2) {
 		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
 			&h->ioaccel2_cmd_pool[c->cmdindex];
-		*tagupper = cm2->Tag.upper;
-		*taglower = cm2->Tag.lower;
+		/* upper tag not used in ioaccel2 mode */
+		memset(tagupper, 0, sizeof(*tagupper));
+		*taglower = cm2->Tag;
 		return;
 	}
 	*tagupper = c->Header.Tag.upper;
@@ -3841,8 +3980,8 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
 		break;
 	}
 	cmd_special_free(h, c);
-	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
-		abort->Header.Tag.upper, abort->Header.Tag.lower);
+	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
+		__func__, tagupper, taglower);
 	return rc;
 }
 
@@ -6971,6 +7110,28 @@ static void __exit hpsa_cleanup(void)
 static void __attribute__((unused)) verify_offsets(void)
 {
 #define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
+
+	VERIFY_OFFSET(structure_size, 0);
+	VERIFY_OFFSET(volume_blk_size, 4);
+	VERIFY_OFFSET(volume_blk_cnt, 8);
+	VERIFY_OFFSET(phys_blk_shift, 16);
+	VERIFY_OFFSET(parity_rotation_shift, 17);
+	VERIFY_OFFSET(strip_size, 18);
+	VERIFY_OFFSET(disk_starting_blk, 20);
+	VERIFY_OFFSET(disk_blk_cnt, 28);
+	VERIFY_OFFSET(data_disks_per_row, 36);
+	VERIFY_OFFSET(metadata_disks_per_row, 38);
+	VERIFY_OFFSET(row_cnt, 40);
+	VERIFY_OFFSET(layout_map_count, 42);
+	VERIFY_OFFSET(flags, 44);
+	VERIFY_OFFSET(dekindex, 46);
+	/* VERIFY_OFFSET(reserved, 48) */
+	VERIFY_OFFSET(data, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
 	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
 
 	VERIFY_OFFSET(IU_type, 0);
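[Editor's note: the VERIFY_OFFSET blocks above are compile-time checks that the C struct layout matches the on-the-wire format. A standalone sketch of the same pattern follows; the struct and the userspace BUILD_BUG_ON stand-in are illustrative, not the driver's.]

    #include <stddef.h>

    /* Userspace stand-in for the kernel's BUILD_BUG_ON: fails to
     * compile (negative array size) when cond is true. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    /* Illustrative struct; the real checks target struct raid_map_data
     * and struct io_accel2_cmd. */
    struct example_map {
            unsigned int structure_size;    /* expected at offset 0 */
            unsigned int volume_blk_size;   /* expected at offset 4 */
    };

    #define VERIFY_OFFSET(member, offset) \
            BUILD_BUG_ON(offsetof(struct example_map, member) != (offset))

    static void __attribute__((unused)) verify_example_offsets(void)
    {
            VERIFY_OFFSET(structure_size, 0);
            VERIFY_OFFSET(volume_blk_size, 4);
    }
    #undef VERIFY_OFFSET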
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 7768092f0891..eaa7fdaa2e9d 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -209,7 +209,10 @@ struct raid_map_data {
 	u16 row_cnt;		/* rows in each layout map */
 	u16 layout_map_count;	/* layout maps (1 map per mirror/parity
 				 * group) */
-	u8  reserved[20];
+	u16 flags;		/* Bit 0 set if encryption enabled */
+#define RAID_MAP_FLAG_ENCRYPT_ON 0x01
+	u16 dekindex;		/* Data encryption key index. */
+	u8  reserved[16];
 	struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
 };
 
@@ -502,11 +505,17 @@ struct io_accel2_scsi_response {
  */
 struct io_accel2_cmd {
 	u8  IU_type;			/* IU Type */
-	u8  direction;			/* Transfer direction, 2 bits */
+	u8  direction;			/* direction, memtype, and encryption */
+#define IOACCEL2_DIRECTION_MASK		0x03 /* bits 0,1: direction */
+#define IOACCEL2_DIRECTION_MEMTYPE_MASK	0x04 /* bit 2: memtype source/dest */
+					     /* 0b=PCIe, 1b=DDR */
+#define IOACCEL2_DIRECTION_ENCRYPT_MASK	0x08 /* bit 3: encryption flag */
+					     /* 0=off, 1=on */
 	u8  reply_queue;		/* Reply Queue ID */
 	u8  reserved1;			/* Reserved */
 	u32 scsi_nexus;			/* Device Handle */
-	struct vals32 Tag;		/* cciss tag */
+	u32 Tag;			/* cciss tag, lower 4 bytes only */
+	u32 tweak_lower;		/* Encryption tweak, lower 4 bytes */
 	u8  cdb[16];			/* SCSI Command Descriptor Block */
 	u8  cciss_lun[8];		/* 8 byte SCSI address */
 	u32 data_len;			/* Total bytes to transfer */
@@ -514,10 +523,10 @@ struct io_accel2_cmd {
 #define IOACCEL2_PRIORITY_MASK 0x78
 #define IOACCEL2_ATTR_MASK 0x07
 	u8  sg_count;			/* Number of sg elements */
-	u8  reserved3[2];		/* Reserved */
+	u16 dekindex;			/* Data encryption key index */
 	u64 err_ptr;			/* Error Pointer */
 	u32 err_len;			/* Error Length*/
-	u8  reserved4[4];		/* Reserved */
+	u32 tweak_upper;		/* Encryption tweak, upper 4 bytes */
 	struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
 	struct io_accel2_scsi_response error_data;
 	u8  pad[IOACCEL2_PAD];
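[Editor's note: the reworked direction byte now packs three subfields (direction, memtype, encryption), which is why the hpsa.c hunks above switch from plain assignment to a clear-then-OR sequence. A minimal sketch of that update pattern follows; the dir_code parameter stands in for the IOACCEL2_DIR_* values defined elsewhere in hpsa_cmd.h.]

    #include <stdint.h>

    /* Masks as defined in the patch above. */
    #define IOACCEL2_DIRECTION_MASK         0x03  /* bits 0-1: direction */
    #define IOACCEL2_DIRECTION_ENCRYPT_MASK 0x08  /* bit 3: encryption */

    /* Replace only the 2-bit direction code, leaving the memtype and
     * encryption bits intact -- a plain assignment would wipe them. */
    static uint8_t set_direction(uint8_t direction, uint8_t dir_code)
    {
            direction &= ~IOACCEL2_DIRECTION_MASK;
            direction |= (dir_code & IOACCEL2_DIRECTION_MASK);
            return direction;
    }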