path: root/drivers
author	Bart Van Assche <bvanassche@acm.org>	2019-04-17 17:44:38 -0400
committer	Martin K. Petersen <martin.petersen@oracle.com>	2019-04-29 17:24:51 -0400
commit	15b7a68c1d030b2365c823730d0eb9257f2aa60e (patch)
tree	2076d18c7fa823873c41d12bb945178b43a30fc9 /drivers
parent	bc04459ce4e5d394d79fe2a0660d43c1a40b6eb8 (diff)
scsi: qla2xxx: Introduce the dsd32 and dsd64 data structures
Introduce two structures for the (DMA address, length) combination instead of using separate structure members for the DMA address and length. This patch fixes several Coverity complaints about 'cur_dsd' being used to write outside the bounds of structure members.

Cc: Himanshu Madhani <hmadhani@marvell.com>
Cc: Giridhar Malavali <gmalavali@marvell.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Himanshu Madhani <hmadhani@marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
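For context, the sketch below (not part of the patch) contrasts the two DSD-population styles this commit converts between: the old style writes the low DMA word, the high DMA word and the length through a bare uint32_t cursor, while the new style goes through a typed struct dsd64 cursor and the append_dsd64() helper added in qla_dsd.h. The wrapper names fill_dsd_old()/fill_dsd_new() are illustrative only.

/* Illustrative sketch, not from the patch: how one scatterlist element
 * is turned into a 64-bit DSD before and after this commit. */

/* Before: three untyped 32-bit writes per segment. */
static void fill_dsd_old(uint32_t **cur_dsd, struct scatterlist *sg)
{
	dma_addr_t sle_dma = sg_dma_address(sg);

	*(*cur_dsd)++ = cpu_to_le32(LSD(sle_dma));   /* address, low 32 bits */
	*(*cur_dsd)++ = cpu_to_le32(MSD(sle_dma));   /* address, high 32 bits */
	*(*cur_dsd)++ = cpu_to_le32(sg_dma_len(sg)); /* segment length */
}

/* After: a typed descriptor filled by one helper call. */
static void fill_dsd_new(struct dsd64 **cur_dsd, struct scatterlist *sg)
{
	append_dsd64(cur_dsd, sg); /* writes address + length, advances the cursor */
}

Using the typed cursor means the compiler (and Coverity) can see where each descriptor ends, which is what eliminates the out-of-bounds warnings on 'cur_dsd'.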
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/qla2xxx/qla_bsg.c	10
-rw-r--r--	drivers/scsi/qla2xxx/qla_def.h	58
-rw-r--r--	drivers/scsi/qla2xxx/qla_dsd.h	30
-rw-r--r--	drivers/scsi/qla2xxx/qla_fw.h	22
-rw-r--r--	drivers/scsi/qla2xxx/qla_gbl.h	6
-rw-r--r--	drivers/scsi/qla2xxx/qla_gs.c	44
-rw-r--r--	drivers/scsi/qla2xxx/qla_iocb.c	255
-rw-r--r--	drivers/scsi/qla2xxx/qla_mr.c	57
-rw-r--r--	drivers/scsi/qla2xxx/qla_mr.h	11
-rw-r--r--	drivers/scsi/qla2xxx/qla_nvme.c	14
-rw-r--r--	drivers/scsi/qla2xxx/qla_nvme.h	9
-rw-r--r--	drivers/scsi/qla2xxx/qla_target.c	37
-rw-r--r--	drivers/scsi/qla2xxx/qla_target.h	13
13 files changed, 221 insertions, 345 deletions
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 45f26ea5f9d9..5441557b424b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1056,9 +1056,8 @@ qla84xx_updatefw(struct bsg_job *bsg_job)
1056 mn->fw_ver = cpu_to_le32(fw_ver); 1056 mn->fw_ver = cpu_to_le32(fw_ver);
1057 mn->fw_size = cpu_to_le32(data_len); 1057 mn->fw_size = cpu_to_le32(data_len);
1058 mn->fw_seq_size = cpu_to_le32(data_len); 1058 mn->fw_seq_size = cpu_to_le32(data_len);
1059 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma)); 1059 put_unaligned_le64(fw_dma, &mn->dsd.address);
1060 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma)); 1060 mn->dsd.length = cpu_to_le32(data_len);
1061 mn->dseg_length = cpu_to_le32(data_len);
1062 mn->data_seg_cnt = cpu_to_le16(1); 1061 mn->data_seg_cnt = cpu_to_le16(1);
1063 1062
1064 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 1063 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
@@ -1237,9 +1236,8 @@ qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
1237 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) { 1236 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1238 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len); 1237 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1239 mn->dseg_count = cpu_to_le16(1); 1238 mn->dseg_count = cpu_to_le16(1);
1240 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma)); 1239 put_unaligned_le64(mgmt_dma, &mn->dsd.address);
1241 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma)); 1240 mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
1242 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1243 } 1241 }
1244 1242
1245 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); 1243 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 8acaeba98da1..502a4812bf51 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -35,6 +35,7 @@
35#include <scsi/scsi_bsg_fc.h> 35#include <scsi/scsi_bsg_fc.h>
36 36
37#include "qla_bsg.h" 37#include "qla_bsg.h"
38#include "qla_dsd.h"
38#include "qla_nx.h" 39#include "qla_nx.h"
39#include "qla_nx2.h" 40#include "qla_nx2.h"
40#include "qla_nvme.h" 41#include "qla_nvme.h"
@@ -1754,12 +1755,10 @@ typedef struct {
1754 uint16_t dseg_count; /* Data segment count. */ 1755 uint16_t dseg_count; /* Data segment count. */
1755 uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ 1756 uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
1756 uint32_t byte_count; /* Total byte count. */ 1757 uint32_t byte_count; /* Total byte count. */
1757 uint32_t dseg_0_address; /* Data segment 0 address. */ 1758 union {
1758 uint32_t dseg_0_length; /* Data segment 0 length. */ 1759 struct dsd32 dsd32[3];
1759 uint32_t dseg_1_address; /* Data segment 1 address. */ 1760 struct dsd64 dsd64[2];
1760 uint32_t dseg_1_length; /* Data segment 1 length. */ 1761 };
1761 uint32_t dseg_2_address; /* Data segment 2 address. */
1762 uint32_t dseg_2_length; /* Data segment 2 length. */
1763} cmd_entry_t; 1762} cmd_entry_t;
1764 1763
1765/* 1764/*
@@ -1780,10 +1779,7 @@ typedef struct {
1780 uint16_t dseg_count; /* Data segment count. */ 1779 uint16_t dseg_count; /* Data segment count. */
1781 uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ 1780 uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
1782 uint32_t byte_count; /* Total byte count. */ 1781 uint32_t byte_count; /* Total byte count. */
1783 uint32_t dseg_0_address[2]; /* Data segment 0 address. */ 1782 struct dsd64 dsd[2];
1784 uint32_t dseg_0_length; /* Data segment 0 length. */
1785 uint32_t dseg_1_address[2]; /* Data segment 1 address. */
1786 uint32_t dseg_1_length; /* Data segment 1 length. */
1787} cmd_a64_entry_t, request_t; 1783} cmd_a64_entry_t, request_t;
1788 1784
1789/* 1785/*
@@ -1796,20 +1792,7 @@ typedef struct {
1796 uint8_t sys_define; /* System defined. */ 1792 uint8_t sys_define; /* System defined. */
1797 uint8_t entry_status; /* Entry Status. */ 1793 uint8_t entry_status; /* Entry Status. */
1798 uint32_t reserved; 1794 uint32_t reserved;
1799 uint32_t dseg_0_address; /* Data segment 0 address. */ 1795 struct dsd32 dsd[7];
1800 uint32_t dseg_0_length; /* Data segment 0 length. */
1801 uint32_t dseg_1_address; /* Data segment 1 address. */
1802 uint32_t dseg_1_length; /* Data segment 1 length. */
1803 uint32_t dseg_2_address; /* Data segment 2 address. */
1804 uint32_t dseg_2_length; /* Data segment 2 length. */
1805 uint32_t dseg_3_address; /* Data segment 3 address. */
1806 uint32_t dseg_3_length; /* Data segment 3 length. */
1807 uint32_t dseg_4_address; /* Data segment 4 address. */
1808 uint32_t dseg_4_length; /* Data segment 4 length. */
1809 uint32_t dseg_5_address; /* Data segment 5 address. */
1810 uint32_t dseg_5_length; /* Data segment 5 length. */
1811 uint32_t dseg_6_address; /* Data segment 6 address. */
1812 uint32_t dseg_6_length; /* Data segment 6 length. */
1813} cont_entry_t; 1796} cont_entry_t;
1814 1797
1815/* 1798/*
@@ -1821,16 +1804,7 @@ typedef struct {
1821 uint8_t entry_count; /* Entry count. */ 1804 uint8_t entry_count; /* Entry count. */
1822 uint8_t sys_define; /* System defined. */ 1805 uint8_t sys_define; /* System defined. */
1823 uint8_t entry_status; /* Entry Status. */ 1806 uint8_t entry_status; /* Entry Status. */
1824 uint32_t dseg_0_address[2]; /* Data segment 0 address. */ 1807 struct dsd64 dsd[5];
1825 uint32_t dseg_0_length; /* Data segment 0 length. */
1826 uint32_t dseg_1_address[2]; /* Data segment 1 address. */
1827 uint32_t dseg_1_length; /* Data segment 1 length. */
1828 uint32_t dseg_2_address [2]; /* Data segment 2 address. */
1829 uint32_t dseg_2_length; /* Data segment 2 length. */
1830 uint32_t dseg_3_address[2]; /* Data segment 3 address. */
1831 uint32_t dseg_3_length; /* Data segment 3 length. */
1832 uint32_t dseg_4_address[2]; /* Data segment 4 address. */
1833 uint32_t dseg_4_length; /* Data segment 4 length. */
1834} cont_a64_entry_t; 1808} cont_a64_entry_t;
1835 1809
1836#define PO_MODE_DIF_INSERT 0 1810#define PO_MODE_DIF_INSERT 0
@@ -1874,8 +1848,7 @@ struct crc_context {
1874 uint16_t reserved_2; 1848 uint16_t reserved_2;
1875 uint16_t reserved_3; 1849 uint16_t reserved_3;
1876 uint32_t reserved_4; 1850 uint32_t reserved_4;
1877 uint32_t data_address[2]; 1851 struct dsd64 data_dsd;
1878 uint32_t data_length;
1879 uint32_t reserved_5[2]; 1852 uint32_t reserved_5[2];
1880 uint32_t reserved_6; 1853 uint32_t reserved_6;
1881 } nobundling; 1854 } nobundling;
@@ -1885,11 +1858,8 @@ struct crc_context {
1885 uint16_t reserved_1; 1858 uint16_t reserved_1;
1886 __le16 dseg_count; /* Data segment count */ 1859 __le16 dseg_count; /* Data segment count */
1887 uint32_t reserved_2; 1860 uint32_t reserved_2;
1888 uint32_t data_address[2]; 1861 struct dsd64 data_dsd;
1889 uint32_t data_length; 1862 struct dsd64 dif_dsd;
1890 uint32_t dif_address[2];
1891 uint32_t dif_length; /* Data segment 0
1892 * length */
1893 } bundling; 1863 } bundling;
1894 } u; 1864 } u;
1895 1865
@@ -2088,10 +2058,8 @@ typedef struct {
2088 uint32_t handle2; 2058 uint32_t handle2;
2089 uint32_t rsp_bytecount; 2059 uint32_t rsp_bytecount;
2090 uint32_t req_bytecount; 2060 uint32_t req_bytecount;
2091 uint32_t dseg_req_address[2]; /* Data segment 0 address. */ 2061 struct dsd64 req_dsd;
2092 uint32_t dseg_req_length; /* Data segment 0 length. */ 2062 struct dsd64 rsp_dsd;
2093 uint32_t dseg_rsp_address[2]; /* Data segment 1 address. */
2094 uint32_t dseg_rsp_length; /* Data segment 1 length. */
2095} ms_iocb_entry_t; 2063} ms_iocb_entry_t;
2096 2064
2097 2065
diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h
new file mode 100644
index 000000000000..7479924ba422
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_dsd.h
@@ -0,0 +1,30 @@
1#ifndef _QLA_DSD_H_
2#define _QLA_DSD_H_
3
4/* 32-bit data segment descriptor (8 bytes) */
5struct dsd32 {
6 __le32 address;
7 __le32 length;
8};
9
10static inline void append_dsd32(struct dsd32 **dsd, struct scatterlist *sg)
11{
12 put_unaligned_le32(sg_dma_address(sg), &(*dsd)->address);
13 put_unaligned_le32(sg_dma_len(sg), &(*dsd)->length);
14 (*dsd)++;
15}
16
17/* 64-bit data segment descriptor (12 bytes) */
18struct dsd64 {
19 __le64 address;
20 __le32 length;
21} __packed;
22
23static inline void append_dsd64(struct dsd64 **dsd, struct scatterlist *sg)
24{
25 put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
26 put_unaligned_le32(sg_dma_len(sg), &(*dsd)->length);
27 (*dsd)++;
28}
29
30#endif
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d53cd7875a85..604eb4682ac0 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -10,6 +10,8 @@
10#include <linux/nvme.h> 10#include <linux/nvme.h>
11#include <linux/nvme-fc.h> 11#include <linux/nvme-fc.h>
12 12
13#include "qla_dsd.h"
14
13#define MBS_CHECKSUM_ERROR 0x4010 15#define MBS_CHECKSUM_ERROR 0x4010
14#define MBS_INVALID_PRODUCT_KEY 0x4020 16#define MBS_INVALID_PRODUCT_KEY 0x4020
15 17
@@ -463,8 +465,7 @@ struct cmd_bidir {
463 uint8_t port_id[3]; /* PortID of destination port.*/ 465 uint8_t port_id[3]; /* PortID of destination port.*/
464 uint8_t vp_index; 466 uint8_t vp_index;
465 467
466 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */ 468 struct dsd64 fcp_dsd;
467 uint16_t fcp_data_dseg_len; /* Data segment length. */
468}; 469};
469 470
470#define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */ 471#define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */
@@ -501,8 +502,7 @@ struct cmd_type_6 {
501 uint8_t port_id[3]; /* PortID of destination port. */ 502 uint8_t port_id[3]; /* PortID of destination port. */
502 uint8_t vp_index; 503 uint8_t vp_index;
503 504
504 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */ 505 struct dsd64 fcp_dsd;
505 uint32_t fcp_data_dseg_len; /* Data segment length. */
506}; 506};
507 507
508#define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */ 508#define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */
@@ -548,8 +548,7 @@ struct cmd_type_7 {
548 uint8_t port_id[3]; /* PortID of destination port. */ 548 uint8_t port_id[3]; /* PortID of destination port. */
549 uint8_t vp_index; 549 uint8_t vp_index;
550 550
551 uint32_t dseg_0_address[2]; /* Data segment 0 address. */ 551 struct dsd64 dsd;
552 uint32_t dseg_0_len; /* Data segment 0 length. */
553}; 552};
554 553
555#define COMMAND_TYPE_CRC_2 0x6A /* Command Type CRC_2 (Type 6) 554#define COMMAND_TYPE_CRC_2 0x6A /* Command Type CRC_2 (Type 6)
@@ -717,10 +716,7 @@ struct ct_entry_24xx {
717 uint32_t rsp_byte_count; 716 uint32_t rsp_byte_count;
718 uint32_t cmd_byte_count; 717 uint32_t cmd_byte_count;
719 718
720 uint32_t dseg_0_address[2]; /* Data segment 0 address. */ 719 struct dsd64 dsd[2];
721 uint32_t dseg_0_len; /* Data segment 0 length. */
722 uint32_t dseg_1_address[2]; /* Data segment 1 address. */
723 uint32_t dseg_1_len; /* Data segment 1 length. */
724}; 720};
725 721
726/* 722/*
@@ -1606,8 +1602,7 @@ struct verify_chip_entry_84xx {
1606 uint32_t fw_seq_size; 1602 uint32_t fw_seq_size;
1607 uint32_t relative_offset; 1603 uint32_t relative_offset;
1608 1604
1609 uint32_t dseg_address[2]; 1605 struct dsd64 dsd;
1610 uint32_t dseg_length;
1611}; 1606};
1612 1607
1613struct verify_chip_rsp_84xx { 1608struct verify_chip_rsp_84xx {
@@ -1664,8 +1659,7 @@ struct access_chip_84xx {
1664 uint32_t total_byte_cnt; 1659 uint32_t total_byte_cnt;
1665 uint32_t reserved4; 1660 uint32_t reserved4;
1666 1661
1667 uint32_t dseg_address[2]; 1662 struct dsd64 dsd;
1668 uint32_t dseg_length;
1669}; 1663};
1670 1664
1671struct access_chip_rsp_84xx { 1665struct access_chip_rsp_84xx {
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9c7fbd4da9b8..bbe69ab5cf3f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -287,11 +287,11 @@ extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
287extern void *__qla2x00_alloc_iocbs(struct qla_qpair *, srb_t *); 287extern void *__qla2x00_alloc_iocbs(struct qla_qpair *, srb_t *);
288extern int qla2x00_issue_marker(scsi_qla_host_t *, int); 288extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
289extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, 289extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
290 uint32_t *, uint16_t, struct qla_tc_param *); 290 struct dsd64 *, uint16_t, struct qla_tc_param *);
291extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, 291extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
292 uint32_t *, uint16_t, struct qla_tc_param *); 292 struct dsd64 *, uint16_t, struct qla_tc_param *);
293extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, 293extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
294 uint32_t *, uint16_t, struct qla_tgt_cmd *); 294 struct dsd64 *, uint16_t, struct qla_tgt_cmd *);
295extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); 295extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
296extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); 296extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
297 297
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4235769ec4cc..e2653bbc117d 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -45,13 +45,11 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
45 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size); 45 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
46 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size); 46 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
47 47
48 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma)); 48 put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
49 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma)); 49 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
50 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
51 50
52 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma)); 51 put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
53 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma)); 52 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
54 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
55 53
56 vha->qla_stats.control_requests++; 54 vha->qla_stats.control_requests++;
57 55
@@ -83,13 +81,11 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
83 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size); 81 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
84 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size); 82 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
85 83
86 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma)); 84 put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
87 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma)); 85 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
88 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
89 86
90 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma)); 87 put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
91 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma)); 88 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
92 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
93 ct_pkt->vp_index = vha->vp_idx; 89 ct_pkt->vp_index = vha->vp_idx;
94 90
95 vha->qla_stats.control_requests++; 91 vha->qla_stats.control_requests++;
@@ -1438,13 +1434,11 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1438 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); 1434 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1439 ms_pkt->req_bytecount = cpu_to_le32(req_size); 1435 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1440 1436
1441 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1437 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1442 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1438 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1443 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1444 1439
1445 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1440 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1446 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1441 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1447 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
1448 1442
1449 return ms_pkt; 1443 return ms_pkt;
1450} 1444}
@@ -1476,13 +1470,11 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1476 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); 1470 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1477 ct_pkt->cmd_byte_count = cpu_to_le32(req_size); 1471 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1478 1472
1479 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1473 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1480 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1474 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1481 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1482 1475
1483 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1476 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1484 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1477 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
1485 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1486 ct_pkt->vp_index = vha->vp_idx; 1478 ct_pkt->vp_index = vha->vp_idx;
1487 1479
1488 return ct_pkt; 1480 return ct_pkt;
@@ -1497,10 +1489,10 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1497 1489
1498 if (IS_FWI2_CAPABLE(ha)) { 1490 if (IS_FWI2_CAPABLE(ha)) {
1499 ct_pkt->cmd_byte_count = cpu_to_le32(req_size); 1491 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1500 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count; 1492 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1501 } else { 1493 } else {
1502 ms_pkt->req_bytecount = cpu_to_le32(req_size); 1494 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1503 ms_pkt->dseg_req_length = ms_pkt->req_bytecount; 1495 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1504 } 1496 }
1505 1497
1506 return ms_pkt; 1498 return ms_pkt;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 19eb18be2316..ef895e1142c9 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -192,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192 uint16_t tot_dsds) 192 uint16_t tot_dsds)
193{ 193{
194 uint16_t avail_dsds; 194 uint16_t avail_dsds;
195 uint32_t *cur_dsd; 195 struct dsd32 *cur_dsd;
196 scsi_qla_host_t *vha; 196 scsi_qla_host_t *vha;
197 struct scsi_cmnd *cmd; 197 struct scsi_cmnd *cmd;
198 struct scatterlist *sg; 198 struct scatterlist *sg;
@@ -213,8 +213,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
213 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 213 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
214 214
215 /* Three DSDs are available in the Command Type 2 IOCB */ 215 /* Three DSDs are available in the Command Type 2 IOCB */
216 avail_dsds = 3; 216 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
217 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; 217 cur_dsd = cmd_pkt->dsd32;
218 218
219 /* Load data segments */ 219 /* Load data segments */
220 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 220 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
@@ -227,12 +227,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
227 * Type 0 IOCB. 227 * Type 0 IOCB.
228 */ 228 */
229 cont_pkt = qla2x00_prep_cont_type0_iocb(vha); 229 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
230 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 230 cur_dsd = cont_pkt->dsd;
231 avail_dsds = 7; 231 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
232 } 232 }
233 233
234 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg)); 234 append_dsd32(&cur_dsd, sg);
235 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
236 avail_dsds--; 235 avail_dsds--;
237 } 236 }
238} 237}
@@ -249,7 +248,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
249 uint16_t tot_dsds) 248 uint16_t tot_dsds)
250{ 249{
251 uint16_t avail_dsds; 250 uint16_t avail_dsds;
252 uint32_t *cur_dsd; 251 struct dsd64 *cur_dsd;
253 scsi_qla_host_t *vha; 252 scsi_qla_host_t *vha;
254 struct scsi_cmnd *cmd; 253 struct scsi_cmnd *cmd;
255 struct scatterlist *sg; 254 struct scatterlist *sg;
@@ -270,12 +269,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
270 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 269 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
271 270
272 /* Two DSDs are available in the Command Type 3 IOCB */ 271 /* Two DSDs are available in the Command Type 3 IOCB */
273 avail_dsds = 2; 272 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
274 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; 273 cur_dsd = cmd_pkt->dsd64;
275 274
276 /* Load data segments */ 275 /* Load data segments */
277 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 276 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
278 dma_addr_t sle_dma;
279 cont_a64_entry_t *cont_pkt; 277 cont_a64_entry_t *cont_pkt;
280 278
281 /* Allocate additional continuation packets? */ 279 /* Allocate additional continuation packets? */
@@ -285,14 +283,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
285 * Type 1 IOCB. 283 * Type 1 IOCB.
286 */ 284 */
287 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 285 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
288 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 286 cur_dsd = cont_pkt->dsd;
289 avail_dsds = 5; 287 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
290 } 288 }
291 289
292 sle_dma = sg_dma_address(sg); 290 append_dsd64(&cur_dsd, sg);
293 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
294 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
295 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
296 avail_dsds--; 291 avail_dsds--;
297 } 292 }
298} 293}
@@ -578,13 +573,11 @@ static inline int
578qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, 573qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
579 uint16_t tot_dsds) 574 uint16_t tot_dsds)
580{ 575{
581 uint32_t *cur_dsd = NULL; 576 struct dsd64 *cur_dsd = NULL, *next_dsd;
582 scsi_qla_host_t *vha; 577 scsi_qla_host_t *vha;
583 struct qla_hw_data *ha; 578 struct qla_hw_data *ha;
584 struct scsi_cmnd *cmd; 579 struct scsi_cmnd *cmd;
585 struct scatterlist *cur_seg; 580 struct scatterlist *cur_seg;
586 uint32_t *dsd_seg;
587 void *next_dsd;
588 uint8_t avail_dsds; 581 uint8_t avail_dsds;
589 uint8_t first_iocb = 1; 582 uint8_t first_iocb = 1;
590 uint32_t dsd_list_len; 583 uint32_t dsd_list_len;
@@ -636,32 +629,27 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
636 629
637 if (first_iocb) { 630 if (first_iocb) {
638 first_iocb = 0; 631 first_iocb = 0;
639 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; 632 put_unaligned_le64(dsd_ptr->dsd_list_dma,
640 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 633 &cmd_pkt->fcp_dsd.address);
641 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 634 cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
642 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
643 } else { 635 } else {
644 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 636 put_unaligned_le64(dsd_ptr->dsd_list_dma,
645 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 637 &cur_dsd->address);
646 *cur_dsd++ = cpu_to_le32(dsd_list_len); 638 cur_dsd->length = cpu_to_le32(dsd_list_len);
639 cur_dsd++;
647 } 640 }
648 cur_dsd = (uint32_t *)next_dsd; 641 cur_dsd = next_dsd;
649 while (avail_dsds) { 642 while (avail_dsds) {
650 dma_addr_t sle_dma; 643 append_dsd64(&cur_dsd, cur_seg);
651
652 sle_dma = sg_dma_address(cur_seg);
653 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
654 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
655 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
656 cur_seg = sg_next(cur_seg); 644 cur_seg = sg_next(cur_seg);
657 avail_dsds--; 645 avail_dsds--;
658 } 646 }
659 } 647 }
660 648
661 /* Null termination */ 649 /* Null termination */
662 *cur_dsd++ = 0; 650 cur_dsd->address = 0;
663 *cur_dsd++ = 0; 651 cur_dsd->length = 0;
664 *cur_dsd++ = 0; 652 cur_dsd++;
665 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE; 653 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
666 return 0; 654 return 0;
667} 655}
@@ -700,7 +688,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
700 uint16_t tot_dsds, struct req_que *req) 688 uint16_t tot_dsds, struct req_que *req)
701{ 689{
702 uint16_t avail_dsds; 690 uint16_t avail_dsds;
703 uint32_t *cur_dsd; 691 struct dsd64 *cur_dsd;
704 scsi_qla_host_t *vha; 692 scsi_qla_host_t *vha;
705 struct scsi_cmnd *cmd; 693 struct scsi_cmnd *cmd;
706 struct scatterlist *sg; 694 struct scatterlist *sg;
@@ -732,12 +720,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
732 720
733 /* One DSD is available in the Command Type 3 IOCB */ 721 /* One DSD is available in the Command Type 3 IOCB */
734 avail_dsds = 1; 722 avail_dsds = 1;
735 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; 723 cur_dsd = &cmd_pkt->dsd;
736 724
737 /* Load data segments */ 725 /* Load data segments */
738 726
739 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 727 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
740 dma_addr_t sle_dma;
741 cont_a64_entry_t *cont_pkt; 728 cont_a64_entry_t *cont_pkt;
742 729
743 /* Allocate additional continuation packets? */ 730 /* Allocate additional continuation packets? */
@@ -747,14 +734,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
747 * Type 1 IOCB. 734 * Type 1 IOCB.
748 */ 735 */
749 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req); 736 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
750 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 737 cur_dsd = cont_pkt->dsd;
751 avail_dsds = 5; 738 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
752 } 739 }
753 740
754 sle_dma = sg_dma_address(sg); 741 append_dsd64(&cur_dsd, sg);
755 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
756 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
757 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
758 avail_dsds--; 742 avail_dsds--;
759 } 743 }
760} 744}
@@ -890,14 +874,14 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
890 874
891int 875int
892qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, 876qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
893 uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 877 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
894{ 878{
895 void *next_dsd; 879 void *next_dsd;
896 uint8_t avail_dsds = 0; 880 uint8_t avail_dsds = 0;
897 uint32_t dsd_list_len; 881 uint32_t dsd_list_len;
898 struct dsd_dma *dsd_ptr; 882 struct dsd_dma *dsd_ptr;
899 struct scatterlist *sg_prot; 883 struct scatterlist *sg_prot;
900 uint32_t *cur_dsd = dsd; 884 struct dsd64 *cur_dsd = dsd;
901 uint16_t used_dsds = tot_dsds; 885 uint16_t used_dsds = tot_dsds;
902 uint32_t prot_int; /* protection interval */ 886 uint32_t prot_int; /* protection interval */
903 uint32_t partial; 887 uint32_t partial;
@@ -971,14 +955,14 @@ alloc_and_fill:
971 955
972 956
973 /* add new list to cmd iocb or last list */ 957 /* add new list to cmd iocb or last list */
974 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 958 put_unaligned_le64(dsd_ptr->dsd_list_dma,
975 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 959 &cur_dsd->address);
976 *cur_dsd++ = cpu_to_le32(dsd_list_len); 960 cur_dsd->length = cpu_to_le32(dsd_list_len);
977 cur_dsd = (uint32_t *)next_dsd; 961 cur_dsd = next_dsd;
978 } 962 }
979 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 963 put_unaligned_le64(sle_dma, &cur_dsd->address);
980 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 964 cur_dsd->length = cpu_to_le32(sle_dma_len);
981 *cur_dsd++ = cpu_to_le32(sle_dma_len); 965 cur_dsd++;
982 avail_dsds--; 966 avail_dsds--;
983 967
984 if (partial == 0) { 968 if (partial == 0) {
@@ -997,22 +981,22 @@ alloc_and_fill:
997 } 981 }
998 } 982 }
999 /* Null termination */ 983 /* Null termination */
1000 *cur_dsd++ = 0; 984 cur_dsd->address = 0;
1001 *cur_dsd++ = 0; 985 cur_dsd->length = 0;
1002 *cur_dsd++ = 0; 986 cur_dsd++;
1003 return 0; 987 return 0;
1004} 988}
1005 989
1006int 990int
1007qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, 991qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
1008 uint16_t tot_dsds, struct qla_tc_param *tc) 992 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
1009{ 993{
1010 void *next_dsd; 994 void *next_dsd;
1011 uint8_t avail_dsds = 0; 995 uint8_t avail_dsds = 0;
1012 uint32_t dsd_list_len; 996 uint32_t dsd_list_len;
1013 struct dsd_dma *dsd_ptr; 997 struct dsd_dma *dsd_ptr;
1014 struct scatterlist *sg, *sgl; 998 struct scatterlist *sg, *sgl;
1015 uint32_t *cur_dsd = dsd; 999 struct dsd64 *cur_dsd = dsd;
1016 int i; 1000 int i;
1017 uint16_t used_dsds = tot_dsds; 1001 uint16_t used_dsds = tot_dsds;
1018 struct scsi_cmnd *cmd; 1002 struct scsi_cmnd *cmd;
@@ -1029,8 +1013,6 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1029 1013
1030 1014
1031 for_each_sg(sgl, sg, tot_dsds, i) { 1015 for_each_sg(sgl, sg, tot_dsds, i) {
1032 dma_addr_t sle_dma;
1033
1034 /* Allocate additional continuation packets? */ 1016 /* Allocate additional continuation packets? */
1035 if (avail_dsds == 0) { 1017 if (avail_dsds == 0) {
1036 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 1018 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
@@ -1070,29 +1052,25 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1070 } 1052 }
1071 1053
1072 /* add new list to cmd iocb or last list */ 1054 /* add new list to cmd iocb or last list */
1073 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 1055 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1074 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 1056 &cur_dsd->address);
1075 *cur_dsd++ = cpu_to_le32(dsd_list_len); 1057 cur_dsd->length = cpu_to_le32(dsd_list_len);
1076 cur_dsd = (uint32_t *)next_dsd; 1058 cur_dsd = next_dsd;
1077 } 1059 }
1078 sle_dma = sg_dma_address(sg); 1060 append_dsd64(&cur_dsd, sg);
1079
1080 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1081 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1082 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1083 avail_dsds--; 1061 avail_dsds--;
1084 1062
1085 } 1063 }
1086 /* Null termination */ 1064 /* Null termination */
1087 *cur_dsd++ = 0; 1065 cur_dsd->address = 0;
1088 *cur_dsd++ = 0; 1066 cur_dsd->length = 0;
1089 *cur_dsd++ = 0; 1067 cur_dsd++;
1090 return 0; 1068 return 0;
1091} 1069}
1092 1070
1093int 1071int
1094qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, 1072qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1095 uint32_t *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) 1073 struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1096{ 1074{
1097 struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd; 1075 struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
1098 struct scatterlist *sg, *sgl; 1076 struct scatterlist *sg, *sgl;
@@ -1313,16 +1291,15 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1313 } 1291 }
1314 1292
1315 /* add new list to cmd iocb or last list */ 1293 /* add new list to cmd iocb or last list */
1316 *cur_dsd++ = 1294 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1317 cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 1295 &cur_dsd->address);
1318 *cur_dsd++ = 1296 cur_dsd->length = cpu_to_le32(dsd_list_len);
1319 cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1320 *cur_dsd++ = cpu_to_le32(dsd_list_len);
1321 cur_dsd = dsd_ptr->dsd_addr; 1297 cur_dsd = dsd_ptr->dsd_addr;
1322 } 1298 }
1323 *cur_dsd++ = cpu_to_le32(LSD(dif_dsd->dsd_list_dma)); 1299 put_unaligned_le64(dif_dsd->dsd_list_dma,
1324 *cur_dsd++ = cpu_to_le32(MSD(dif_dsd->dsd_list_dma)); 1300 &cur_dsd->address);
1325 *cur_dsd++ = cpu_to_le32(sglen); 1301 cur_dsd->length = cpu_to_le32(sglen);
1302 cur_dsd++;
1326 avail_dsds--; 1303 avail_dsds--;
1327 difctx->dif_bundl_len -= sglen; 1304 difctx->dif_bundl_len -= sglen;
1328 track_difbundl_buf--; 1305 track_difbundl_buf--;
@@ -1333,8 +1310,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1333 difctx->no_ldif_dsd, difctx->no_dif_bundl); 1310 difctx->no_ldif_dsd, difctx->no_dif_bundl);
1334 } else { 1311 } else {
1335 for_each_sg(sgl, sg, tot_dsds, i) { 1312 for_each_sg(sgl, sg, tot_dsds, i) {
1336 dma_addr_t sle_dma;
1337
1338 /* Allocate additional continuation packets? */ 1313 /* Allocate additional continuation packets? */
1339 if (avail_dsds == 0) { 1314 if (avail_dsds == 0) {
1340 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 1315 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
@@ -1374,24 +1349,19 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1374 } 1349 }
1375 1350
1376 /* add new list to cmd iocb or last list */ 1351 /* add new list to cmd iocb or last list */
1377 *cur_dsd++ = 1352 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1378 cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 1353 &cur_dsd->address);
1379 *cur_dsd++ = 1354 cur_dsd->length = cpu_to_le32(dsd_list_len);
1380 cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1381 *cur_dsd++ = cpu_to_le32(dsd_list_len);
1382 cur_dsd = dsd_ptr->dsd_addr; 1355 cur_dsd = dsd_ptr->dsd_addr;
1383 } 1356 }
1384 sle_dma = sg_dma_address(sg); 1357 append_dsd64(&cur_dsd, sg);
1385 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1386 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1387 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1388 avail_dsds--; 1358 avail_dsds--;
1389 } 1359 }
1390 } 1360 }
1391 /* Null termination */ 1361 /* Null termination */
1392 *cur_dsd++ = 0; 1362 cur_dsd->address = 0;
1393 *cur_dsd++ = 0; 1363 cur_dsd->length = 0;
1394 *cur_dsd++ = 0; 1364 cur_dsd++;
1395 return 0; 1365 return 0;
1396} 1366}
1397/** 1367/**
@@ -1408,7 +1378,8 @@ static inline int
1408qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, 1378qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1409 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) 1379 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1410{ 1380{
1411 uint32_t *cur_dsd, *fcp_dl; 1381 struct dsd64 *cur_dsd;
1382 uint32_t *fcp_dl;
1412 scsi_qla_host_t *vha; 1383 scsi_qla_host_t *vha;
1413 struct scsi_cmnd *cmd; 1384 struct scsi_cmnd *cmd;
1414 uint32_t total_bytes = 0; 1385 uint32_t total_bytes = 0;
@@ -1547,7 +1518,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1547 } 1518 }
1548 1519
1549 if (!bundling) { 1520 if (!bundling) {
1550 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; 1521 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
1551 } else { 1522 } else {
1552 /* 1523 /*
1553 * Configure Bundling if we need to fetch interlaving 1524 * Configure Bundling if we need to fetch interlaving
@@ -1557,7 +1528,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1557 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 1528 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1558 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds - 1529 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1559 tot_prot_dsds); 1530 tot_prot_dsds);
1560 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; 1531 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
1561 } 1532 }
1562 1533
1563 /* Finish the common fields of CRC pkt */ 1534 /* Finish the common fields of CRC pkt */
@@ -1590,7 +1561,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1590 if (bundling && tot_prot_dsds) { 1561 if (bundling && tot_prot_dsds) {
1591 /* Walks dif segments */ 1562 /* Walks dif segments */
1592 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); 1563 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1593 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 1564 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1594 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, 1565 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1595 tot_prot_dsds, NULL)) 1566 tot_prot_dsds, NULL))
1596 goto crc_queuing_error; 1567 goto crc_queuing_error;
@@ -3007,7 +2978,7 @@ static void
3007qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) 2978qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3008{ 2979{
3009 uint16_t avail_dsds; 2980 uint16_t avail_dsds;
3010 uint32_t *cur_dsd; 2981 struct dsd64 *cur_dsd;
3011 struct scatterlist *sg; 2982 struct scatterlist *sg;
3012 int index; 2983 int index;
3013 uint16_t tot_dsds; 2984 uint16_t tot_dsds;
@@ -3033,25 +3004,20 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3033 ct_iocb->rsp_bytecount = 3004 ct_iocb->rsp_bytecount =
3034 cpu_to_le32(bsg_job->reply_payload.payload_len); 3005 cpu_to_le32(bsg_job->reply_payload.payload_len);
3035 3006
3036 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address 3007 ct_iocb->req_dsd.address =
3037 (bsg_job->request_payload.sg_list))); 3008 cpu_to_le64(sg_dma_address(bsg_job->request_payload.sg_list));
3038 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address 3009 ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3039 (bsg_job->request_payload.sg_list)));
3040 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
3041 3010
3042 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address 3011 ct_iocb->rsp_dsd.address =
3043 (bsg_job->reply_payload.sg_list))); 3012 cpu_to_le64(sg_dma_address(bsg_job->reply_payload.sg_list));
3044 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address 3013 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3045 (bsg_job->reply_payload.sg_list)));
3046 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
3047 3014
3048 avail_dsds = 1; 3015 avail_dsds = 1;
3049 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address; 3016 cur_dsd = &ct_iocb->rsp_dsd;
3050 index = 0; 3017 index = 0;
3051 tot_dsds = bsg_job->reply_payload.sg_cnt; 3018 tot_dsds = bsg_job->reply_payload.sg_cnt;
3052 3019
3053 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 3020 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3054 dma_addr_t sle_dma;
3055 cont_a64_entry_t *cont_pkt; 3021 cont_a64_entry_t *cont_pkt;
3056 3022
3057 /* Allocate additional continuation packets? */ 3023 /* Allocate additional continuation packets? */
@@ -3062,15 +3028,12 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3062 */ 3028 */
3063 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3029 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3064 vha->hw->req_q_map[0]); 3030 vha->hw->req_q_map[0]);
3065 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 3031 cur_dsd = cont_pkt->dsd;
3066 avail_dsds = 5; 3032 avail_dsds = 5;
3067 entry_count++; 3033 entry_count++;
3068 } 3034 }
3069 3035
3070 sle_dma = sg_dma_address(sg); 3036 append_dsd64(&cur_dsd, sg);
3071 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3072 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3073 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3074 avail_dsds--; 3037 avail_dsds--;
3075 } 3038 }
3076 ct_iocb->entry_count = entry_count; 3039 ct_iocb->entry_count = entry_count;
@@ -3082,7 +3045,7 @@ static void
3082qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) 3045qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3083{ 3046{
3084 uint16_t avail_dsds; 3047 uint16_t avail_dsds;
3085 uint32_t *cur_dsd; 3048 struct dsd64 *cur_dsd;
3086 struct scatterlist *sg; 3049 struct scatterlist *sg;
3087 int index; 3050 int index;
3088 uint16_t cmd_dsds, rsp_dsds; 3051 uint16_t cmd_dsds, rsp_dsds;
@@ -3111,12 +3074,10 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3111 cpu_to_le32(bsg_job->request_payload.payload_len); 3074 cpu_to_le32(bsg_job->request_payload.payload_len);
3112 3075
3113 avail_dsds = 2; 3076 avail_dsds = 2;
3114 cur_dsd = (uint32_t *)ct_iocb->dseg_0_address; 3077 cur_dsd = ct_iocb->dsd;
3115 index = 0; 3078 index = 0;
3116 3079
3117 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { 3080 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3118 dma_addr_t sle_dma;
3119
3120 /* Allocate additional continuation packets? */ 3081 /* Allocate additional continuation packets? */
3121 if (avail_dsds == 0) { 3082 if (avail_dsds == 0) {
3122 /* 3083 /*
@@ -3125,23 +3086,18 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3125 */ 3086 */
3126 cont_pkt = qla2x00_prep_cont_type1_iocb( 3087 cont_pkt = qla2x00_prep_cont_type1_iocb(
3127 vha, ha->req_q_map[0]); 3088 vha, ha->req_q_map[0]);
3128 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 3089 cur_dsd = cont_pkt->dsd;
3129 avail_dsds = 5; 3090 avail_dsds = 5;
3130 entry_count++; 3091 entry_count++;
3131 } 3092 }
3132 3093
3133 sle_dma = sg_dma_address(sg); 3094 append_dsd64(&cur_dsd, sg);
3134 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3135 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3136 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3137 avail_dsds--; 3095 avail_dsds--;
3138 } 3096 }
3139 3097
3140 index = 0; 3098 index = 0;
3141 3099
3142 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { 3100 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3143 dma_addr_t sle_dma;
3144
3145 /* Allocate additional continuation packets? */ 3101 /* Allocate additional continuation packets? */
3146 if (avail_dsds == 0) { 3102 if (avail_dsds == 0) {
3147 /* 3103 /*
@@ -3150,15 +3106,12 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3150 */ 3106 */
3151 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3107 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3152 ha->req_q_map[0]); 3108 ha->req_q_map[0]);
3153 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 3109 cur_dsd = cont_pkt->dsd;
3154 avail_dsds = 5; 3110 avail_dsds = 5;
3155 entry_count++; 3111 entry_count++;
3156 } 3112 }
3157 3113
3158 sle_dma = sg_dma_address(sg); 3114 append_dsd64(&cur_dsd, sg);
3159 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3160 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3161 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3162 avail_dsds--; 3115 avail_dsds--;
3163 } 3116 }
3164 ct_iocb->entry_count = entry_count; 3117 ct_iocb->entry_count = entry_count;
@@ -3599,15 +3552,13 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3599 3552
3600 cmd_pkt->tx_dseg_count = 1; 3553 cmd_pkt->tx_dseg_count = 1;
3601 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len; 3554 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3602 cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len; 3555 cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3603 cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma)); 3556 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3604 cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3605 3557
3606 cmd_pkt->rx_dseg_count = 1; 3558 cmd_pkt->rx_dseg_count = 1;
3607 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len; 3559 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3608 cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len; 3560 cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
3609 cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma)); 3561 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3610 cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3611 3562
3612 return rval; 3563 return rval;
3613} 3564}
@@ -3746,7 +3697,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3746 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) 3697 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3747{ 3698{
3748 uint16_t avail_dsds; 3699 uint16_t avail_dsds;
3749 uint32_t *cur_dsd; 3700 struct dsd64 *cur_dsd;
3750 uint32_t req_data_len = 0; 3701 uint32_t req_data_len = 0;
3751 uint32_t rsp_data_len = 0; 3702 uint32_t rsp_data_len = 0;
3752 struct scatterlist *sg; 3703 struct scatterlist *sg;
@@ -3781,13 +3732,12 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3781 * are bundled in continuation iocb 3732 * are bundled in continuation iocb
3782 */ 3733 */
3783 avail_dsds = 1; 3734 avail_dsds = 1;
3784 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; 3735 cur_dsd = &cmd_pkt->fcp_dsd;
3785 3736
3786 index = 0; 3737 index = 0;
3787 3738
3788 for_each_sg(bsg_job->request_payload.sg_list, sg, 3739 for_each_sg(bsg_job->request_payload.sg_list, sg,
3789 bsg_job->request_payload.sg_cnt, index) { 3740 bsg_job->request_payload.sg_cnt, index) {
3790 dma_addr_t sle_dma;
3791 cont_a64_entry_t *cont_pkt; 3741 cont_a64_entry_t *cont_pkt;
3792 3742
3793 /* Allocate additional continuation packets */ 3743 /* Allocate additional continuation packets */
@@ -3796,14 +3746,11 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3796 * 5 DSDS 3746 * 5 DSDS
3797 */ 3747 */
3798 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 3748 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3799 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 3749 cur_dsd = cont_pkt->dsd;
3800 avail_dsds = 5; 3750 avail_dsds = 5;
3801 entry_count++; 3751 entry_count++;
3802 } 3752 }
3803 sle_dma = sg_dma_address(sg); 3753 append_dsd64(&cur_dsd, sg);
3804 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3805 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3806 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3807 avail_dsds--; 3754 avail_dsds--;
3808 } 3755 }
3809 /* For read request DSD will always goes to continuation IOCB 3756 /* For read request DSD will always goes to continuation IOCB
@@ -3813,7 +3760,6 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3813 */ 3760 */
3814 for_each_sg(bsg_job->reply_payload.sg_list, sg, 3761 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3815 bsg_job->reply_payload.sg_cnt, index) { 3762 bsg_job->reply_payload.sg_cnt, index) {
3816 dma_addr_t sle_dma;
3817 cont_a64_entry_t *cont_pkt; 3763 cont_a64_entry_t *cont_pkt;
3818 3764
3819 /* Allocate additional continuation packets */ 3765 /* Allocate additional continuation packets */
@@ -3822,14 +3768,11 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3822 * 5 DSDS 3768 * 5 DSDS
3823 */ 3769 */
3824 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 3770 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3825 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 3771 cur_dsd = cont_pkt->dsd;
3826 avail_dsds = 5; 3772 avail_dsds = 5;
3827 entry_count++; 3773 entry_count++;
3828 } 3774 }
3829 sle_dma = sg_dma_address(sg); 3775 append_dsd64(&cur_dsd, sg);
3830 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3831 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3832 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3833 avail_dsds--; 3776 avail_dsds--;
3834 } 3777 }
3835 /* This value should be same as number of IOCB required for this cmd */ 3778 /* This value should be same as number of IOCB required for this cmd */
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 783f24db89f7..a08d83dbcece 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -2993,7 +2993,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
2993 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt) 2993 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
2994{ 2994{
2995 uint16_t avail_dsds; 2995 uint16_t avail_dsds;
2996 __le32 *cur_dsd; 2996 struct dsd64 *cur_dsd;
2997 scsi_qla_host_t *vha; 2997 scsi_qla_host_t *vha;
2998 struct scsi_cmnd *cmd; 2998 struct scsi_cmnd *cmd;
2999 struct scatterlist *sg; 2999 struct scatterlist *sg;
@@ -3029,12 +3029,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
3029 3029
3030 /* One DSD is available in the Command Type 3 IOCB */ 3030 /* One DSD is available in the Command Type 3 IOCB */
3031 avail_dsds = 1; 3031 avail_dsds = 1;
3032 cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address; 3032 cur_dsd = &lcmd_pkt->dsd;
3033 3033
3034 /* Load data segments */ 3034 /* Load data segments */
3035 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 3035 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
3036 dma_addr_t sle_dma;
3037
3038 /* Allocate additional continuation packets? */ 3036 /* Allocate additional continuation packets? */
3039 if (avail_dsds == 0) { 3037 if (avail_dsds == 0) {
3040 /* 3038 /*
@@ -3044,15 +3042,12 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
3044 memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE); 3042 memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
3045 cont_pkt = 3043 cont_pkt =
3046 qlafx00_prep_cont_type1_iocb(req, &lcont_pkt); 3044 qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
3047 cur_dsd = (__le32 *)lcont_pkt.dseg_0_address; 3045 cur_dsd = lcont_pkt.dsd;
3048 avail_dsds = 5; 3046 avail_dsds = 5;
3049 cont = 1; 3047 cont = 1;
3050 } 3048 }
3051 3049
3052 sle_dma = sg_dma_address(sg); 3050 append_dsd64(&cur_dsd, sg);
3053 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3054 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3055 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3056 avail_dsds--; 3051 avail_dsds--;
3057 if (avail_dsds == 0 && cont == 1) { 3052 if (avail_dsds == 0 && cont == 1) {
3058 cont = 0; 3053 cont = 0;
@@ -3283,11 +3278,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3283 fx_iocb.req_dsdcnt = cpu_to_le16(1); 3278 fx_iocb.req_dsdcnt = cpu_to_le16(1);
3284 fx_iocb.req_xfrcnt = 3279 fx_iocb.req_xfrcnt =
3285 cpu_to_le16(fxio->u.fxiocb.req_len); 3280 cpu_to_le16(fxio->u.fxiocb.req_len);
3286 fx_iocb.dseg_rq_address[0] = 3281 fx_iocb.dseg_rq.address =
3287 cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle)); 3282 cpu_to_le64(fxio->u.fxiocb.req_dma_handle);
3288 fx_iocb.dseg_rq_address[1] = 3283 fx_iocb.dseg_rq.length =
3289 cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
3290 fx_iocb.dseg_rq_len =
3291 cpu_to_le32(fxio->u.fxiocb.req_len); 3284 cpu_to_le32(fxio->u.fxiocb.req_len);
3292 } 3285 }
3293 3286
@@ -3295,11 +3288,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3295 fx_iocb.rsp_dsdcnt = cpu_to_le16(1); 3288 fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
3296 fx_iocb.rsp_xfrcnt = 3289 fx_iocb.rsp_xfrcnt =
3297 cpu_to_le16(fxio->u.fxiocb.rsp_len); 3290 cpu_to_le16(fxio->u.fxiocb.rsp_len);
3298 fx_iocb.dseg_rsp_address[0] = 3291 fx_iocb.dseg_rsp.address =
3299 cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle)); 3292 cpu_to_le64(fxio->u.fxiocb.rsp_dma_handle);
3300 fx_iocb.dseg_rsp_address[1] = 3293 fx_iocb.dseg_rsp.length =
3301 cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
3302 fx_iocb.dseg_rsp_len =
3303 cpu_to_le32(fxio->u.fxiocb.rsp_len); 3294 cpu_to_le32(fxio->u.fxiocb.rsp_len);
3304 } 3295 }
3305 3296
@@ -3329,19 +3320,17 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3329 int avail_dsds, tot_dsds; 3320 int avail_dsds, tot_dsds;
3330 cont_a64_entry_t lcont_pkt; 3321 cont_a64_entry_t lcont_pkt;
3331 cont_a64_entry_t *cont_pkt = NULL; 3322 cont_a64_entry_t *cont_pkt = NULL;
3332 __le32 *cur_dsd; 3323 struct dsd64 *cur_dsd;
3333 int index = 0, cont = 0; 3324 int index = 0, cont = 0;
3334 3325
3335 fx_iocb.req_dsdcnt = 3326 fx_iocb.req_dsdcnt =
3336 cpu_to_le16(bsg_job->request_payload.sg_cnt); 3327 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3337 tot_dsds = 3328 tot_dsds =
3338 bsg_job->request_payload.sg_cnt; 3329 bsg_job->request_payload.sg_cnt;
3339 cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0]; 3330 cur_dsd = &fx_iocb.dseg_rq;
3340 avail_dsds = 1; 3331 avail_dsds = 1;
3341 for_each_sg(bsg_job->request_payload.sg_list, sg, 3332 for_each_sg(bsg_job->request_payload.sg_list, sg,
3342 tot_dsds, index) { 3333 tot_dsds, index) {
3343 dma_addr_t sle_dma;
3344
3345 /* Allocate additional continuation packets? */ 3334 /* Allocate additional continuation packets? */
3346 if (avail_dsds == 0) { 3335 if (avail_dsds == 0) {
3347 /* 3336 /*
@@ -3353,17 +3342,13 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3353 cont_pkt = 3342 cont_pkt =
3354 qlafx00_prep_cont_type1_iocb( 3343 qlafx00_prep_cont_type1_iocb(
3355 sp->vha->req, &lcont_pkt); 3344 sp->vha->req, &lcont_pkt);
3356 cur_dsd = (__le32 *) 3345 cur_dsd = lcont_pkt.dsd;
3357 lcont_pkt.dseg_0_address;
3358 avail_dsds = 5; 3346 avail_dsds = 5;
3359 cont = 1; 3347 cont = 1;
3360 entry_cnt++; 3348 entry_cnt++;
3361 } 3349 }
3362 3350
3363 sle_dma = sg_dma_address(sg); 3351 append_dsd64(&cur_dsd, sg);
3364 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3365 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3366 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3367 avail_dsds--; 3352 avail_dsds--;
3368 3353
3369 if (avail_dsds == 0 && cont == 1) { 3354 if (avail_dsds == 0 && cont == 1) {
@@ -3391,19 +3376,17 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3391 int avail_dsds, tot_dsds; 3376 int avail_dsds, tot_dsds;
3392 cont_a64_entry_t lcont_pkt; 3377 cont_a64_entry_t lcont_pkt;
3393 cont_a64_entry_t *cont_pkt = NULL; 3378 cont_a64_entry_t *cont_pkt = NULL;
3394 __le32 *cur_dsd; 3379 struct dsd64 *cur_dsd;
3395 int index = 0, cont = 0; 3380 int index = 0, cont = 0;
3396 3381
3397 fx_iocb.rsp_dsdcnt = 3382 fx_iocb.rsp_dsdcnt =
3398 cpu_to_le16(bsg_job->reply_payload.sg_cnt); 3383 cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3399 tot_dsds = bsg_job->reply_payload.sg_cnt; 3384 tot_dsds = bsg_job->reply_payload.sg_cnt;
3400 cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0]; 3385 cur_dsd = &fx_iocb.dseg_rsp;
3401 avail_dsds = 1; 3386 avail_dsds = 1;
3402 3387
3403 for_each_sg(bsg_job->reply_payload.sg_list, sg, 3388 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3404 tot_dsds, index) { 3389 tot_dsds, index) {
3405 dma_addr_t sle_dma;
3406
3407 /* Allocate additional continuation packets? */ 3390 /* Allocate additional continuation packets? */
3408 if (avail_dsds == 0) { 3391 if (avail_dsds == 0) {
3409 /* 3392 /*
@@ -3415,17 +3398,13 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3415 cont_pkt = 3398 cont_pkt =
3416 qlafx00_prep_cont_type1_iocb( 3399 qlafx00_prep_cont_type1_iocb(
3417 sp->vha->req, &lcont_pkt); 3400 sp->vha->req, &lcont_pkt);
3418 cur_dsd = (__le32 *) 3401 cur_dsd = lcont_pkt.dsd;
3419 lcont_pkt.dseg_0_address;
3420 avail_dsds = 5; 3402 avail_dsds = 5;
3421 cont = 1; 3403 cont = 1;
3422 entry_cnt++; 3404 entry_cnt++;
3423 } 3405 }
3424 3406
3425 sle_dma = sg_dma_address(sg); 3407 append_dsd64(&cur_dsd, sg);
3426 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3427 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3428 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3429 avail_dsds--; 3408 avail_dsds--;
3430 3409
3431 if (avail_dsds == 0 && cont == 1) { 3410 if (avail_dsds == 0 && cont == 1) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index aeaa1b40b1fc..4567f0c42486 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -7,6 +7,8 @@
7#ifndef __QLA_MR_H 7#ifndef __QLA_MR_H
8#define __QLA_MR_H 8#define __QLA_MR_H
9 9
10#include "qla_dsd.h"
11
10/* 12/*
11 * The PCI VendorID and DeviceID for our board. 13 * The PCI VendorID and DeviceID for our board.
12 */ 14 */
@@ -46,8 +48,7 @@ struct cmd_type_7_fx00 {
46 uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */ 48 uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */
47 __le32 byte_count; /* Total byte count. */ 49 __le32 byte_count; /* Total byte count. */
48 50
49 uint32_t dseg_0_address[2]; /* Data segment 0 address. */ 51 struct dsd64 dsd;
50 uint32_t dseg_0_len; /* Data segment 0 length. */
51}; 52};
52 53
53#define STATUS_TYPE_FX00 0x01 /* Status entry. */ 54#define STATUS_TYPE_FX00 0x01 /* Status entry. */
@@ -176,10 +177,8 @@ struct fxdisc_entry_fx00 {
176 uint8_t flags; 177 uint8_t flags;
177 uint8_t reserved_1; 178 uint8_t reserved_1;
178 179
179 __le32 dseg_rq_address[2]; /* Data segment 0 address. */ 180 struct dsd64 dseg_rq;
180 __le32 dseg_rq_len; /* Data segment 0 length. */ 181 struct dsd64 dseg_rsp;
181 __le32 dseg_rsp_address[2]; /* Data segment 1 address. */
182 __le32 dseg_rsp_len; /* Data segment 1 length. */
183 182
184 __le32 dataword; 183 __le32 dataword;
185 __le32 adapid; 184 __le32 adapid;
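The qla_mr.h changes are a pure relayout: a two-element 32-bit address array plus a separate 32-bit length collapses into one struct dsd64, so each descriptor still occupies 12 bytes and the firmware-visible offsets in cmd_type_7_fx00 and fxdisc_entry_fx00 do not move. A compile-time check along the following lines makes that assumption explicit; the old_fx00_dsd type here is illustrative only and not part of the patch.

        #include <linux/build_bug.h>

        /* Illustrative layout check, not driver code. */
        struct old_fx00_dsd {
                __le32 dseg_address[2];         /* Data segment address. */
                __le32 dseg_len;                /* Data segment length.  */
        };

        static inline void check_fx00_dsd_layout(void)
        {
                BUILD_BUG_ON(sizeof(struct dsd64) != 12);
                BUILD_BUG_ON(sizeof(struct dsd64) != sizeof(struct old_fx00_dsd));
        }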
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 0829ab7f0d54..29b30df97c58 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -305,7 +305,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
305 uint16_t req_cnt; 305 uint16_t req_cnt;
306 uint16_t tot_dsds; 306 uint16_t tot_dsds;
307 uint16_t avail_dsds; 307 uint16_t avail_dsds;
308 uint32_t *cur_dsd; 308 struct dsd64 *cur_dsd;
309 struct req_que *req = NULL; 309 struct req_que *req = NULL;
310 struct scsi_qla_host *vha = sp->fcport->vha; 310 struct scsi_qla_host *vha = sp->fcport->vha;
311 struct qla_hw_data *ha = vha->hw; 311 struct qla_hw_data *ha = vha->hw;
@@ -423,12 +423,11 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
423 423
424 /* One DSD is available in the Command Type NVME IOCB */ 424 /* One DSD is available in the Command Type NVME IOCB */
425 avail_dsds = 1; 425 avail_dsds = 1;
426 cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0]; 426 cur_dsd = &cmd_pkt->nvme_dsd;
427 sgl = fd->first_sgl; 427 sgl = fd->first_sgl;
428 428
429 /* Load data segments */ 429 /* Load data segments */
430 for_each_sg(sgl, sg, tot_dsds, i) { 430 for_each_sg(sgl, sg, tot_dsds, i) {
431 dma_addr_t sle_dma;
432 cont_a64_entry_t *cont_pkt; 431 cont_a64_entry_t *cont_pkt;
433 432
434 /* Allocate additional continuation packets? */ 433 /* Allocate additional continuation packets? */
@@ -450,14 +449,11 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
450 put_unaligned_le32(CONTINUE_A64_TYPE, 449 put_unaligned_le32(CONTINUE_A64_TYPE,
451 &cont_pkt->entry_type); 450 &cont_pkt->entry_type);
452 451
453 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 452 cur_dsd = cont_pkt->dsd;
454 avail_dsds = 5; 453 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
455 } 454 }
456 455
457 sle_dma = sg_dma_address(sg); 456 append_dsd64(&cur_dsd, sg);
458 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
459 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
460 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
461 avail_dsds--; 457 avail_dsds--;
462 } 458 }
463 459
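In the NVMe fast path the literal 5 for a continuation entry also becomes ARRAY_SIZE(cont_pkt->dsd), which only works because cont_a64_entry_t now carries an array of typed descriptors instead of raw dwords. Roughly, the continuation entry the loop relies on has the following shape; the header fields are abbreviated here and the real definition lives in qla_def.h.

        /* Abbreviated sketch of the 64-bit continuation IOCB after the change. */
        typedef struct {
                uint8_t entry_type;             /* CONTINUE_A64_TYPE */
                uint8_t entry_count;
                uint8_t sys_define;
                uint8_t entry_status;
                struct dsd64 dsd[5];            /* 5 * 12 bytes of data segments */
        } cont_a64_entry_sketch_t;

With 4 header bytes plus five 12-byte descriptors the entry remains a 64-byte IOCB, and avail_dsds now tracks the array size instead of a magic number.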
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index 0db04f0a4d5d..b67aa271f6cc 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -13,6 +13,7 @@
13#include <linux/nvme-fc-driver.h> 13#include <linux/nvme-fc-driver.h>
14 14
15#include "qla_def.h" 15#include "qla_def.h"
16#include "qla_dsd.h"
16 17
17/* default dev loss time (seconds) before transport tears down ctrl */ 18/* default dev loss time (seconds) before transport tears down ctrl */
18#define NVME_FC_DEV_LOSS_TMO 30 19#define NVME_FC_DEV_LOSS_TMO 30
@@ -72,8 +73,7 @@ struct cmd_nvme {
72 uint8_t port_id[3]; /* PortID of destination port. */ 73 uint8_t port_id[3]; /* PortID of destination port. */
73 uint8_t vp_index; 74 uint8_t vp_index;
74 75
75 uint32_t nvme_data_dseg_address[2]; /* Data segment address. */ 76 struct dsd64 nvme_dsd;
76 uint32_t nvme_data_dseg_len; /* Data segment length. */
77}; 77};
78 78
79#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */ 79#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
@@ -101,10 +101,7 @@ struct pt_ls4_request {
101 uint32_t rsvd3; 101 uint32_t rsvd3;
102 uint32_t rx_byte_count; 102 uint32_t rx_byte_count;
103 uint32_t tx_byte_count; 103 uint32_t tx_byte_count;
104 uint32_t dseg0_address[2]; 104 struct dsd64 dsd[2];
105 uint32_t dseg0_len;
106 uint32_t dseg1_address[2];
107 uint32_t dseg1_len;
108}; 105};
109 106
110#define PT_LS4_UNSOL 0x56 /* pass-up unsolicited rec FC-NVMe request */ 107#define PT_LS4_UNSOL 0x56 /* pass-up unsolicited rec FC-NVMe request */
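For pt_ls4_request the two named request and response segments become a two-entry array, so callers index dsd[0] and dsd[1] rather than writing separate dseg0_*/dseg1_* fields. A minimal, hypothetical fill helper shows the intended usage; the function and parameter names are made up for illustration.

        /* Hypothetical example, not driver code. */
        static void pt_ls4_fill_dsds(struct pt_ls4_request *req,
                                     dma_addr_t cmd_dma, u32 cmd_len,
                                     dma_addr_t rsp_dma, u32 rsp_len)
        {
                put_unaligned_le64(cmd_dma, &req->dsd[0].address);
                req->dsd[0].length = cpu_to_le32(cmd_len);

                put_unaligned_le64(rsp_dma, &req->dsd[1].address);
                req->dsd[1].length = cpu_to_le32(rsp_len);
        }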
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 420c6cd0a7ea..4ee54b13a612 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2640,7 +2640,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
2640static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm) 2640static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2641{ 2641{
2642 int cnt; 2642 int cnt;
2643 uint32_t *dword_ptr; 2643 struct dsd64 *cur_dsd;
2644 2644
2645 /* Build continuation packets */ 2645 /* Build continuation packets */
2646 while (prm->seg_cnt > 0) { 2646 while (prm->seg_cnt > 0) {
@@ -2661,19 +2661,13 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2661 cont_pkt64->sys_define = 0; 2661 cont_pkt64->sys_define = 0;
2662 2662
2663 cont_pkt64->entry_type = CONTINUE_A64_TYPE; 2663 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2664 dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address; 2664 cur_dsd = cont_pkt64->dsd;
2665 2665
2666 /* Load continuation entry data segments */ 2666 /* Load continuation entry data segments */
2667 for (cnt = 0; 2667 for (cnt = 0;
2668 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt; 2668 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2669 cnt++, prm->seg_cnt--) { 2669 cnt++, prm->seg_cnt--) {
2670 *dword_ptr++ = 2670 append_dsd64(&cur_dsd, prm->sg);
2671 cpu_to_le32(lower_32_bits
2672 (sg_dma_address(prm->sg)));
2673 *dword_ptr++ = cpu_to_le32(upper_32_bits
2674 (sg_dma_address(prm->sg)));
2675 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
2676
2677 prm->sg = sg_next(prm->sg); 2671 prm->sg = sg_next(prm->sg);
2678 } 2672 }
2679 } 2673 }
@@ -2686,13 +2680,13 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2686static void qlt_load_data_segments(struct qla_tgt_prm *prm) 2680static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2687{ 2681{
2688 int cnt; 2682 int cnt;
2689 uint32_t *dword_ptr; 2683 struct dsd64 *cur_dsd;
2690 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; 2684 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2691 2685
2692 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); 2686 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2693 2687
2694 /* Setup packet address segment pointer */ 2688 /* Setup packet address segment pointer */
2695 dword_ptr = pkt24->u.status0.dseg_0_address; 2689 cur_dsd = &pkt24->u.status0.dsd;
2696 2690
2697 /* Set total data segment count */ 2691 /* Set total data segment count */
2698 if (prm->seg_cnt) 2692 if (prm->seg_cnt)
@@ -2700,8 +2694,8 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2700 2694
2701 if (prm->seg_cnt == 0) { 2695 if (prm->seg_cnt == 0) {
2702 /* No data transfer */ 2696 /* No data transfer */
2703 *dword_ptr++ = 0; 2697 cur_dsd->address = 0;
2704 *dword_ptr = 0; 2698 cur_dsd->length = 0;
2705 return; 2699 return;
2706 } 2700 }
2707 2701
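When there is no data to transfer, the code now clears the named address and length fields instead of two anonymous dwords. Assigning a plain 0 is safe because zero reads the same in every byte order; an equivalent with the endian conversions spelled out would be:

        cur_dsd->address = cpu_to_le64(0);
        cur_dsd->length = cpu_to_le32(0);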
@@ -2711,14 +2705,7 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2711 for (cnt = 0; 2705 for (cnt = 0;
2712 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt; 2706 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
2713 cnt++, prm->seg_cnt--) { 2707 cnt++, prm->seg_cnt--) {
2714 *dword_ptr++ = 2708 append_dsd64(&cur_dsd, prm->sg);
2715 cpu_to_le32(lower_32_bits(sg_dma_address(prm->sg)));
2716
2717 *dword_ptr++ = cpu_to_le32(upper_32_bits(
2718 sg_dma_address(prm->sg)));
2719
2720 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
2721
2722 prm->sg = sg_next(prm->sg); 2709 prm->sg = sg_next(prm->sg);
2723 } 2710 }
2724 2711
@@ -3042,7 +3029,7 @@ qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
3042static inline int 3029static inline int
3043qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) 3030qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
3044{ 3031{
3045 uint32_t *cur_dsd; 3032 struct dsd64 *cur_dsd;
3046 uint32_t transfer_length = 0; 3033 uint32_t transfer_length = 0;
3047 uint32_t data_bytes; 3034 uint32_t data_bytes;
3048 uint32_t dif_bytes; 3035 uint32_t dif_bytes;
@@ -3193,7 +3180,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
3193 pkt->crc_context_len = CRC_CONTEXT_LEN_FW; 3180 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
3194 3181
3195 if (!bundling) { 3182 if (!bundling) {
3196 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; 3183 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
3197 } else { 3184 } else {
3198 /* 3185 /*
3199 * Configure Bundling if we need to fetch interlaving 3186 * Configure Bundling if we need to fetch interlaving
@@ -3203,7 +3190,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
3203 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 3190 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
3204 crc_ctx_pkt->u.bundling.dseg_count = 3191 crc_ctx_pkt->u.bundling.dseg_count =
3205 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); 3192 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
3206 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; 3193 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
3207 } 3194 }
3208 3195
3209 /* Finish the common fields of CRC pkt */ 3196 /* Finish the common fields of CRC pkt */
@@ -3236,7 +3223,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
3236 /* Walks dif segments */ 3223 /* Walks dif segments */
3237 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; 3224 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
3238 3225
3239 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 3226 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
3240 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 3227 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
3241 prm->prot_seg_cnt, cmd)) 3228 prm->prot_seg_cnt, cmd))
3242 goto crc_queuing_error; 3229 goto crc_queuing_error;
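qla24xx_walk_and_build_prot_sglist() is called here with cur_dsd passed through unchanged, so its descriptor parameter must also have switched from a raw dword pointer to struct dsd64 *; that prototype lives in qla_gbl.h, which this commit touches but which is not shown in this excerpt, presumably along these lines:

        /* Presumed updated prototype; see qla_gbl.h in this commit. */
        int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
                                               struct dsd64 *dsd, uint16_t tot_dsds,
                                               struct qla_tgt_cmd *tc);

The practical gain is type safety: a caller that still hands in a scratch uint32_t * no longer compiles, which removes the kind of raw pointer arithmetic this conversion is eliminating.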
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 262fc33fb473..b09a9232b817 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -29,6 +29,7 @@
29#define __QLA_TARGET_H 29#define __QLA_TARGET_H
30 30
31#include "qla_def.h" 31#include "qla_def.h"
32#include "qla_dsd.h"
32 33
33/* 34/*
34 * Must be changed on any change in any initiator visible interfaces or 35 * Must be changed on any change in any initiator visible interfaces or
@@ -224,12 +225,7 @@ struct ctio_to_2xxx {
224 uint16_t reserved_1[3]; 225 uint16_t reserved_1[3];
225 uint16_t scsi_status; 226 uint16_t scsi_status;
226 uint32_t transfer_length; 227 uint32_t transfer_length;
227 uint32_t dseg_0_address; /* Data segment 0 address. */ 228 struct dsd32 dsd[3];
228 uint32_t dseg_0_length; /* Data segment 0 length. */
229 uint32_t dseg_1_address; /* Data segment 1 address. */
230 uint32_t dseg_1_length; /* Data segment 1 length. */
231 uint32_t dseg_2_address; /* Data segment 2 address. */
232 uint32_t dseg_2_length; /* Data segment 2 length. */
233} __packed; 229} __packed;
234#define ATIO_PATH_INVALID 0x07 230#define ATIO_PATH_INVALID 0x07
235#define ATIO_CANT_PROV_CAP 0x16 231#define ATIO_CANT_PROV_CAP 0x16
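The legacy 2xxx CTIO above switches to the 32-bit variant, struct dsd32, with its three descriptors folded into an array. Its definition is also part of the new qla_dsd.h and is not shown in this excerpt; it presumably mirrors the 64-bit helper:

        /* Sketch of the 32-bit descriptor and its helper from qla_dsd.h. */
        struct dsd32 {
                __le32 address;
                __le32 length;
        };

        static inline void append_dsd32(struct dsd32 **dsd, struct scatterlist *sg)
        {
                put_unaligned_le32(sg_dma_address(sg), &(*dsd)->address);
                put_unaligned_le32(sg_dma_len(sg), &(*dsd)->length);
                (*dsd)++;
        }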
@@ -429,10 +425,7 @@ struct ctio7_to_24xx {
429 uint32_t reserved2; 425 uint32_t reserved2;
430 uint32_t transfer_length; 426 uint32_t transfer_length;
431 uint32_t reserved3; 427 uint32_t reserved3;
432 /* Data segment 0 address. */ 428 struct dsd64 dsd;
433 uint32_t dseg_0_address[2];
434 /* Data segment 0 length. */
435 uint32_t dseg_0_length;
436 } status0; 429 } status0;
437 struct { 430 struct {
438 uint16_t sense_length; 431 uint16_t sense_length;