-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.c	264
-rw-r--r--	drivers/infiniband/ulp/isert/ib_isert.h	2
2 files changed, 114 insertions(+), 152 deletions(-)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 027a818f287b..14fd0f401c3d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1233,18 +1233,26 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 
-	pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");
-
+	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
 	if (wr->sge) {
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
+		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
+		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
+				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
 		wr->sge = NULL;
 	}
 
-	kfree(wr->send_wr);
-	wr->send_wr = NULL;
+	if (wr->send_wr) {
+		pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
+		kfree(wr->send_wr);
+		wr->send_wr = NULL;
+	}
 
-	kfree(isert_cmd->ib_sge);
-	isert_cmd->ib_sge = NULL;
+	if (wr->ib_sge) {
+		pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
+		kfree(wr->ib_sge);
+		wr->ib_sge = NULL;
+	}
 }
 
 static void
@@ -1339,25 +1347,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;
+	struct isert_conn *isert_conn = isert_cmd->conn;
 
 	iscsit_stop_dataout_timer(cmd);
+	isert_unmap_cmd(isert_cmd, isert_conn);
+	cmd->write_data_done = wr->cur_rdma_length;
 
-	if (wr->sge) {
-		pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
-		wr->sge = NULL;
-	}
-
-	if (isert_cmd->ib_sge) {
-		pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
-		kfree(isert_cmd->ib_sge);
-		isert_cmd->ib_sge = NULL;
-	}
-
-	cmd->write_data_done = se_cmd->data_length;
-
-	pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
+	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
 	spin_lock_bh(&cmd->istate_lock);
 	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
 	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1816,8 +1812,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 			  ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
 		ib_sge->lkey = isert_conn->conn_mr->lkey;
 
-		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
-			 ib_sge->addr, ib_sge->length);
+		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
+			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
 		page_off = 0;
 		data_left -= ib_sge->length;
 		ib_sge++;
@@ -1831,84 +1827,127 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 }
 
 static int
-isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+	       struct isert_rdma_wr *wr)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_send_wr *wr_failed, *send_wr;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_send_wr *send_wr;
 	struct ib_sge *ib_sge;
-	struct scatterlist *sg;
-	u32 offset = 0, data_len, data_left, rdma_write_max;
-	int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;
-
-	pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);
+	struct scatterlist *sg_start;
+	u32 sg_off = 0, sg_nents;
+	u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
+	int ret = 0, count, i, ib_sge_cnt;
+
+	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+		data_left = se_cmd->data_length;
+		iscsit_increment_maxcmdsn(cmd, conn->sess);
+		cmd->stat_sn = conn->stat_sn++;
+	} else {
+		sg_off = cmd->write_data_done / PAGE_SIZE;
+		data_left = se_cmd->data_length - cmd->write_data_done;
+		offset = cmd->write_data_done;
+		isert_cmd->tx_desc.isert_cmd = isert_cmd;
+	}
 
-	sg = &se_cmd->t_data_sg[0];
-	sg_nents = se_cmd->t_data_nents;
+	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+	sg_nents = se_cmd->t_data_nents - sg_off;
 
-	count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
+	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
+			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	if (unlikely(!count)) {
-		pr_err("Unable to map put_datain SGs\n");
+		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
 		return -EINVAL;
 	}
-	wr->sge = sg;
+	wr->sge = sg_start;
 	wr->num_sge = sg_nents;
-	pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
-		 count, sg, sg_nents);
+	wr->cur_rdma_length = data_left;
+	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+		 isert_cmd, count, sg_start, sg_nents, data_left);
 
 	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
 	if (!ib_sge) {
-		pr_warn("Unable to allocate datain ib_sge\n");
+		pr_warn("Unable to allocate ib_sge\n");
 		ret = -ENOMEM;
 		goto unmap_sg;
 	}
-	isert_cmd->ib_sge = ib_sge;
-
-	pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
-		 ib_sge, se_cmd->t_data_nents);
+	wr->ib_sge = ib_sge;
 
 	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
 	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
 			      GFP_KERNEL);
 	if (!wr->send_wr) {
-		pr_err("Unable to allocate wr->send_wr\n");
+		pr_debug("Unable to allocate wr->send_wr\n");
 		ret = -ENOMEM;
 		goto unmap_sg;
 	}
-	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
-		 wr->send_wr, wr->send_wr_num);
-
-	iscsit_increment_maxcmdsn(cmd, conn->sess);
-	cmd->stat_sn = conn->stat_sn++;
 
 	wr->isert_cmd = isert_cmd;
 	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
-	data_left = se_cmd->data_length;
 
 	for (i = 0; i < wr->send_wr_num; i++) {
 		send_wr = &isert_cmd->rdma_wr.send_wr[i];
 		data_len = min(data_left, rdma_write_max);
 
-		send_wr->opcode = IB_WR_RDMA_WRITE;
 		send_wr->send_flags = 0;
-		send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
-		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+			send_wr->opcode = IB_WR_RDMA_WRITE;
+			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
+			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+			if (i + 1 == wr->send_wr_num)
+				send_wr->next = &isert_cmd->tx_desc.send_wr;
+			else
+				send_wr->next = &wr->send_wr[i + 1];
+		} else {
+			send_wr->opcode = IB_WR_RDMA_READ;
+			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
+			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
+			if (i + 1 == wr->send_wr_num)
+				send_wr->send_flags = IB_SEND_SIGNALED;
+			else
+				send_wr->next = &wr->send_wr[i + 1];
+		}
 
 		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
 					send_wr, data_len, offset);
 		ib_sge += ib_sge_cnt;
 
-		if (i + 1 == wr->send_wr_num)
-			send_wr->next = &isert_cmd->tx_desc.send_wr;
-		else
-			send_wr->next = &wr->send_wr[i + 1];
-
 		offset += data_len;
+		va_offset += data_len;
 		data_left -= data_len;
 	}
+
+	return 0;
+unmap_sg:
+	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
+			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	return ret;
+}
+
+static int
+isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct isert_cmd *isert_cmd = container_of(cmd,
+				struct isert_cmd, iscsi_cmd);
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *wr_failed;
+	int rc;
+
+	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
+		 isert_cmd, se_cmd->data_length);
+	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
+	rc = isert_map_rdma(conn, cmd, wr);
+	if (rc) {
+		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+		return rc;
+	}
+
 	/*
 	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
 	 */
@@ -1925,12 +1964,10 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
 		atomic_dec(&isert_conn->post_send_buf_count);
 	}
-	pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
-	return 1;
+	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
+		 isert_cmd);
 
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
-	return ret;
+	return 1;
 }
 
 static int
@@ -1940,89 +1977,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_send_wr *wr_failed, *send_wr;
-	struct ib_sge *ib_sge;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-	struct scatterlist *sg_start;
-	u32 sg_off, sg_nents, page_off, va_offset = 0;
-	u32 offset = 0, data_len, data_left, rdma_write_max;
-	int rc, ret = 0, count, i, ib_sge_cnt;
-
-	pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
-		 se_cmd->data_length, cmd->write_data_done);
-
-	sg_off = cmd->write_data_done / PAGE_SIZE;
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	page_off = cmd->write_data_done % PAGE_SIZE;
-
-	pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
-		 sg_off, sg_start, page_off);
-
-	data_left = se_cmd->data_length - cmd->write_data_done;
-	sg_nents = se_cmd->t_data_nents - sg_off;
-
-	pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
-		 data_left, sg_nents);
-
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Unable to map get_dataout SGs\n");
-		return -EINVAL;
-	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
-		 count, sg_start, sg_nents);
-
-	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
-	if (!ib_sge) {
-		pr_warn("Unable to allocate dataout ib_sge\n");
-		ret = -ENOMEM;
-		goto unmap_sg;
-	}
-	isert_cmd->ib_sge = ib_sge;
-
-	pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
-		 ib_sge, sg_nents);
-
-	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
-	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
-			      GFP_KERNEL);
-	if (!wr->send_wr) {
-		pr_debug("Unable to allocate wr->send_wr\n");
-		ret = -ENOMEM;
-		goto unmap_sg;
-	}
-	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
-		 wr->send_wr, wr->send_wr_num);
-
-	isert_cmd->tx_desc.isert_cmd = isert_cmd;
+	struct ib_send_wr *wr_failed;
+	int rc;
 
+	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
+		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
 	wr->iser_ib_op = ISER_IB_RDMA_READ;
-	wr->isert_cmd = isert_cmd;
-	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
-	offset = cmd->write_data_done;
-
-	for (i = 0; i < wr->send_wr_num; i++) {
-		send_wr = &isert_cmd->rdma_wr.send_wr[i];
-		data_len = min(data_left, rdma_write_max);
-
-		send_wr->opcode = IB_WR_RDMA_READ;
-		send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
-		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
-
-		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
-					send_wr, data_len, offset);
-		ib_sge += ib_sge_cnt;
-
-		if (i + 1 == wr->send_wr_num)
-			send_wr->send_flags = IB_SEND_SIGNALED;
-		else
-			send_wr->next = &wr->send_wr[i + 1];
-
-		offset += data_len;
-		va_offset += data_len;
-		data_left -= data_len;
+	rc = isert_map_rdma(conn, cmd, wr);
+	if (rc) {
+		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+		return rc;
 	}
 
 	atomic_inc(&isert_conn->post_send_buf_count);
@@ -2032,12 +1996,10 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
 		atomic_dec(&isert_conn->post_send_buf_count);
 	}
-	pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
-	return 0;
+	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
+		 isert_cmd);
 
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
-	return ret;
+	return 0;
 }
 
 static int
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 0d45945201cb..21ffd4eff004 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -54,6 +54,7 @@ struct isert_rdma_wr {
 	struct scatterlist *sge;
 	int send_wr_num;
 	struct ib_send_wr *send_wr;
+	u32 cur_rdma_length;
 };
 
 struct isert_cmd {
@@ -68,7 +69,6 @@ struct isert_cmd {
 	u32 rdma_wr_num;
 	struct isert_conn *conn;
 	struct iscsi_cmd *iscsi_cmd;
-	struct ib_sge *ib_sge;
 	struct iser_tx_desc tx_desc;
 	struct isert_rdma_wr rdma_wr;
 	struct work_struct comp_work;