author    Linus Torvalds <torvalds@linux-foundation.org>  2019-07-15 23:38:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-07-15 23:38:15 -0400
commit    2a3c389a0fde49b241430df806a34276568cfb29 (patch)
tree      9cf35829317e8cc2aaffc4341fb824dad63fce02 /drivers/infiniband/ulp
parent    8de262531f5fbb7458463224a7587429800c24bf (diff)
parent    0b043644c0ca601cb19943a81aa1f1455dbe9461 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "A smaller cycle this time. Notably we see another new driver, 'Soft
  iWarp', and the deletion of an ancient unused driver for nes.

   - Revise and simplify the signature offload RDMA MR APIs

   - More progress on hoisting object allocation boiler plate code out
     of the drivers

   - Driver bug fixes and revisions for hns, hfi1, efa, cxgb4, qib, i40iw

   - Tree wide cleanups: struct_size, put_user_page, xarray, rst doc
     conversion

   - Removal of obsolete ib_ucm chardev and nes driver

   - netlink based discovery of chardevs and autoloading of the modules
     providing them

   - Move more of the rdmavt/hfi1 uapi to include/uapi/rdma

   - New driver 'siw' for software based iWarp running on top of netdev,
     much like rxe's software RoCE

   - mlx5 feature to report events in their raw devx format to userspace

   - Expose per-object counters through rdma tool

   - Adaptive interrupt moderation for RDMA (DIM), sharing the DIM core
     from netdev"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (194 commits)
  RMDA/siw: Require a 64 bit arch
  RDMA/siw: Mark expected switch fall-throughs
  RDMA/core: Fix -Wunused-const-variable warnings
  rdma/siw: Remove set but not used variable 's'
  rdma/siw: Add missing dependencies on LIBCRC32C and DMA_VIRT_OPS
  RDMA/siw: Add missing rtnl_lock around access to ifa
  rdma/siw: Use proper enumerated type in map_cqe_status
  RDMA/siw: Remove unnecessary kthread create/destroy printouts
  IB/rdmavt: Fix variable shadowing issue in rvt_create_cq
  RDMA/core: Fix race when resolving IP address
  RDMA/core: Make rdma_counter.h compile stand alone
  IB/core: Work on the caller socket net namespace in nldev_newlink()
  RDMA/rxe: Fill in wc byte_len with IB_WC_RECV_RDMA_WITH_IMM
  RDMA/mlx5: Set RDMA DIM to be enabled by default
  RDMA/nldev: Added configuration of RDMA dynamic interrupt moderation to netlink
  RDMA/core: Provide RDMA DIM support for ULPs
  linux/dim: Implement RDMA adaptive moderation (DIM)
  IB/mlx5: Report correctly tag matching rendezvous capability
  docs: infiniband: add it to the driver-api bookset
  IB/mlx5: Implement VHCA tunnel mechanism in DEVX
  ...
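For readers skimming the ULP side of this merge, the sketch below condenses the signature-offload MR API change that the iser/isert hunks in this diff adopt: one PI-capable MR allocated with ib_alloc_mr_integrity() replaces the old data/protection/signature MR triple, and a single IB_WR_REG_MR_INTEGRITY registration WR replaces IB_WR_REG_SIG_MR. This is an illustrative sketch only, not code from the tree: the example_* names and the function boundary are invented for the example, while the ib_* calls, flags and struct fields are the ones that appear verbatim in the hunks below.

/*
 * Illustrative sketch (hypothetical example_* names) of the post-patch
 * integrity MR registration flow used by the iser changes in this diff.
 */
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int example_reg_pi_mr(struct ib_pd *pd, unsigned int size,
			     struct scatterlist *data_sg, int data_nents,
			     struct scatterlist *prot_sg, int prot_nents,
			     struct ib_cqe *cqe, struct ib_send_wr *send_wr,
			     struct ib_reg_wr *reg_wr, struct ib_mr **out_mr)
{
	struct ib_mr *mr;
	int ret;

	/* One PI-capable MR covers data + protection + signature. */
	mr = ib_alloc_mr_integrity(pd, size, size);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Map data and protection SG lists in a single call. */
	ret = ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
			      prot_sg, prot_nents, NULL, SZ_4K);
	if (unlikely(ret)) {	/* nonzero is failure, as checked in the iser hunk */
		ib_dereg_mr(mr);
		return ret;
	}

	/*
	 * Signature attributes now hang off the MR instead of the WR;
	 * callers fill the wire/mem domains here (see iser_set_sig_attrs()).
	 */
	memset(mr->sig_attrs, 0, sizeof(*mr->sig_attrs));

	/* One registration WR, chained in front of the PDU send WR. */
	memset(reg_wr, 0, sizeof(*reg_wr));
	reg_wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
	reg_wr->wr.wr_cqe = cqe;
	reg_wr->wr.next = send_wr;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE;

	*out_mr = mr;
	return 0;
}

On the completion side the same MR is what gets verified, via ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status), as the iser_check_task_pi_status() hunk below shows.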
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/ipoib/Kconfig          |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c       |   1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c  |   3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c     |  34
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c    |   7
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c      |  12
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h      |  64
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c  |  12
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c     | 121
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c      | 156
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c       |  19
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c           |  21
12 files changed, 174 insertions, 278 deletions
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 4760ce465d89..7af68604af77 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -7,7 +7,7 @@ config INFINIBAND_IPOIB
 	  transports IP packets over InfiniBand so you can use your IB
 	  device as a fancy NIC.
 
-	  See Documentation/infiniband/ipoib.txt for more information
+	  See Documentation/infiniband/ipoib.rst for more information
 
 config INFINIBAND_IPOIB_CM
 	bool "IP-over-InfiniBand Connected Mode support"
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index aa9dcfc36cd3..c59e00a0881f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1153,7 +1153,6 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 		ret = -ENOMEM;
 		goto err_tx;
 	}
-	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof(*p->tx_ring));
 
 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
 	memalloc_noio_restore(noio_flag);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 83429925dfc6..63e4f9d15fd9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -138,7 +138,6 @@ static void ipoib_get_strings(struct net_device __always_unused *dev,
 			p += ETH_GSTRING_LEN;
 		}
 		break;
-	case ETH_SS_TEST:
 	default:
 		break;
 	}
@@ -149,7 +148,6 @@ static int ipoib_get_sset_count(struct net_device __always_unused *dev,
 	switch (sset) {
 	case ETH_SS_STATS:
 		return IPOIB_GLOBAL_STATS_LEN;
-	case ETH_SS_TEST:
 	default:
 		break;
 	}
@@ -222,6 +220,7 @@ static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_strings = ipoib_get_strings,
 	.get_ethtool_stats = ipoib_get_ethtool_stats,
 	.get_sset_count = ipoib_get_sset_count,
+	.get_link = ethtool_op_get_link,
 };
 
 void ipoib_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 04ea7db08e87..ac0583ff280d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1893,12 +1893,6 @@ static void ipoib_child_init(struct net_device *ndev)
 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
 	struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
 
-	dev_hold(priv->parent);
-
-	down_write(&ppriv->vlan_rwsem);
-	list_add_tail(&priv->list, &ppriv->child_intfs);
-	up_write(&ppriv->vlan_rwsem);
-
 	priv->max_ib_mtu = ppriv->max_ib_mtu;
 	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
 	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
@@ -1941,6 +1935,17 @@ static int ipoib_ndo_init(struct net_device *ndev)
 	if (rc) {
 		pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
 			priv->ca->name, priv->dev->name, priv->port, rc);
+		return rc;
+	}
+
+	if (priv->parent) {
+		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+		dev_hold(priv->parent);
+
+		down_write(&ppriv->vlan_rwsem);
+		list_add_tail(&priv->list, &ppriv->child_intfs);
+		up_write(&ppriv->vlan_rwsem);
 	}
 
 	return 0;
@@ -1958,6 +1963,14 @@ static void ipoib_ndo_uninit(struct net_device *dev)
 	 */
 	WARN_ON(!list_empty(&priv->child_intfs));
 
+	if (priv->parent) {
+		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+		down_write(&ppriv->vlan_rwsem);
+		list_del(&priv->list);
+		up_write(&ppriv->vlan_rwsem);
+	}
+
 	ipoib_neigh_hash_uninit(dev);
 
 	ipoib_ib_dev_cleanup(dev);
@@ -1969,15 +1982,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
 		priv->wq = NULL;
 	}
 
-	if (priv->parent) {
-		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
-
-		down_write(&ppriv->vlan_rwsem);
-		list_del(&priv->list);
-		up_write(&ppriv->vlan_rwsem);
-
+	if (priv->parent)
 		dev_put(priv->parent);
-	}
 }
 
 static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index ba09068f6200..b69304d28f06 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -260,11 +260,8 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
 		priv->qp = NULL;
 	}
 
-	if (ib_destroy_cq(priv->send_cq))
-		ipoib_warn(priv, "ib_cq_destroy (send) failed\n");
-
-	if (ib_destroy_cq(priv->recv_cq))
-		ipoib_warn(priv, "ib_cq_destroy (recv) failed\n");
+	ib_destroy_cq(priv->send_cq);
+	ib_destroy_cq(priv->recv_cq);
 }
 
 void ipoib_event(struct ib_event_handler *handler,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9c185a8dabd3..c7a3d75fb308 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -205,7 +205,8 @@ iser_initialize_task_headers(struct iscsi_task *task,
 		goto out;
 	}
 
-	tx_desc->wr_idx = 0;
+	tx_desc->inv_wr.next = NULL;
+	tx_desc->reg_wr.wr.next = NULL;
 	tx_desc->mapped = true;
 	tx_desc->dma_addr = dma_addr;
 	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
@@ -406,13 +407,10 @@ static u8
 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
+	enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ?
+					ISER_DIR_IN : ISER_DIR_OUT;
 
-	if (iser_task->dir[ISER_DIR_IN])
-		return iser_check_task_pi_status(iser_task, ISER_DIR_IN,
-						 sector);
-	else
-		return iser_check_task_pi_status(iser_task, ISER_DIR_OUT,
-						 sector);
+	return iser_check_task_pi_status(iser_task, dir, sector);
 }
 
 /**
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 36d525110fd2..39bf213444cb 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -225,14 +225,6 @@ enum iser_desc_type {
 	ISCSI_TX_DATAOUT
 };
 
-/* Maximum number of work requests per task:
- * Data memory region local invalidate + fast registration
- * Protection memory region local invalidate + fast registration
- * Signature memory region local invalidate + fast registration
- * PDU send
- */
-#define ISER_MAX_WRS 7
-
 /**
  * struct iser_tx_desc - iSER TX descriptor
  *
@@ -245,11 +237,9 @@ enum iser_desc_type {
  *             unsolicited data-out or control
  * @num_sge:    number sges used on this TX task
  * @mapped:     Is the task header mapped
- * @wr_idx:     Current WR index
- * @wrs:        Array of WRs per task
- * @data_reg:   Data buffer registration details
- * @prot_reg:   Protection buffer registration details
- * @sig_attrs:  Signature attributes
+ * reg_wr:      registration WR
+ * send_wr:     send WR
+ * inv_wr:      invalidate WR
  */
 struct iser_tx_desc {
 	struct iser_ctrl iser_header;
@@ -260,15 +250,9 @@ struct iser_tx_desc {
 	int num_sge;
 	struct ib_cqe cqe;
 	bool mapped;
-	u8 wr_idx;
-	union iser_wr {
-		struct ib_send_wr send;
-		struct ib_reg_wr fast_reg;
-		struct ib_sig_handover_wr sig;
-	} wrs[ISER_MAX_WRS];
-	struct iser_mem_reg data_reg;
-	struct iser_mem_reg prot_reg;
-	struct ib_sig_attrs sig_attrs;
+	struct ib_reg_wr reg_wr;
+	struct ib_send_wr send_wr;
+	struct ib_send_wr inv_wr;
 };
 
 #define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
@@ -388,6 +372,7 @@ struct iser_device {
  *
  * @mr:        memory region
  * @fmr_pool:  pool of fmrs
+ * @sig_mr:    signature memory region
  * @page_vec:  fast reg page list used by fmr pool
  * @mr_valid:  is mr valid indicator
  */
@@ -396,36 +381,22 @@ struct iser_reg_resources {
 		struct ib_mr *mr;
 		struct ib_fmr_pool *fmr_pool;
 	};
+	struct ib_mr *sig_mr;
 	struct iser_page_vec *page_vec;
 	u8 mr_valid:1;
 };
 
 /**
- * struct iser_pi_context - Protection information context
- *
- * @rsc:            protection buffer registration resources
- * @sig_mr:         signature enable memory region
- * @sig_mr_valid:   is sig_mr valid indicator
- * @sig_protected:  is region protected indicator
- */
-struct iser_pi_context {
-	struct iser_reg_resources rsc;
-	struct ib_mr *sig_mr;
-	u8 sig_mr_valid:1;
-	u8 sig_protected:1;
-};
-
-/**
  * struct iser_fr_desc - Fast registration descriptor
  *
  * @list:           entry in connection fastreg pool
  * @rsc:            data buffer registration resources
- * @pi_ctx:         protection information context
+ * @sig_protected:  is region protected indicator
  */
 struct iser_fr_desc {
 	struct list_head list;
 	struct iser_reg_resources rsc;
-	struct iser_pi_context *pi_ctx;
+	bool sig_protected;
 	struct list_head all_list;
 };
 
@@ -674,21 +645,6 @@ void
 iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
 		      struct iser_fr_desc *desc);
 
-static inline struct ib_send_wr *
-iser_tx_next_wr(struct iser_tx_desc *tx_desc)
-{
-	struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send;
-	struct ib_send_wr *last_wr;
-
-	if (tx_desc->wr_idx) {
-		last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send;
-		last_wr->next = cur_wr;
-	}
-	tx_desc->wr_idx++;
-
-	return cur_wr;
-}
-
 static inline struct iser_conn *
 to_iser_conn(struct ib_conn *ib_conn)
 {
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 96af06cfe0af..5cbb4b3a0566 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -592,15 +592,14 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
 static inline int
 iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
 {
-	if (likely(rkey == desc->rsc.mr->rkey)) {
-		desc->rsc.mr_valid = 0;
-	} else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) {
-		desc->pi_ctx->sig_mr_valid = 0;
-	} else {
+	if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
+		     (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
 		iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
 		return -EINVAL;
 	}
 
+	desc->rsc.mr_valid = 0;
+
 	return 0;
 }
 
@@ -750,6 +749,9 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 	iser_task->prot[ISER_DIR_IN].data_len = 0;
 	iser_task->prot[ISER_DIR_OUT].data_len = 0;
 
+	iser_task->prot[ISER_DIR_IN].dma_nents = 0;
+	iser_task->prot[ISER_DIR_OUT].dma_nents = 0;
+
 	memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
 	       sizeof(struct iser_mem_reg));
 	memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2ba70729d7b0..2cc89a9b9e9b 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -302,8 +302,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
 }
 
 static void
-iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
-		    struct ib_sig_domain *domain)
+iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
 {
 	domain->sig_type = IB_SIG_TYPE_T10_DIF;
 	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
@@ -326,21 +325,21 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
 	case SCSI_PROT_WRITE_INSERT:
 	case SCSI_PROT_READ_STRIP:
 		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
-		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
+		iser_set_dif_domain(sc, &sig_attrs->wire);
 		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
 		break;
 	case SCSI_PROT_READ_INSERT:
 	case SCSI_PROT_WRITE_STRIP:
 		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
-		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+		iser_set_dif_domain(sc, &sig_attrs->mem);
 		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
 						IB_T10DIF_CSUM : IB_T10DIF_CRC;
 		break;
 	case SCSI_PROT_READ_PASS:
 	case SCSI_PROT_WRITE_PASS:
-		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
+		iser_set_dif_domain(sc, &sig_attrs->wire);
 		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
-		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
+		iser_set_dif_domain(sc, &sig_attrs->mem);
 		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
 						IB_T10DIF_CSUM : IB_T10DIF_CRC;
 		break;
@@ -366,27 +365,29 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
 static inline void
 iser_inv_rkey(struct ib_send_wr *inv_wr,
 	      struct ib_mr *mr,
-	      struct ib_cqe *cqe)
+	      struct ib_cqe *cqe,
+	      struct ib_send_wr *next_wr)
 {
 	inv_wr->opcode = IB_WR_LOCAL_INV;
 	inv_wr->wr_cqe = cqe;
 	inv_wr->ex.invalidate_rkey = mr->rkey;
 	inv_wr->send_flags = 0;
 	inv_wr->num_sge = 0;
+	inv_wr->next = next_wr;
 }
 
 static int
 iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
-		struct iser_pi_context *pi_ctx,
-		struct iser_mem_reg *data_reg,
-		struct iser_mem_reg *prot_reg,
+		struct iser_data_buf *mem,
+		struct iser_data_buf *sig_mem,
+		struct iser_reg_resources *rsc,
 		struct iser_mem_reg *sig_reg)
 {
 	struct iser_tx_desc *tx_desc = &iser_task->desc;
-	struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
 	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
-	struct ib_sig_handover_wr *wr;
-	struct ib_mr *mr = pi_ctx->sig_mr;
+	struct ib_mr *mr = rsc->sig_mr;
+	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
+	struct ib_reg_wr *wr = &tx_desc->reg_wr;
 	int ret;
 
 	memset(sig_attrs, 0, sizeof(*sig_attrs));
@@ -396,33 +397,36 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
 
 	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
 
-	if (pi_ctx->sig_mr_valid)
-		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);
+	if (rsc->mr_valid)
+		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
 
 	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 
-	wr = container_of(iser_tx_next_wr(tx_desc), struct ib_sig_handover_wr,
-			  wr);
-	wr->wr.opcode = IB_WR_REG_SIG_MR;
+	ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
+			      sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
+	if (unlikely(ret)) {
+		iser_err("failed to map PI sg (%d)\n",
+			 mem->dma_nents + sig_mem->dma_nents);
+		goto err;
+	}
+
+	memset(wr, 0, sizeof(*wr));
+	wr->wr.next = &tx_desc->send_wr;
+	wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
 	wr->wr.wr_cqe = cqe;
-	wr->wr.sg_list = &data_reg->sge;
-	wr->wr.num_sge = 1;
+	wr->wr.num_sge = 0;
 	wr->wr.send_flags = 0;
-	wr->sig_attrs = sig_attrs;
-	wr->sig_mr = mr;
-	if (scsi_prot_sg_count(iser_task->sc))
-		wr->prot = &prot_reg->sge;
-	else
-		wr->prot = NULL;
-	wr->access_flags = IB_ACCESS_LOCAL_WRITE |
-			   IB_ACCESS_REMOTE_READ |
-			   IB_ACCESS_REMOTE_WRITE;
-	pi_ctx->sig_mr_valid = 1;
+	wr->mr = mr;
+	wr->key = mr->rkey;
+	wr->access = IB_ACCESS_LOCAL_WRITE |
+		     IB_ACCESS_REMOTE_READ |
+		     IB_ACCESS_REMOTE_WRITE;
+	rsc->mr_valid = 1;
 
 	sig_reg->sge.lkey = mr->lkey;
 	sig_reg->rkey = mr->rkey;
-	sig_reg->sge.addr = 0;
-	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);
+	sig_reg->sge.addr = mr->iova;
+	sig_reg->sge.length = mr->length;
 
 	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
 		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
@@ -439,11 +443,11 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 	struct iser_tx_desc *tx_desc = &iser_task->desc;
 	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
 	struct ib_mr *mr = rsc->mr;
-	struct ib_reg_wr *wr;
+	struct ib_reg_wr *wr = &tx_desc->reg_wr;
 	int n;
 
 	if (rsc->mr_valid)
-		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);
+		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
 
 	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 
@@ -454,7 +458,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 		return n < 0 ? n : -EINVAL;
 	}
 
-	wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr);
+	wr->wr.next = &tx_desc->send_wr;
 	wr->wr.opcode = IB_WR_REG_MR;
 	wr->wr.wr_cqe = cqe;
 	wr->wr.send_flags = 0;
@@ -479,21 +483,6 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 }
 
 static int
-iser_reg_prot_sg(struct iscsi_iser_task *task,
-		 struct iser_data_buf *mem,
-		 struct iser_fr_desc *desc,
-		 bool use_dma_key,
-		 struct iser_mem_reg *reg)
-{
-	struct iser_device *device = task->iser_conn->ib_conn.device;
-
-	if (use_dma_key)
-		return iser_reg_dma(device, mem, reg);
-
-	return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
-}
-
-static int
 iser_reg_data_sg(struct iscsi_iser_task *task,
 		 struct iser_data_buf *mem,
 		 struct iser_fr_desc *desc,
@@ -516,7 +505,6 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
 	struct iser_device *device = ib_conn->device;
 	struct iser_data_buf *mem = &task->data[dir];
 	struct iser_mem_reg *reg = &task->rdma_reg[dir];
-	struct iser_mem_reg *data_reg;
 	struct iser_fr_desc *desc = NULL;
 	bool use_dma_key;
 	int err;
@@ -529,32 +517,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
 		reg->mem_h = desc;
 	}
 
-	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
-		data_reg = reg;
-	else
-		data_reg = &task->desc.data_reg;
-
-	err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
-	if (unlikely(err))
-		goto err_reg;
-
-	if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
-		struct iser_mem_reg *prot_reg = &task->desc.prot_reg;
-
-		if (scsi_prot_sg_count(task->sc)) {
-			mem = &task->prot[dir];
-			err = iser_reg_prot_sg(task, mem, desc,
-					       use_dma_key, prot_reg);
-			if (unlikely(err))
-				goto err_reg;
-		}
-
-		err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
-				      prot_reg, reg);
+	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
+		err = iser_reg_data_sg(task, mem, desc, use_dma_key, reg);
+		if (unlikely(err))
+			goto err_reg;
+	} else {
+		err = iser_reg_sig_mr(task, mem, &task->prot[dir],
+				      &desc->rsc, reg);
 		if (unlikely(err))
 			goto err_reg;
 
-		desc->pi_ctx->sig_protected = 1;
+		desc->sig_protected = 1;
 	}
 
 	return 0;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 4ff3d98fa6a4..a6548de0e218 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -233,116 +233,63 @@ void iser_free_fmr_pool(struct ib_conn *ib_conn)
 	kfree(desc);
 }
 
-static int
-iser_alloc_reg_res(struct iser_device *device,
-		   struct ib_pd *pd,
-		   struct iser_reg_resources *res,
-		   unsigned int size)
+static struct iser_fr_desc *
+iser_create_fastreg_desc(struct iser_device *device,
+			 struct ib_pd *pd,
+			 bool pi_enable,
+			 unsigned int size)
 {
+	struct iser_fr_desc *desc;
 	struct ib_device *ib_dev = device->ib_device;
 	enum ib_mr_type mr_type;
 	int ret;
 
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return ERR_PTR(-ENOMEM);
+
 	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
 		mr_type = IB_MR_TYPE_SG_GAPS;
 	else
 		mr_type = IB_MR_TYPE_MEM_REG;
 
-	res->mr = ib_alloc_mr(pd, mr_type, size);
-	if (IS_ERR(res->mr)) {
-		ret = PTR_ERR(res->mr);
+	desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
+	if (IS_ERR(desc->rsc.mr)) {
+		ret = PTR_ERR(desc->rsc.mr);
 		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
-		return ret;
+		goto err_alloc_mr;
 	}
-	res->mr_valid = 0;
-
-	return 0;
-}
 
-static void
-iser_free_reg_res(struct iser_reg_resources *rsc)
-{
-	ib_dereg_mr(rsc->mr);
-}
-
-static int
-iser_alloc_pi_ctx(struct iser_device *device,
-		  struct ib_pd *pd,
-		  struct iser_fr_desc *desc,
-		  unsigned int size)
-{
-	struct iser_pi_context *pi_ctx = NULL;
-	int ret;
-
-	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
-	if (!desc->pi_ctx)
-		return -ENOMEM;
-
-	pi_ctx = desc->pi_ctx;
-
-	ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
-	if (ret) {
-		iser_err("failed to allocate reg_resources\n");
-		goto alloc_reg_res_err;
-	}
-
-	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
-	if (IS_ERR(pi_ctx->sig_mr)) {
-		ret = PTR_ERR(pi_ctx->sig_mr);
-		goto sig_mr_failure;
+	if (pi_enable) {
+		desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
+		if (IS_ERR(desc->rsc.sig_mr)) {
+			ret = PTR_ERR(desc->rsc.sig_mr);
+			iser_err("Failed to allocate sig_mr err=%d\n", ret);
+			goto err_alloc_mr_integrity;
+		}
 	}
-	pi_ctx->sig_mr_valid = 0;
-	desc->pi_ctx->sig_protected = 0;
-
-	return 0;
+	desc->rsc.mr_valid = 0;
 
-sig_mr_failure:
-	iser_free_reg_res(&pi_ctx->rsc);
-alloc_reg_res_err:
-	kfree(desc->pi_ctx);
+	return desc;
 
-	return ret;
-}
+err_alloc_mr_integrity:
+	ib_dereg_mr(desc->rsc.mr);
+err_alloc_mr:
+	kfree(desc);
 
-static void
-iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
-{
-	iser_free_reg_res(&pi_ctx->rsc);
-	ib_dereg_mr(pi_ctx->sig_mr);
-	kfree(pi_ctx);
+	return ERR_PTR(ret);
 }
 
-static struct iser_fr_desc *
-iser_create_fastreg_desc(struct iser_device *device,
-			 struct ib_pd *pd,
-			 bool pi_enable,
-			 unsigned int size)
+static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
 {
-	struct iser_fr_desc *desc;
-	int ret;
-
-	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-	if (!desc)
-		return ERR_PTR(-ENOMEM);
-
-	ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
-	if (ret)
-		goto reg_res_alloc_failure;
+	struct iser_reg_resources *res = &desc->rsc;
 
-	if (pi_enable) {
-		ret = iser_alloc_pi_ctx(device, pd, desc, size);
-		if (ret)
-			goto pi_ctx_alloc_failure;
+	ib_dereg_mr(res->mr);
+	if (res->sig_mr) {
+		ib_dereg_mr(res->sig_mr);
+		res->sig_mr = NULL;
 	}
-
-	return desc;
-
-pi_ctx_alloc_failure:
-	iser_free_reg_res(&desc->rsc);
-reg_res_alloc_failure:
 	kfree(desc);
-
-	return ERR_PTR(ret);
 }
 
 /**
@@ -399,10 +346,7 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
 
 	list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
 		list_del(&desc->all_list);
-		iser_free_reg_res(&desc->rsc);
-		if (desc->pi_ctx)
-			iser_free_pi_ctx(desc->pi_ctx);
-		kfree(desc);
+		iser_destroy_fastreg_desc(desc);
 		++i;
 	}
 
@@ -455,7 +399,7 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
 	init_attr.qp_type = IB_QPT_RC;
 	if (ib_conn->pi_support) {
 		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
-		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
+		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
 		iser_conn->max_cmds =
 			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
 	} else {
@@ -707,6 +651,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
 	struct ib_device_attr *attr = &device->ib_device->attrs;
 	unsigned short sg_tablesize, sup_sg_tablesize;
 	unsigned short reserved_mr_pages;
+	u32 max_num_sg;
 
 	/*
 	 * FRs without SG_GAPS or FMRs can only map up to a (device) page per
@@ -720,12 +665,17 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
 	else
 		reserved_mr_pages = 1;
 
+	if (iser_conn->ib_conn.pi_support)
+		max_num_sg = attr->max_pi_fast_reg_page_list_len;
+	else
+		max_num_sg = attr->max_fast_reg_page_list_len;
+
 	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
 	if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
 		sup_sg_tablesize =
 			min_t(
 			 uint, ISCSI_ISER_MAX_SG_TABLESIZE,
-			 attr->max_fast_reg_page_list_len - reserved_mr_pages);
+			 max_num_sg - reserved_mr_pages);
 	else
 		sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
 
@@ -762,7 +712,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
 	/* connection T10-PI support */
 	if (iser_pi_enable) {
 		if (!(device->ib_device->attrs.device_cap_flags &
-		      IB_DEVICE_SIGNATURE_HANDOVER)) {
+		      IB_DEVICE_INTEGRITY_HANDOVER)) {
 			iser_warn("T10-PI requested but not supported on %s, "
 				  "continue without T10-PI\n",
 				  dev_name(&ib_conn->device->ib_device->dev));
@@ -1087,7 +1037,8 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
 int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
 		   bool signal)
 {
-	struct ib_send_wr *wr = iser_tx_next_wr(tx_desc);
+	struct ib_send_wr *wr = &tx_desc->send_wr;
+	struct ib_send_wr *first_wr;
 	int ib_ret;
 
 	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
@@ -1101,7 +1052,14 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
 	wr->opcode = IB_WR_SEND;
 	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
 
-	ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, NULL);
+	if (tx_desc->inv_wr.next)
+		first_wr = &tx_desc->inv_wr;
+	else if (tx_desc->reg_wr.wr.next)
+		first_wr = &tx_desc->reg_wr.wr;
+	else
+		first_wr = wr;
+
+	ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
 	if (ib_ret)
 		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
 			 ib_ret, wr->opcode);
@@ -1118,9 +1076,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
 	struct ib_mr_status mr_status;
 	int ret;
 
-	if (desc && desc->pi_ctx->sig_protected) {
-		desc->pi_ctx->sig_protected = 0;
-		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
+	if (desc && desc->sig_protected) {
+		desc->sig_protected = 0;
+		ret = ib_check_mr_status(desc->rsc.sig_mr,
 					 IB_MR_CHECK_SIG_STATUS, &mr_status);
 		if (ret) {
 			pr_err("ib_check_mr_status failed, ret %d\n", ret);
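For context on the posting side of the iser rework above: the tx descriptor now carries fixed inv_wr/reg_wr/send_wr members linked via ->next instead of a wrs[] array walked by wr_idx, and a single ib_post_send() starts at the first populated WR. The sketch below restates iser_post_send()'s selection logic in isolation; example_post_tx_chain() is a made-up name, not a function from the tree.

/*
 * Illustrative sketch only: the WR chain is inv_wr -> reg_wr.wr -> send_wr,
 * each link set up only when that stage is needed for the task.
 */
#include <rdma/ib_verbs.h>

static int example_post_tx_chain(struct ib_qp *qp, struct ib_send_wr *inv_wr,
				 struct ib_reg_wr *reg_wr,
				 struct ib_send_wr *send_wr)
{
	struct ib_send_wr *first_wr;

	if (inv_wr->next)		/* local invalidate queued first */
		first_wr = inv_wr;
	else if (reg_wr->wr.next)	/* registration without invalidate */
		first_wr = &reg_wr->wr;
	else				/* plain send */
		first_wr = send_wr;

	return ib_post_send(qp, first_wr, NULL);
}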
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 0205cf142b2f..a1a035270cab 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -133,7 +133,7 @@ isert_create_qp(struct isert_conn *isert_conn,
 	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	attr.qp_type = IB_QPT_RC;
 	if (device->pi_capable)
-		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
+		attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
 
 	ret = rdma_create_qp(cma_id, device->pd, &attr);
 	if (ret) {
@@ -309,7 +309,7 @@ isert_create_device_ib_res(struct isert_device *device)
 
 	/* Check signature cap */
 	device->pi_capable = ib_dev->attrs.device_cap_flags &
-			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+			     IB_DEVICE_INTEGRITY_HANDOVER ? true : false;
 
 	return 0;
 
@@ -1669,7 +1669,7 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	isert_dbg("Cmd %p\n", isert_cmd);
 
-	ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
+	ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr);
 	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 
 	if (ret) {
@@ -1715,7 +1715,7 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
 	iscsit_stop_dataout_timer(cmd);
 
 	if (isert_prot_cmd(isert_conn, se_cmd))
-		ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
+		ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr);
 	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 	cmd->write_data_done = 0;
 
@@ -2059,8 +2059,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 
 static inline void
-isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
-		     struct ib_sig_domain *domain)
+isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
 {
 	domain->sig_type = IB_SIG_TYPE_T10_DIF;
 	domain->sig.dif.bg_type = IB_T10DIF_CRC;
@@ -2088,17 +2087,17 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
 	case TARGET_PROT_DIN_INSERT:
 	case TARGET_PROT_DOUT_STRIP:
 		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
-		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
+		isert_set_dif_domain(se_cmd, &sig_attrs->wire);
 		break;
 	case TARGET_PROT_DOUT_INSERT:
 	case TARGET_PROT_DIN_STRIP:
 		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
-		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
+		isert_set_dif_domain(se_cmd, &sig_attrs->mem);
 		break;
 	case TARGET_PROT_DIN_PASS:
 	case TARGET_PROT_DOUT_PASS:
-		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
-		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
+		isert_set_dif_domain(se_cmd, &sig_attrs->wire);
+		isert_set_dif_domain(se_cmd, &sig_attrs->mem);
 		break;
 	default:
 		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index d5cbad2c61e4..c7bd96edce80 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3466,13 +3466,14 @@ static const match_table_t srp_opt_tokens = {
  * @net: [in] Network namespace.
  * @sa: [out] Address family, IP address and port number.
  * @addr_port_str: [in] IP address and port number.
+ * @has_port: [out] Whether or not @addr_port_str includes a port number.
  *
  * Parse the following address formats:
  * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
  * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
  */
 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
-			const char *addr_port_str)
+			const char *addr_port_str, bool *has_port)
 {
 	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
 	char *port_str;
@@ -3481,9 +3482,12 @@ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
 	if (!addr)
 		return -ENOMEM;
 	port_str = strrchr(addr, ':');
-	if (!port_str)
-		return -EINVAL;
-	*port_str++ = '\0';
+	if (port_str && strchr(port_str, ']'))
+		port_str = NULL;
+	if (port_str)
+		*port_str++ = '\0';
+	if (has_port)
+		*has_port = port_str != NULL;
 	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
 	if (ret && addr[0]) {
 		addr_end = addr + strlen(addr) - 1;
@@ -3505,6 +3509,7 @@ static int srp_parse_options(struct net *net, const char *buf,
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	unsigned long long ull;
+	bool has_port;
 	int opt_mask = 0;
 	int token;
 	int ret = -EINVAL;
@@ -3603,7 +3608,8 @@ static int srp_parse_options(struct net *net, const char *buf,
 				ret = -ENOMEM;
 				goto out;
 			}
-			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
+			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
+					   NULL);
 			if (ret < 0) {
 				pr_warn("bad source parameter '%s'\n", p);
 				kfree(p);
@@ -3619,7 +3625,10 @@ static int srp_parse_options(struct net *net, const char *buf,
 				ret = -ENOMEM;
 				goto out;
 			}
-			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
+			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
+					   &has_port);
+			if (!has_port)
+				ret = -EINVAL;
 			if (ret < 0) {
 				pr_warn("bad dest parameter '%s'\n", p);
 				kfree(p);