diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-10-01 16:38:52 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-10-01 16:38:52 -0400 |
commit | 46c8217c4a54c17dd4c000ad804fa1e223a10578 (patch) | |
tree | c188a31698428a416ab39382cb49d421bc666715 | |
parent | f97b870eced0ec562f953d32eda03906c7dacad6 (diff) | |
parent | 2866196f294954ce9fa226825c8c1eaa64c7da8a (diff) |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma fixes from Doug Ledford:
- Fixes for mlx5 related issues
- Fixes for ipoib multicast handling
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
IB/ipoib: increase the max mcast backlog queue
IB/ipoib: Make sendonly multicast joins create the mcast group
IB/ipoib: Expire sendonly multicast joins
IB/mlx5: Remove pa_lkey usages
IB/mlx5: Remove support for IB_DEVICE_LOCAL_DMA_LKEY
IB/iser: Add module parameter for always register memory
xprtrdma: Replace global lkey with lkey local to PD
-rw-r--r-- | drivers/infiniband/hw/mlx5/main.c | 67 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx5/qp.c | 4 | ||||
-rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib.h | 4 | ||||
-rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib_main.c | 18 | ||||
-rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 26 | ||||
-rw-r--r-- | drivers/infiniband/ulp/iser/iscsi_iser.c | 5 | ||||
-rw-r--r-- | drivers/infiniband/ulp/iser/iscsi_iser.h | 1 | ||||
-rw-r--r-- | drivers/infiniband/ulp/iser/iser_memory.c | 18 | ||||
-rw-r--r-- | drivers/infiniband/ulp/iser/iser_verbs.c | 21 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/fw.c | 22 | ||||
-rw-r--r-- | include/linux/mlx5/device.h | 11 | ||||
-rw-r--r-- | include/linux/mlx5/driver.h | 1 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/fmr_ops.c | 19 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/frwr_ops.c | 5 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/physical_ops.c | 10 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/verbs.c | 2 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/xprt_rdma.h | 1 |
18 files changed, 70 insertions, 167 deletions
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 41d6911e244e..f1ccd40beae9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
245 | props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; | 245 | props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; |
246 | if (MLX5_CAP_GEN(mdev, apm)) | 246 | if (MLX5_CAP_GEN(mdev, apm)) |
247 | props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; | 247 | props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; |
248 | props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; | ||
249 | if (MLX5_CAP_GEN(mdev, xrc)) | 248 | if (MLX5_CAP_GEN(mdev, xrc)) |
250 | props->device_cap_flags |= IB_DEVICE_XRC; | 249 | props->device_cap_flags |= IB_DEVICE_XRC; |
251 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; | 250 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; |
@@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm | |||
795 | return 0; | 794 | return 0; |
796 | } | 795 | } |
797 | 796 | ||
798 | static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) | ||
799 | { | ||
800 | struct mlx5_create_mkey_mbox_in *in; | ||
801 | struct mlx5_mkey_seg *seg; | ||
802 | struct mlx5_core_mr mr; | ||
803 | int err; | ||
804 | |||
805 | in = kzalloc(sizeof(*in), GFP_KERNEL); | ||
806 | if (!in) | ||
807 | return -ENOMEM; | ||
808 | |||
809 | seg = &in->seg; | ||
810 | seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; | ||
811 | seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); | ||
812 | seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); | ||
813 | seg->start_addr = 0; | ||
814 | |||
815 | err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), | ||
816 | NULL, NULL, NULL); | ||
817 | if (err) { | ||
818 | mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); | ||
819 | goto err_in; | ||
820 | } | ||
821 | |||
822 | kfree(in); | ||
823 | *key = mr.key; | ||
824 | |||
825 | return 0; | ||
826 | |||
827 | err_in: | ||
828 | kfree(in); | ||
829 | |||
830 | return err; | ||
831 | } | ||
832 | |||
833 | static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) | ||
834 | { | ||
835 | struct mlx5_core_mr mr; | ||
836 | int err; | ||
837 | |||
838 | memset(&mr, 0, sizeof(mr)); | ||
839 | mr.key = key; | ||
840 | err = mlx5_core_destroy_mkey(dev->mdev, &mr); | ||
841 | if (err) | ||
842 | mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); | ||
843 | } | ||
844 | |||
845 | static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, | 797 | static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, |
846 | struct ib_ucontext *context, | 798 | struct ib_ucontext *context, |
847 | struct ib_udata *udata) | 799 | struct ib_udata *udata) |
@@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, | |||
867 | kfree(pd); | 819 | kfree(pd); |
868 | return ERR_PTR(-EFAULT); | 820 | return ERR_PTR(-EFAULT); |
869 | } | 821 | } |
870 | } else { | ||
871 | err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); | ||
872 | if (err) { | ||
873 | mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); | ||
874 | kfree(pd); | ||
875 | return ERR_PTR(err); | ||
876 | } | ||
877 | } | 822 | } |
878 | 823 | ||
879 | return &pd->ibpd; | 824 | return &pd->ibpd; |
@@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd) | |||
884 | struct mlx5_ib_dev *mdev = to_mdev(pd->device); | 829 | struct mlx5_ib_dev *mdev = to_mdev(pd->device); |
885 | struct mlx5_ib_pd *mpd = to_mpd(pd); | 830 | struct mlx5_ib_pd *mpd = to_mpd(pd); |
886 | 831 | ||
887 | if (!pd->uobject) | ||
888 | free_pa_mkey(mdev, mpd->pa_lkey); | ||
889 | |||
890 | mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); | 832 | mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); |
891 | kfree(mpd); | 833 | kfree(mpd); |
892 | 834 | ||
@@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) | |||
1245 | struct ib_srq_init_attr attr; | 1187 | struct ib_srq_init_attr attr; |
1246 | struct mlx5_ib_dev *dev; | 1188 | struct mlx5_ib_dev *dev; |
1247 | struct ib_cq_init_attr cq_attr = {.cqe = 1}; | 1189 | struct ib_cq_init_attr cq_attr = {.cqe = 1}; |
1248 | u32 rsvd_lkey; | ||
1249 | int ret = 0; | 1190 | int ret = 0; |
1250 | 1191 | ||
1251 | dev = container_of(devr, struct mlx5_ib_dev, devr); | 1192 | dev = container_of(devr, struct mlx5_ib_dev, devr); |
1252 | 1193 | ||
1253 | ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey); | ||
1254 | if (ret) { | ||
1255 | pr_err("Failed to query special context %d\n", ret); | ||
1256 | return ret; | ||
1257 | } | ||
1258 | dev->ib_dev.local_dma_lkey = rsvd_lkey; | ||
1259 | |||
1260 | devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); | 1194 | devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); |
1261 | if (IS_ERR(devr->p0)) { | 1195 | if (IS_ERR(devr->p0)) { |
1262 | ret = PTR_ERR(devr->p0); | 1196 | ret = PTR_ERR(devr->p0); |
@@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
1418 | strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); | 1352 | strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); |
1419 | dev->ib_dev.owner = THIS_MODULE; | 1353 | dev->ib_dev.owner = THIS_MODULE; |
1420 | dev->ib_dev.node_type = RDMA_NODE_IB_CA; | 1354 | dev->ib_dev.node_type = RDMA_NODE_IB_CA; |
1355 | dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; | ||
1421 | dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); | 1356 | dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); |
1422 | dev->ib_dev.phys_port_cnt = dev->num_ports; | 1357 | dev->ib_dev.phys_port_cnt = dev->num_ports; |
1423 | dev->ib_dev.num_comp_vectors = | 1358 | dev->ib_dev.num_comp_vectors = |
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index bb8cda79e881..22123b79d550 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte | |||
103 | struct mlx5_ib_pd { | 103 | struct mlx5_ib_pd { |
104 | struct ib_pd ibpd; | 104 | struct ib_pd ibpd; |
105 | u32 pdn; | 105 | u32 pdn; |
106 | u32 pa_lkey; | ||
107 | }; | 106 | }; |
108 | 107 | ||
109 | /* Use macros here so that don't have to duplicate | 108 | /* Use macros here so that don't have to duplicate |
@@ -213,7 +212,6 @@ struct mlx5_ib_qp { | |||
213 | int uuarn; | 212 | int uuarn; |
214 | 213 | ||
215 | int create_type; | 214 | int create_type; |
216 | u32 pa_lkey; | ||
217 | 215 | ||
218 | /* Store signature errors */ | 216 | /* Store signature errors */ |
219 | bool signature_en; | 217 | bool signature_en; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index c745c6c5e10d..6f521a3418e8 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
925 | err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); | 925 | err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); |
926 | if (err) | 926 | if (err) |
927 | mlx5_ib_dbg(dev, "err %d\n", err); | 927 | mlx5_ib_dbg(dev, "err %d\n", err); |
928 | else | ||
929 | qp->pa_lkey = to_mpd(pd)->pa_lkey; | ||
930 | } | 928 | } |
931 | 929 | ||
932 | if (err) | 930 | if (err) |
@@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, | |||
2045 | mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); | 2043 | mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); |
2046 | dseg->addr = cpu_to_be64(mfrpl->map); | 2044 | dseg->addr = cpu_to_be64(mfrpl->map); |
2047 | dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); | 2045 | dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); |
2048 | dseg->lkey = cpu_to_be32(pd->pa_lkey); | 2046 | dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); |
2049 | } | 2047 | } |
2050 | 2048 | ||
2051 | static __be32 send_ieth(struct ib_send_wr *wr) | 2049 | static __be32 send_ieth(struct ib_send_wr *wr) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ca2873698d75..4cd5428a2399 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -80,7 +80,7 @@ enum { | |||
80 | IPOIB_NUM_WC = 4, | 80 | IPOIB_NUM_WC = 4, |
81 | 81 | ||
82 | IPOIB_MAX_PATH_REC_QUEUE = 3, | 82 | IPOIB_MAX_PATH_REC_QUEUE = 3, |
83 | IPOIB_MAX_MCAST_QUEUE = 3, | 83 | IPOIB_MAX_MCAST_QUEUE = 64, |
84 | 84 | ||
85 | IPOIB_FLAG_OPER_UP = 0, | 85 | IPOIB_FLAG_OPER_UP = 0, |
86 | IPOIB_FLAG_INITIALIZED = 1, | 86 | IPOIB_FLAG_INITIALIZED = 1, |
@@ -548,6 +548,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter, | |||
548 | 548 | ||
549 | int ipoib_mcast_attach(struct net_device *dev, u16 mlid, | 549 | int ipoib_mcast_attach(struct net_device *dev, u16 mlid, |
550 | union ib_gid *mgid, int set_qkey); | 550 | union ib_gid *mgid, int set_qkey); |
551 | int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast); | ||
552 | struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid); | ||
551 | 553 | ||
552 | int ipoib_init_qp(struct net_device *dev); | 554 | int ipoib_init_qp(struct net_device *dev); |
553 | int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); | 555 | int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 36536ce5a3e2..f74316e679d2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) | |||
1149 | unsigned long dt; | 1149 | unsigned long dt; |
1150 | unsigned long flags; | 1150 | unsigned long flags; |
1151 | int i; | 1151 | int i; |
1152 | LIST_HEAD(remove_list); | ||
1153 | struct ipoib_mcast *mcast, *tmcast; | ||
1154 | struct net_device *dev = priv->dev; | ||
1152 | 1155 | ||
1153 | if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) | 1156 | if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) |
1154 | return; | 1157 | return; |
@@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) | |||
1176 | lockdep_is_held(&priv->lock))) != NULL) { | 1179 | lockdep_is_held(&priv->lock))) != NULL) { |
1177 | /* was the neigh idle for two GC periods */ | 1180 | /* was the neigh idle for two GC periods */ |
1178 | if (time_after(neigh_obsolete, neigh->alive)) { | 1181 | if (time_after(neigh_obsolete, neigh->alive)) { |
1182 | u8 *mgid = neigh->daddr + 4; | ||
1183 | |||
1184 | /* Is this multicast ? */ | ||
1185 | if (*mgid == 0xff) { | ||
1186 | mcast = __ipoib_mcast_find(dev, mgid); | ||
1187 | |||
1188 | if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { | ||
1189 | list_del(&mcast->list); | ||
1190 | rb_erase(&mcast->rb_node, &priv->multicast_tree); | ||
1191 | list_add_tail(&mcast->list, &remove_list); | ||
1192 | } | ||
1193 | } | ||
1194 | |||
1179 | rcu_assign_pointer(*np, | 1195 | rcu_assign_pointer(*np, |
1180 | rcu_dereference_protected(neigh->hnext, | 1196 | rcu_dereference_protected(neigh->hnext, |
1181 | lockdep_is_held(&priv->lock))); | 1197 | lockdep_is_held(&priv->lock))); |
@@ -1191,6 +1207,8 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) | |||
1191 | 1207 | ||
1192 | out_unlock: | 1208 | out_unlock: |
1193 | spin_unlock_irqrestore(&priv->lock, flags); | 1209 | spin_unlock_irqrestore(&priv->lock, flags); |
1210 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) | ||
1211 | ipoib_mcast_leave(dev, mcast); | ||
1194 | } | 1212 | } |
1195 | 1213 | ||
1196 | static void ipoib_reap_neigh(struct work_struct *work) | 1214 | static void ipoib_reap_neigh(struct work_struct *work) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 09a1748f9d13..136cbefe00f8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, | |||
153 | return mcast; | 153 | return mcast; |
154 | } | 154 | } |
155 | 155 | ||
156 | static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) | 156 | struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) |
157 | { | 157 | { |
158 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 158 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
159 | struct rb_node *n = priv->multicast_tree.rb_node; | 159 | struct rb_node *n = priv->multicast_tree.rb_node; |
@@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) | |||
508 | rec.hop_limit = priv->broadcast->mcmember.hop_limit; | 508 | rec.hop_limit = priv->broadcast->mcmember.hop_limit; |
509 | 509 | ||
510 | /* | 510 | /* |
511 | * Historically Linux IPoIB has never properly supported SEND | 511 | * Send-only IB Multicast joins do not work at the core |
512 | * ONLY join. It emulated it by not providing all the required | 512 | * IB layer yet, so we can't use them here. However, |
513 | * attributes, which is enough to prevent group creation and | 513 | * we are emulating an Ethernet multicast send, which |
514 | * detect if there are full members or not. A major problem | 514 | * does not require a multicast subscription and will |
515 | * with supporting SEND ONLY is detecting when the group is | 515 | * still send properly. The most appropriate thing to |
516 | * auto-destroyed as IPoIB will cache the MLID.. | 516 | * do is to create the group if it doesn't exist as that |
517 | * most closely emulates the behavior, from a user space | ||
518 | * application perspective, of Ethernet multicast | ||
519 | * operation. For now, we do a full join, maybe later | ||
520 | * when the core IB layers support send only joins we | ||
521 | * will use them. | ||
517 | */ | 522 | */ |
518 | #if 1 | 523 | #if 0 |
519 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) | ||
520 | comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; | ||
521 | #else | ||
522 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) | 524 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) |
523 | rec.join_state = 4; | 525 | rec.join_state = 4; |
524 | #endif | 526 | #endif |
@@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev) | |||
675 | return 0; | 677 | return 0; |
676 | } | 678 | } |
677 | 679 | ||
678 | static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) | 680 | int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) |
679 | { | 681 | { |
680 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 682 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
681 | int ret = 0; | 683 | int ret = 0; |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 1ace5d83a4d7..f58ff96b6cbb 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS; | |||
97 | module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); | 97 | module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); |
98 | MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); | 98 | MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); |
99 | 99 | ||
100 | bool iser_always_reg = true; | ||
101 | module_param_named(always_register, iser_always_reg, bool, S_IRUGO); | ||
102 | MODULE_PARM_DESC(always_register, | ||
103 | "Always register memory, even for continuous memory regions (default:true)"); | ||
104 | |||
100 | bool iser_pi_enable = false; | 105 | bool iser_pi_enable = false; |
101 | module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); | 106 | module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); |
102 | MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); | 107 | MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 86f6583485ef..a5edd6ede692 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -611,6 +611,7 @@ extern int iser_debug_level; | |||
611 | extern bool iser_pi_enable; | 611 | extern bool iser_pi_enable; |
612 | extern int iser_pi_guard; | 612 | extern int iser_pi_guard; |
613 | extern unsigned int iser_max_sectors; | 613 | extern unsigned int iser_max_sectors; |
614 | extern bool iser_always_reg; | ||
614 | 615 | ||
615 | int iser_assign_reg_ops(struct iser_device *device); | 616 | int iser_assign_reg_ops(struct iser_device *device); |
616 | 617 | ||
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2493cc748db8..4c46d67d37a1 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
@@ -803,11 +803,12 @@ static int | |||
803 | iser_reg_prot_sg(struct iscsi_iser_task *task, | 803 | iser_reg_prot_sg(struct iscsi_iser_task *task, |
804 | struct iser_data_buf *mem, | 804 | struct iser_data_buf *mem, |
805 | struct iser_fr_desc *desc, | 805 | struct iser_fr_desc *desc, |
806 | bool use_dma_key, | ||
806 | struct iser_mem_reg *reg) | 807 | struct iser_mem_reg *reg) |
807 | { | 808 | { |
808 | struct iser_device *device = task->iser_conn->ib_conn.device; | 809 | struct iser_device *device = task->iser_conn->ib_conn.device; |
809 | 810 | ||
810 | if (mem->dma_nents == 1) | 811 | if (use_dma_key) |
811 | return iser_reg_dma(device, mem, reg); | 812 | return iser_reg_dma(device, mem, reg); |
812 | 813 | ||
813 | return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); | 814 | return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); |
@@ -817,11 +818,12 @@ static int | |||
817 | iser_reg_data_sg(struct iscsi_iser_task *task, | 818 | iser_reg_data_sg(struct iscsi_iser_task *task, |
818 | struct iser_data_buf *mem, | 819 | struct iser_data_buf *mem, |
819 | struct iser_fr_desc *desc, | 820 | struct iser_fr_desc *desc, |
821 | bool use_dma_key, | ||
820 | struct iser_mem_reg *reg) | 822 | struct iser_mem_reg *reg) |
821 | { | 823 | { |
822 | struct iser_device *device = task->iser_conn->ib_conn.device; | 824 | struct iser_device *device = task->iser_conn->ib_conn.device; |
823 | 825 | ||
824 | if (mem->dma_nents == 1) | 826 | if (use_dma_key) |
825 | return iser_reg_dma(device, mem, reg); | 827 | return iser_reg_dma(device, mem, reg); |
826 | 828 | ||
827 | return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); | 829 | return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); |
@@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, | |||
836 | struct iser_mem_reg *reg = &task->rdma_reg[dir]; | 838 | struct iser_mem_reg *reg = &task->rdma_reg[dir]; |
837 | struct iser_mem_reg *data_reg; | 839 | struct iser_mem_reg *data_reg; |
838 | struct iser_fr_desc *desc = NULL; | 840 | struct iser_fr_desc *desc = NULL; |
841 | bool use_dma_key; | ||
839 | int err; | 842 | int err; |
840 | 843 | ||
841 | err = iser_handle_unaligned_buf(task, mem, dir); | 844 | err = iser_handle_unaligned_buf(task, mem, dir); |
842 | if (unlikely(err)) | 845 | if (unlikely(err)) |
843 | return err; | 846 | return err; |
844 | 847 | ||
845 | if (mem->dma_nents != 1 || | 848 | use_dma_key = (mem->dma_nents == 1 && !iser_always_reg && |
846 | scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { | 849 | scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL); |
850 | |||
851 | if (!use_dma_key) { | ||
847 | desc = device->reg_ops->reg_desc_get(ib_conn); | 852 | desc = device->reg_ops->reg_desc_get(ib_conn); |
848 | reg->mem_h = desc; | 853 | reg->mem_h = desc; |
849 | } | 854 | } |
@@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, | |||
853 | else | 858 | else |
854 | data_reg = &task->desc.data_reg; | 859 | data_reg = &task->desc.data_reg; |
855 | 860 | ||
856 | err = iser_reg_data_sg(task, mem, desc, data_reg); | 861 | err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); |
857 | if (unlikely(err)) | 862 | if (unlikely(err)) |
858 | goto err_reg; | 863 | goto err_reg; |
859 | 864 | ||
@@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, | |||
866 | if (unlikely(err)) | 871 | if (unlikely(err)) |
867 | goto err_reg; | 872 | goto err_reg; |
868 | 873 | ||
869 | err = iser_reg_prot_sg(task, mem, desc, prot_reg); | 874 | err = iser_reg_prot_sg(task, mem, desc, |
875 | use_dma_key, prot_reg); | ||
870 | if (unlikely(err)) | 876 | if (unlikely(err)) |
871 | goto err_reg; | 877 | goto err_reg; |
872 | } | 878 | } |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ae70cc1463ac..85132d867bc8 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
133 | (unsigned long)comp); | 133 | (unsigned long)comp); |
134 | } | 134 | } |
135 | 135 | ||
136 | device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | | 136 | if (!iser_always_reg) { |
137 | IB_ACCESS_REMOTE_WRITE | | 137 | int access = IB_ACCESS_LOCAL_WRITE | |
138 | IB_ACCESS_REMOTE_READ); | 138 | IB_ACCESS_REMOTE_WRITE | |
139 | if (IS_ERR(device->mr)) | 139 | IB_ACCESS_REMOTE_READ; |
140 | goto dma_mr_err; | 140 | |
141 | device->mr = ib_get_dma_mr(device->pd, access); | ||
142 | if (IS_ERR(device->mr)) | ||
143 | goto dma_mr_err; | ||
144 | } | ||
141 | 145 | ||
142 | INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, | 146 | INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, |
143 | iser_event_handler); | 147 | iser_event_handler); |
@@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
147 | return 0; | 151 | return 0; |
148 | 152 | ||
149 | handler_err: | 153 | handler_err: |
150 | ib_dereg_mr(device->mr); | 154 | if (device->mr) |
155 | ib_dereg_mr(device->mr); | ||
151 | dma_mr_err: | 156 | dma_mr_err: |
152 | for (i = 0; i < device->comps_used; i++) | 157 | for (i = 0; i < device->comps_used; i++) |
153 | tasklet_kill(&device->comps[i].tasklet); | 158 | tasklet_kill(&device->comps[i].tasklet); |
@@ -173,7 +178,6 @@ comps_err: | |||
173 | static void iser_free_device_ib_res(struct iser_device *device) | 178 | static void iser_free_device_ib_res(struct iser_device *device) |
174 | { | 179 | { |
175 | int i; | 180 | int i; |
176 | BUG_ON(device->mr == NULL); | ||
177 | 181 | ||
178 | for (i = 0; i < device->comps_used; i++) { | 182 | for (i = 0; i < device->comps_used; i++) { |
179 | struct iser_comp *comp = &device->comps[i]; | 183 | struct iser_comp *comp = &device->comps[i]; |
@@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device) | |||
184 | } | 188 | } |
185 | 189 | ||
186 | (void)ib_unregister_event_handler(&device->event_handler); | 190 | (void)ib_unregister_event_handler(&device->event_handler); |
187 | (void)ib_dereg_mr(device->mr); | 191 | if (device->mr) |
192 | (void)ib_dereg_mr(device->mr); | ||
188 | ib_dealloc_pd(device->pd); | 193 | ib_dealloc_pd(device->pd); |
189 | 194 | ||
190 | kfree(device->comps); | 195 | kfree(device->comps); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index aa0d5ffe92d8..9335e5ae18cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c | |||
@@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) | |||
200 | 200 | ||
201 | return err; | 201 | return err; |
202 | } | 202 | } |
203 | |||
204 | int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey) | ||
205 | { | ||
206 | struct mlx5_cmd_query_special_contexts_mbox_in in; | ||
207 | struct mlx5_cmd_query_special_contexts_mbox_out out; | ||
208 | int err; | ||
209 | |||
210 | memset(&in, 0, sizeof(in)); | ||
211 | memset(&out, 0, sizeof(out)); | ||
212 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); | ||
213 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
214 | if (err) | ||
215 | return err; | ||
216 | |||
217 | if (out.hdr.status) | ||
218 | err = mlx5_cmd_status_to_err(&out.hdr); | ||
219 | |||
220 | *rsvd_lkey = be32_to_cpu(out.resd_lkey); | ||
221 | |||
222 | return err; | ||
223 | } | ||
224 | EXPORT_SYMBOL(mlx5_core_query_special_context); | ||
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8eb3b19af2a4..250b1ff8b48d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out { | |||
402 | u8 rsvd[8]; | 402 | u8 rsvd[8]; |
403 | }; | 403 | }; |
404 | 404 | ||
405 | struct mlx5_cmd_query_special_contexts_mbox_in { | ||
406 | struct mlx5_inbox_hdr hdr; | ||
407 | u8 rsvd[8]; | ||
408 | }; | ||
409 | |||
410 | struct mlx5_cmd_query_special_contexts_mbox_out { | ||
411 | struct mlx5_outbox_hdr hdr; | ||
412 | __be32 dump_fill_mkey; | ||
413 | __be32 resd_lkey; | ||
414 | }; | ||
415 | |||
416 | struct mlx5_cmd_layout { | 405 | struct mlx5_cmd_layout { |
417 | u8 type; | 406 | u8 type; |
418 | u8 rsvd0[3]; | 407 | u8 rsvd0[3]; |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27b53f9a24ad..8b6d6f2154a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); | |||
845 | int mlx5_register_interface(struct mlx5_interface *intf); | 845 | int mlx5_register_interface(struct mlx5_interface *intf); |
846 | void mlx5_unregister_interface(struct mlx5_interface *intf); | 846 | void mlx5_unregister_interface(struct mlx5_interface *intf); |
847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); | 847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); |
848 | int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); | ||
849 | 848 | ||
850 | struct mlx5_profile { | 849 | struct mlx5_profile { |
851 | u64 mask; | 850 | u64 mask; |
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index cb25c89da623..f1e8dafbd507 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c | |||
@@ -39,25 +39,6 @@ static int | |||
39 | fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | 39 | fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, |
40 | struct rpcrdma_create_data_internal *cdata) | 40 | struct rpcrdma_create_data_internal *cdata) |
41 | { | 41 | { |
42 | struct ib_device_attr *devattr = &ia->ri_devattr; | ||
43 | struct ib_mr *mr; | ||
44 | |||
45 | /* Obtain an lkey to use for the regbufs, which are | ||
46 | * protected from remote access. | ||
47 | */ | ||
48 | if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { | ||
49 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
50 | } else { | ||
51 | mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE); | ||
52 | if (IS_ERR(mr)) { | ||
53 | pr_err("%s: ib_get_dma_mr for failed with %lX\n", | ||
54 | __func__, PTR_ERR(mr)); | ||
55 | return -ENOMEM; | ||
56 | } | ||
57 | ia->ri_dma_lkey = ia->ri_dma_mr->lkey; | ||
58 | ia->ri_dma_mr = mr; | ||
59 | } | ||
60 | |||
61 | return 0; | 42 | return 0; |
62 | } | 43 | } |
63 | 44 | ||
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index d6653f5d0830..5318951b3b53 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | |||
189 | struct ib_device_attr *devattr = &ia->ri_devattr; | 189 | struct ib_device_attr *devattr = &ia->ri_devattr; |
190 | int depth, delta; | 190 | int depth, delta; |
191 | 191 | ||
192 | /* Obtain an lkey to use for the regbufs, which are | ||
193 | * protected from remote access. | ||
194 | */ | ||
195 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
196 | |||
197 | ia->ri_max_frmr_depth = | 192 | ia->ri_max_frmr_depth = |
198 | min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, | 193 | min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, |
199 | devattr->max_fast_reg_page_list_len); | 194 | devattr->max_fast_reg_page_list_len); |
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c index 72cf8b15bbb4..617b76f22154 100644 --- a/net/sunrpc/xprtrdma/physical_ops.c +++ b/net/sunrpc/xprtrdma/physical_ops.c | |||
@@ -23,7 +23,6 @@ static int | |||
23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | 23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, |
24 | struct rpcrdma_create_data_internal *cdata) | 24 | struct rpcrdma_create_data_internal *cdata) |
25 | { | 25 | { |
26 | struct ib_device_attr *devattr = &ia->ri_devattr; | ||
27 | struct ib_mr *mr; | 26 | struct ib_mr *mr; |
28 | 27 | ||
29 | /* Obtain an rkey to use for RPC data payloads. | 28 | /* Obtain an rkey to use for RPC data payloads. |
@@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | |||
37 | __func__, PTR_ERR(mr)); | 36 | __func__, PTR_ERR(mr)); |
38 | return -ENOMEM; | 37 | return -ENOMEM; |
39 | } | 38 | } |
40 | ia->ri_dma_mr = mr; | ||
41 | |||
42 | /* Obtain an lkey to use for regbufs. | ||
43 | */ | ||
44 | if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) | ||
45 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
46 | else | ||
47 | ia->ri_dma_lkey = ia->ri_dma_mr->lkey; | ||
48 | 39 | ||
40 | ia->ri_dma_mr = mr; | ||
49 | return 0; | 41 | return 0; |
50 | } | 42 | } |
51 | 43 | ||
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 682996779970..eb081ad05e33 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -1252,7 +1252,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) | |||
1252 | goto out_free; | 1252 | goto out_free; |
1253 | 1253 | ||
1254 | iov->length = size; | 1254 | iov->length = size; |
1255 | iov->lkey = ia->ri_dma_lkey; | 1255 | iov->lkey = ia->ri_pd->local_dma_lkey; |
1256 | rb->rg_size = size; | 1256 | rb->rg_size = size; |
1257 | rb->rg_owner = NULL; | 1257 | rb->rg_owner = NULL; |
1258 | return rb; | 1258 | return rb; |
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 02512221b8bc..c09414e6f91b 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -65,7 +65,6 @@ struct rpcrdma_ia { | |||
65 | struct rdma_cm_id *ri_id; | 65 | struct rdma_cm_id *ri_id; |
66 | struct ib_pd *ri_pd; | 66 | struct ib_pd *ri_pd; |
67 | struct ib_mr *ri_dma_mr; | 67 | struct ib_mr *ri_dma_mr; |
68 | u32 ri_dma_lkey; | ||
69 | struct completion ri_done; | 68 | struct completion ri_done; |
70 | int ri_async_rc; | 69 | int ri_async_rc; |
71 | unsigned int ri_max_frmr_depth; | 70 | unsigned int ri_max_frmr_depth; |