aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-01-08 19:17:31 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2018-01-08 19:17:31 -0500
commit44596f86826df00ab3ab1086b25f3cdcc11156a1 (patch)
tree0016781a7197eb55f649eb6326f76b111f1885a6
parentd32da5841b0f8b73dddb35c59c447d9749a47f6c (diff)
parenta1ffa4670cb97ae3a4b3e8535d88be5f643f7c3b (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Doug Ledford: - One line fix to mlx4 error flow (same as mlx5 fix in last pull request, just in the mlx4 driver) - Fix a race condition in the IPoIB driver. This patch is larger than just a one line fix, but resolves a race condition in a fairly straight forward manner - Fix a locking issue in the RDMA netlink code. This patch is also larger than I would like for a late -rc. It has, however, had a week to bake in the rdma tree prior to this pull request - One line fix to fix granting remote machine access to memory that they don't need and shouldn't have - One line fix to correct the fact that our sgid/dgid pair is swapped from what you would expect when receiving an incoming connection request * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: IB/srpt: Fix ACL lookup during login IB/srpt: Disable RDMA access by the initiator RDMA/netlink: Fix locking around __ib_get_device_by_index IB/ipoib: Fix race condition in neigh creation IB/mlx4: Fix mlx4_ib_alloc_mr error flow
-rw-r--r--drivers/infiniband/core/core_priv.h2
-rw-r--r--drivers/infiniband/core/device.c18
-rw-r--r--drivers/infiniband/core/nldev.c54
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c5
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c5
7 files changed, 79 insertions, 32 deletions
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a1d687a664f8..66f0268f37a6 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
314} 314}
315#endif 315#endif
316 316
317struct ib_device *__ib_device_get_by_index(u32 ifindex); 317struct ib_device *ib_device_get_by_index(u32 ifindex);
318/* RDMA device netlink */ 318/* RDMA device netlink */
319void nldev_init(void); 319void nldev_init(void);
320void nldev_exit(void); 320void nldev_exit(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 30914f3baa5f..465520627e4b 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
134 return 0; 134 return 0;
135} 135}
136 136
137struct ib_device *__ib_device_get_by_index(u32 index) 137static struct ib_device *__ib_device_get_by_index(u32 index)
138{ 138{
139 struct ib_device *device; 139 struct ib_device *device;
140 140
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
145 return NULL; 145 return NULL;
146} 146}
147 147
148/*
149 * Caller is responsible to return reference count by calling put_device()
150 */
151struct ib_device *ib_device_get_by_index(u32 index)
152{
153 struct ib_device *device;
154
155 down_read(&lists_rwsem);
156 device = __ib_device_get_by_index(index);
157 if (device)
158 get_device(&device->dev);
159
160 up_read(&lists_rwsem);
161 return device;
162}
163
148static struct ib_device *__ib_device_get_by_name(const char *name) 164static struct ib_device *__ib_device_get_by_name(const char *name)
149{ 165{
150 struct ib_device *device; 166 struct ib_device *device;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 9a05245a1acf..0dcd1aa6f683 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
142 142
143 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 143 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
144 144
145 device = __ib_device_get_by_index(index); 145 device = ib_device_get_by_index(index);
146 if (!device) 146 if (!device)
147 return -EINVAL; 147 return -EINVAL;
148 148
149 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 149 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
150 if (!msg) 150 if (!msg) {
151 return -ENOMEM; 151 err = -ENOMEM;
152 goto err;
153 }
152 154
153 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 155 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
154 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 156 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
155 0, 0); 157 0, 0);
156 158
157 err = fill_dev_info(msg, device); 159 err = fill_dev_info(msg, device);
158 if (err) { 160 if (err)
159 nlmsg_free(msg); 161 goto err_free;
160 return err;
161 }
162 162
163 nlmsg_end(msg, nlh); 163 nlmsg_end(msg, nlh);
164 164
165 put_device(&device->dev);
165 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); 166 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
167
168err_free:
169 nlmsg_free(msg);
170err:
171 put_device(&device->dev);
172 return err;
166} 173}
167 174
168static int _nldev_get_dumpit(struct ib_device *device, 175static int _nldev_get_dumpit(struct ib_device *device,
@@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
220 return -EINVAL; 227 return -EINVAL;
221 228
222 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 229 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
223 device = __ib_device_get_by_index(index); 230 device = ib_device_get_by_index(index);
224 if (!device) 231 if (!device)
225 return -EINVAL; 232 return -EINVAL;
226 233
227 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 234 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
228 if (!rdma_is_port_valid(device, port)) 235 if (!rdma_is_port_valid(device, port)) {
229 return -EINVAL; 236 err = -EINVAL;
237 goto err;
238 }
230 239
231 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 240 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
232 if (!msg) 241 if (!msg) {
233 return -ENOMEM; 242 err = -ENOMEM;
243 goto err;
244 }
234 245
235 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 246 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
236 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 247 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
237 0, 0); 248 0, 0);
238 249
239 err = fill_port_info(msg, device, port); 250 err = fill_port_info(msg, device, port);
240 if (err) { 251 if (err)
241 nlmsg_free(msg); 252 goto err_free;
242 return err;
243 }
244 253
245 nlmsg_end(msg, nlh); 254 nlmsg_end(msg, nlh);
255 put_device(&device->dev);
246 256
247 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); 257 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
258
259err_free:
260 nlmsg_free(msg);
261err:
262 put_device(&device->dev);
263 return err;
248} 264}
249 265
250static int nldev_port_get_dumpit(struct sk_buff *skb, 266static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
265 return -EINVAL; 281 return -EINVAL;
266 282
267 ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 283 ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
268 device = __ib_device_get_by_index(ifindex); 284 device = ib_device_get_by_index(ifindex);
269 if (!device) 285 if (!device)
270 return -EINVAL; 286 return -EINVAL;
271 287
@@ -299,7 +315,9 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
299 nlmsg_end(skb, nlh); 315 nlmsg_end(skb, nlh);
300 } 316 }
301 317
302out: cb->args[0] = idx; 318out:
319 put_device(&device->dev);
320 cb->args[0] = idx;
303 return skb->len; 321 return skb->len;
304} 322}
305 323
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 313bfb9ccb71..4975f3e6596e 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
642 goto err_free_mr; 642 goto err_free_mr;
643 643
644 mr->max_pages = max_num_sg; 644 mr->max_pages = max_num_sg;
645
646 err = mlx4_mr_enable(dev->dev, &mr->mmr); 645 err = mlx4_mr_enable(dev->dev, &mr->mmr);
647 if (err) 646 if (err)
648 goto err_free_pl; 647 goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
653 return &mr->ibmr; 652 return &mr->ibmr;
654 653
655err_free_pl: 654err_free_pl:
655 mr->ibmr.device = pd->device;
656 mlx4_free_priv_pages(mr); 656 mlx4_free_priv_pages(mr);
657err_free_mr: 657err_free_mr:
658 (void) mlx4_mr_free(dev->dev, &mr->mmr); 658 (void) mlx4_mr_free(dev->dev, &mr->mmr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 12b7f911f0e5..8880351df179 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -902,8 +902,8 @@ static int path_rec_start(struct net_device *dev,
902 return 0; 902 return 0;
903} 903}
904 904
905static void neigh_add_path(struct sk_buff *skb, u8 *daddr, 905static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
906 struct net_device *dev) 906 struct net_device *dev)
907{ 907{
908 struct ipoib_dev_priv *priv = ipoib_priv(dev); 908 struct ipoib_dev_priv *priv = ipoib_priv(dev);
909 struct rdma_netdev *rn = netdev_priv(dev); 909 struct rdma_netdev *rn = netdev_priv(dev);
@@ -917,7 +917,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
917 spin_unlock_irqrestore(&priv->lock, flags); 917 spin_unlock_irqrestore(&priv->lock, flags);
918 ++dev->stats.tx_dropped; 918 ++dev->stats.tx_dropped;
919 dev_kfree_skb_any(skb); 919 dev_kfree_skb_any(skb);
920 return; 920 return NULL;
921 }
922
923 /* To avoid race condition, make sure that the
924 * neigh will be added only once.
925 */
926 if (unlikely(!list_empty(&neigh->list))) {
927 spin_unlock_irqrestore(&priv->lock, flags);
928 return neigh;
921 } 929 }
922 930
923 path = __path_find(dev, daddr + 4); 931 path = __path_find(dev, daddr + 4);
@@ -956,7 +964,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
956 path->ah->last_send = rn->send(dev, skb, path->ah->ah, 964 path->ah->last_send = rn->send(dev, skb, path->ah->ah,
957 IPOIB_QPN(daddr)); 965 IPOIB_QPN(daddr));
958 ipoib_neigh_put(neigh); 966 ipoib_neigh_put(neigh);
959 return; 967 return NULL;
960 } 968 }
961 } else { 969 } else {
962 neigh->ah = NULL; 970 neigh->ah = NULL;
@@ -973,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
973 981
974 spin_unlock_irqrestore(&priv->lock, flags); 982 spin_unlock_irqrestore(&priv->lock, flags);
975 ipoib_neigh_put(neigh); 983 ipoib_neigh_put(neigh);
976 return; 984 return NULL;
977 985
978err_path: 986err_path:
979 ipoib_neigh_free(neigh); 987 ipoib_neigh_free(neigh);
@@ -983,6 +991,8 @@ err_drop:
983 991
984 spin_unlock_irqrestore(&priv->lock, flags); 992 spin_unlock_irqrestore(&priv->lock, flags);
985 ipoib_neigh_put(neigh); 993 ipoib_neigh_put(neigh);
994
995 return NULL;
986} 996}
987 997
988static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 998static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1091,8 +1101,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1091 case htons(ETH_P_TIPC): 1101 case htons(ETH_P_TIPC):
1092 neigh = ipoib_neigh_get(dev, phdr->hwaddr); 1102 neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1093 if (unlikely(!neigh)) { 1103 if (unlikely(!neigh)) {
1094 neigh_add_path(skb, phdr->hwaddr, dev); 1104 neigh = neigh_add_path(skb, phdr->hwaddr, dev);
1095 return NETDEV_TX_OK; 1105 if (likely(!neigh))
1106 return NETDEV_TX_OK;
1096 } 1107 }
1097 break; 1108 break;
1098 case htons(ETH_P_ARP): 1109 case htons(ETH_P_ARP):
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 93e149efc1f5..9b3f47ae2016 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
816 spin_lock_irqsave(&priv->lock, flags); 816 spin_lock_irqsave(&priv->lock, flags);
817 if (!neigh) { 817 if (!neigh) {
818 neigh = ipoib_neigh_alloc(daddr, dev); 818 neigh = ipoib_neigh_alloc(daddr, dev);
819 if (neigh) { 819 /* Make sure that the neigh will be added only
820 * once to mcast list.
821 */
822 if (neigh && list_empty(&neigh->list)) {
820 kref_get(&mcast->ah->ref); 823 kref_get(&mcast->ah->ref);
821 neigh->ah = mcast->ah; 824 neigh->ah = mcast->ah;
822 list_add_tail(&neigh->list, &mcast->neigh_list); 825 list_add_tail(&neigh->list, &mcast->neigh_list);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 8a1bd354b1cc..bfa576aa9f03 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1013,8 +1013,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1013 return -ENOMEM; 1013 return -ENOMEM;
1014 1014
1015 attr->qp_state = IB_QPS_INIT; 1015 attr->qp_state = IB_QPS_INIT;
1016 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | 1016 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
1017 IB_ACCESS_REMOTE_WRITE;
1018 attr->port_num = ch->sport->port; 1017 attr->port_num = ch->sport->port;
1019 attr->pkey_index = 0; 1018 attr->pkey_index = 0;
1020 1019
@@ -2078,7 +2077,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2078 goto destroy_ib; 2077 goto destroy_ib;
2079 } 2078 }
2080 2079
2081 guid = (__be16 *)&param->primary_path->sgid.global.interface_id; 2080 guid = (__be16 *)&param->primary_path->dgid.global.interface_id;
2082 snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x", 2081 snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
2083 be16_to_cpu(guid[0]), be16_to_cpu(guid[1]), 2082 be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
2084 be16_to_cpu(guid[2]), be16_to_cpu(guid[3])); 2083 be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));