about summary refs log tree commit diff stats
path: root/drivers/infiniband/core
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-03-23 11:24:57 -0400
committerDavid S. Miller <davem@davemloft.net>2018-03-23 11:31:58 -0400
commit03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch)
treefbaf8738296b2e9dcba81c6daef2d515b6c4948c /drivers/infiniband/core
parent6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff)
parentf36b7534b83357cf52e747905de6d65b4f7c2512 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here... For the mac80211 stuff, these were fortunately just parallel adds. Trivially resolved. In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the function phy_disable_interrupts() earlier in the file, whilst in 'net-next' the phy_error() call from this function was removed. In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the 'rt_table_id' member of rtable collided with a bug fix in 'net' that added a new struct member "rt_mtu_locked" which needs to be copied over here. The mlxsw driver conflict consisted of net-next separating the span code and definitions into separate files, whilst a 'net' bug fix made some changes to that moved code. The mlx5 infiniband conflict resolution was quite non-trivial, the RDMA tree's merge commit was used as a guide here, and here are their notes: ==================== Due to bug fixes found by the syzkaller bot and taken into the for-rc branch after development for the 4.17 merge window had already started being taken into the for-next branch, there were fairly non-trivial merge issues that would need to be resolved between the for-rc branch and the for-next branch. This merge resolves those conflicts and provides a unified base upon which ongoing development for 4.17 can be based. Conflicts: drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524 (IB/mlx5: Fix cleanup order on unload) added to for-rc and commit b5ca15ad7e61 (IB/mlx5: Add proper representors support) add as part of the devel cycle both needed to modify the init/de-init functions used by mlx5. To support the new representors, the new functions added by the cleanup patch needed to be made non-static, and the init/de-init list added by the representors patch needed to be modified to match the init/de-init list changes made by the cleanup patch. 
Updates: drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function prototypes added by representors patch to reflect new function names as changed by cleanup patch drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init stage list to match new order from cleanup patch ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--drivers/infiniband/core/addr.c15
-rw-r--r--drivers/infiniband/core/cma.c15
-rw-r--r--drivers/infiniband/core/cq.c21
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/sa_query.c7
-rw-r--r--drivers/infiniband/core/ucma.c42
6 files changed, 65 insertions, 41 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5b4cf030c11..9183d148d644 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,
550 dst_release(dst); 550 dst_release(dst);
551 } 551 }
552 552
553 if (ndev->flags & IFF_LOOPBACK) { 553 if (ndev) {
554 ret = rdma_translate_ip(dst_in, addr); 554 if (ndev->flags & IFF_LOOPBACK)
555 /* 555 ret = rdma_translate_ip(dst_in, addr);
556 * Put the loopback device and get the translated 556 else
557 * device instead. 557 addr->bound_dev_if = ndev->ifindex;
558 */
559 dev_put(ndev); 558 dev_put(ndev);
560 ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
561 } else {
562 addr->bound_dev_if = ndev->ifindex;
563 } 559 }
564 dev_put(ndev);
565 560
566 return ret; 561 return ret;
567} 562}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 915bbd867b61..66f203730e80 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3069,7 +3069,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
3069 continue; 3069 continue;
3070 3070
3071 /* different dest port -> unique */ 3071 /* different dest port -> unique */
3072 if (!cma_any_port(cur_daddr) && 3072 if (!cma_any_port(daddr) &&
3073 !cma_any_port(cur_daddr) &&
3073 (dport != cur_dport)) 3074 (dport != cur_dport))
3074 continue; 3075 continue;
3075 3076
@@ -3080,7 +3081,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
3080 continue; 3081 continue;
3081 3082
3082 /* different dst address -> unique */ 3083 /* different dst address -> unique */
3083 if (!cma_any_addr(cur_daddr) && 3084 if (!cma_any_addr(daddr) &&
3085 !cma_any_addr(cur_daddr) &&
3084 cma_addr_cmp(daddr, cur_daddr)) 3086 cma_addr_cmp(daddr, cur_daddr))
3085 continue; 3087 continue;
3086 3088
@@ -3378,13 +3380,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
3378 } 3380 }
3379#endif 3381#endif
3380 } 3382 }
3383 daddr = cma_dst_addr(id_priv);
3384 daddr->sa_family = addr->sa_family;
3385
3381 ret = cma_get_port(id_priv); 3386 ret = cma_get_port(id_priv);
3382 if (ret) 3387 if (ret)
3383 goto err2; 3388 goto err2;
3384 3389
3385 daddr = cma_dst_addr(id_priv);
3386 daddr->sa_family = addr->sa_family;
3387
3388 return 0; 3390 return 0;
3389err2: 3391err2:
3390 if (id_priv->cma_dev) 3392 if (id_priv->cma_dev)
@@ -4173,6 +4175,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
4173 struct cma_multicast *mc; 4175 struct cma_multicast *mc;
4174 int ret; 4176 int ret;
4175 4177
4178 if (!id->device)
4179 return -EINVAL;
4180
4176 id_priv = container_of(id, struct rdma_id_private, id); 4181 id_priv = container_of(id, struct rdma_id_private, id);
4177 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && 4182 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
4178 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) 4183 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index bc79ca8215d7..af5ad6a56ae4 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -17,6 +17,7 @@
17 17
18/* # of WCs to poll for with a single call to ib_poll_cq */ 18/* # of WCs to poll for with a single call to ib_poll_cq */
19#define IB_POLL_BATCH 16 19#define IB_POLL_BATCH 16
20#define IB_POLL_BATCH_DIRECT 8
20 21
21/* # of WCs to iterate over before yielding */ 22/* # of WCs to iterate over before yielding */
22#define IB_POLL_BUDGET_IRQ 256 23#define IB_POLL_BUDGET_IRQ 256
@@ -25,18 +26,18 @@
25#define IB_POLL_FLAGS \ 26#define IB_POLL_FLAGS \
26 (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) 27 (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
27 28
28static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) 29static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
30 int batch)
29{ 31{
30 int i, n, completed = 0; 32 int i, n, completed = 0;
31 struct ib_wc *wcs = poll_wc ? : cq->wc;
32 33
33 /* 34 /*
34 * budget might be (-1) if the caller does not 35 * budget might be (-1) if the caller does not
35 * want to bound this call, thus we need unsigned 36 * want to bound this call, thus we need unsigned
36 * minimum here. 37 * minimum here.
37 */ 38 */
38 while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, 39 while ((n = ib_poll_cq(cq, min_t(u32, batch,
39 budget - completed), wcs)) > 0) { 40 budget - completed), wcs)) > 0) {
40 for (i = 0; i < n; i++) { 41 for (i = 0; i < n; i++) {
41 struct ib_wc *wc = &wcs[i]; 42 struct ib_wc *wc = &wcs[i];
42 43
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
48 49
49 completed += n; 50 completed += n;
50 51
51 if (n != IB_POLL_BATCH || 52 if (n != batch || (budget != -1 && completed >= budget))
52 (budget != -1 && completed >= budget))
53 break; 53 break;
54 } 54 }
55 55
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
72 */ 72 */
73int ib_process_cq_direct(struct ib_cq *cq, int budget) 73int ib_process_cq_direct(struct ib_cq *cq, int budget)
74{ 74{
75 struct ib_wc wcs[IB_POLL_BATCH]; 75 struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
76 76
77 return __ib_process_cq(cq, budget, wcs); 77 return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
78} 78}
79EXPORT_SYMBOL(ib_process_cq_direct); 79EXPORT_SYMBOL(ib_process_cq_direct);
80 80
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
88 struct ib_cq *cq = container_of(iop, struct ib_cq, iop); 88 struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
89 int completed; 89 int completed;
90 90
91 completed = __ib_process_cq(cq, budget, NULL); 91 completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
92 if (completed < budget) { 92 if (completed < budget) {
93 irq_poll_complete(&cq->iop); 93 irq_poll_complete(&cq->iop);
94 if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) 94 if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)
108 struct ib_cq *cq = container_of(work, struct ib_cq, work); 108 struct ib_cq *cq = container_of(work, struct ib_cq, work);
109 int completed; 109 int completed;
110 110
111 completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL); 111 completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
112 IB_POLL_BATCH);
112 if (completed >= IB_POLL_BUDGET_WORKQUEUE || 113 if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
113 ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) 114 ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
114 queue_work(ib_comp_wq, &cq->work); 115 queue_work(ib_comp_wq, &cq->work);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index e8010e73a1cf..bb065c9449be 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device,
536 ret = device->query_device(device, &device->attrs, &uhw); 536 ret = device->query_device(device, &device->attrs, &uhw);
537 if (ret) { 537 if (ret) {
538 pr_warn("Couldn't query the device attributes\n"); 538 pr_warn("Couldn't query the device attributes\n");
539 goto cache_cleanup; 539 goto cg_cleanup;
540 } 540 }
541 541
542 ret = ib_device_register_sysfs(device, port_callback); 542 ret = ib_device_register_sysfs(device, port_callback);
543 if (ret) { 543 if (ret) {
544 pr_warn("Couldn't register device %s with driver model\n", 544 pr_warn("Couldn't register device %s with driver model\n",
545 device->name); 545 device->name);
546 goto cache_cleanup; 546 goto cg_cleanup;
547 } 547 }
548 548
549 device->reg_state = IB_DEV_REGISTERED; 549 device->reg_state = IB_DEV_REGISTERED;
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device,
559 mutex_unlock(&device_mutex); 559 mutex_unlock(&device_mutex);
560 return 0; 560 return 0;
561 561
562cg_cleanup:
563 ib_device_unregister_rdmacg(device);
562cache_cleanup: 564cache_cleanup:
563 ib_cache_cleanup_one(device); 565 ib_cache_cleanup_one(device);
564 ib_cache_release_one(device); 566 ib_cache_release_one(device);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8cf15d4a8ac4..9f029a1ca5ea 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
1291 1291
1292 resolved_dev = dev_get_by_index(dev_addr.net, 1292 resolved_dev = dev_get_by_index(dev_addr.net,
1293 dev_addr.bound_dev_if); 1293 dev_addr.bound_dev_if);
1294 if (resolved_dev->flags & IFF_LOOPBACK) { 1294 if (!resolved_dev) {
1295 dev_put(resolved_dev); 1295 dev_put(idev);
1296 resolved_dev = idev; 1296 return -ENODEV;
1297 dev_hold(resolved_dev);
1298 } 1297 }
1299 ndev = ib_get_ndev_from_path(rec); 1298 ndev = ib_get_ndev_from_path(rec);
1300 rcu_read_lock(); 1299 rcu_read_lock();
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index f015f1bf88c9..e5a1e7d81326 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
132 ctx = idr_find(&ctx_idr, id); 132 ctx = idr_find(&ctx_idr, id);
133 if (!ctx) 133 if (!ctx)
134 ctx = ERR_PTR(-ENOENT); 134 ctx = ERR_PTR(-ENOENT);
135 else if (ctx->file != file) 135 else if (ctx->file != file || !ctx->cm_id)
136 ctx = ERR_PTR(-EINVAL); 136 ctx = ERR_PTR(-EINVAL);
137 return ctx; 137 return ctx;
138} 138}
@@ -456,6 +456,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
456 struct rdma_ucm_create_id cmd; 456 struct rdma_ucm_create_id cmd;
457 struct rdma_ucm_create_id_resp resp; 457 struct rdma_ucm_create_id_resp resp;
458 struct ucma_context *ctx; 458 struct ucma_context *ctx;
459 struct rdma_cm_id *cm_id;
459 enum ib_qp_type qp_type; 460 enum ib_qp_type qp_type;
460 int ret; 461 int ret;
461 462
@@ -476,10 +477,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
476 return -ENOMEM; 477 return -ENOMEM;
477 478
478 ctx->uid = cmd.uid; 479 ctx->uid = cmd.uid;
479 ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, 480 cm_id = rdma_create_id(current->nsproxy->net_ns,
480 ucma_event_handler, ctx, cmd.ps, qp_type); 481 ucma_event_handler, ctx, cmd.ps, qp_type);
481 if (IS_ERR(ctx->cm_id)) { 482 if (IS_ERR(cm_id)) {
482 ret = PTR_ERR(ctx->cm_id); 483 ret = PTR_ERR(cm_id);
483 goto err1; 484 goto err1;
484 } 485 }
485 486
@@ -489,14 +490,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
489 ret = -EFAULT; 490 ret = -EFAULT;
490 goto err2; 491 goto err2;
491 } 492 }
493
494 ctx->cm_id = cm_id;
492 return 0; 495 return 0;
493 496
494err2: 497err2:
495 rdma_destroy_id(ctx->cm_id); 498 rdma_destroy_id(cm_id);
496err1: 499err1:
497 mutex_lock(&mut); 500 mutex_lock(&mut);
498 idr_remove(&ctx_idr, ctx->id); 501 idr_remove(&ctx_idr, ctx->id);
499 mutex_unlock(&mut); 502 mutex_unlock(&mut);
503 mutex_lock(&file->mut);
504 list_del(&ctx->list);
505 mutex_unlock(&file->mut);
500 kfree(ctx); 506 kfree(ctx);
501 return ret; 507 return ret;
502} 508}
@@ -664,19 +670,23 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
664 int in_len, int out_len) 670 int in_len, int out_len)
665{ 671{
666 struct rdma_ucm_resolve_ip cmd; 672 struct rdma_ucm_resolve_ip cmd;
673 struct sockaddr *src, *dst;
667 struct ucma_context *ctx; 674 struct ucma_context *ctx;
668 int ret; 675 int ret;
669 676
670 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 677 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
671 return -EFAULT; 678 return -EFAULT;
672 679
680 src = (struct sockaddr *) &cmd.src_addr;
681 dst = (struct sockaddr *) &cmd.dst_addr;
682 if (!rdma_addr_size(src) || !rdma_addr_size(dst))
683 return -EINVAL;
684
673 ctx = ucma_get_ctx(file, cmd.id); 685 ctx = ucma_get_ctx(file, cmd.id);
674 if (IS_ERR(ctx)) 686 if (IS_ERR(ctx))
675 return PTR_ERR(ctx); 687 return PTR_ERR(ctx);
676 688
677 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, 689 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
678 (struct sockaddr *) &cmd.dst_addr,
679 cmd.timeout_ms);
680 ucma_put_ctx(ctx); 690 ucma_put_ctx(ctx);
681 return ret; 691 return ret;
682} 692}
@@ -1149,6 +1159,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
1149 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1159 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1150 return -EFAULT; 1160 return -EFAULT;
1151 1161
1162 if (cmd.qp_state > IB_QPS_ERR)
1163 return -EINVAL;
1164
1152 ctx = ucma_get_ctx(file, cmd.id); 1165 ctx = ucma_get_ctx(file, cmd.id);
1153 if (IS_ERR(ctx)) 1166 if (IS_ERR(ctx))
1154 return PTR_ERR(ctx); 1167 return PTR_ERR(ctx);
@@ -1294,6 +1307,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
1294 if (IS_ERR(ctx)) 1307 if (IS_ERR(ctx))
1295 return PTR_ERR(ctx); 1308 return PTR_ERR(ctx);
1296 1309
1310 if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
1311 return -EINVAL;
1312
1297 optval = memdup_user((void __user *) (unsigned long) cmd.optval, 1313 optval = memdup_user((void __user *) (unsigned long) cmd.optval,
1298 cmd.optlen); 1314 cmd.optlen);
1299 if (IS_ERR(optval)) { 1315 if (IS_ERR(optval)) {
@@ -1343,7 +1359,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
1343 return -ENOSPC; 1359 return -ENOSPC;
1344 1360
1345 addr = (struct sockaddr *) &cmd->addr; 1361 addr = (struct sockaddr *) &cmd->addr;
1346 if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) 1362 if (cmd->addr_size != rdma_addr_size(addr))
1347 return -EINVAL; 1363 return -EINVAL;
1348 1364
1349 if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) 1365 if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
@@ -1411,6 +1427,9 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
1411 join_cmd.uid = cmd.uid; 1427 join_cmd.uid = cmd.uid;
1412 join_cmd.id = cmd.id; 1428 join_cmd.id = cmd.id;
1413 join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); 1429 join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
1430 if (!join_cmd.addr_size)
1431 return -EINVAL;
1432
1414 join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; 1433 join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
1415 memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); 1434 memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
1416 1435
@@ -1426,6 +1445,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
1426 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1445 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1427 return -EFAULT; 1446 return -EFAULT;
1428 1447
1448 if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
1449 return -EINVAL;
1450
1429 return ucma_process_join(file, &cmd, out_len); 1451 return ucma_process_join(file, &cmd, out_len);
1430} 1452}
1431 1453