 drivers/infiniband/Kconfig                  |   5
 drivers/infiniband/core/Makefile            |   5
 drivers/infiniband/core/addr.c              |  97
 drivers/infiniband/core/cm.c                |  52
 drivers/infiniband/core/cma.c               |  74
 drivers/infiniband/core/core_priv.h         |   2
 drivers/infiniband/core/sa_query.c          |  12
 drivers/infiniband/core/ucma.c              |  18
 drivers/infiniband/core/uverbs_cmd.c        |   4
 drivers/infiniband/core/verbs.c             |  98
 drivers/infiniband/hw/ehca/ehca_qp.c        |   2
 drivers/infiniband/hw/ipath/ipath_qp.c      |   2
 drivers/infiniband/hw/mlx4/Kconfig          |   2
 drivers/infiniband/hw/mlx4/ah.c             |  40
 drivers/infiniband/hw/mlx4/cq.c             |   9
 drivers/infiniband/hw/mlx4/main.c           | 475
 drivers/infiniband/hw/mlx4/mlx4_ib.h        |   6
 drivers/infiniband/hw/mlx4/qp.c             | 104
 drivers/infiniband/hw/mlx5/qp.c             |   3
 drivers/infiniband/hw/mthca/mthca_qp.c      |   3
 drivers/infiniband/hw/ocrdma/Kconfig        |   2
 drivers/infiniband/hw/ocrdma/ocrdma.h       |  12
 drivers/infiniband/hw/ocrdma/ocrdma_ah.c    |   5
 drivers/infiniband/hw/ocrdma/ocrdma_hw.c    |  21
 drivers/infiniband/hw/ocrdma/ocrdma_hw.h    |   1
 drivers/infiniband/hw/ocrdma/ocrdma_main.c  | 138
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c |   3
 drivers/infiniband/hw/qib/qib_qp.c          |   2
 drivers/net/ethernet/mellanox/mlx4/port.c   |  20
 include/linux/mlx4/cq.h                     |  15
 include/linux/mlx4/device.h                 |   1
 include/rdma/ib_addr.h                      |  69
 include/rdma/ib_cm.h                        |   1
 include/rdma/ib_pack.h                      |   1
 include/rdma/ib_sa.h                        |   3
 include/rdma/ib_verbs.h                     |  21
 36 files changed, 939 insertions(+), 389 deletions(-)
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 10219ee92191..77089399359b 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -3,6 +3,8 @@ menuconfig INFINIBAND
 	depends on PCI || BROKEN
 	depends on HAS_IOMEM
 	depends on NET
+	depends on INET
+	depends on m || IPV6 != m
 	---help---
 	  Core support for InfiniBand (IB). Make sure to also select
 	  any protocols you wish to use as well as drivers for your
@@ -38,8 +40,7 @@ config INFINIBAND_USER_MEM
 
 config INFINIBAND_ADDR_TRANS
 	bool
-	depends on INET
-	depends on !(INFINIBAND = y && IPV6 = m)
+	depends on INFINIBAND
 	default y
 
 source "drivers/infiniband/hw/mthca/Kconfig"
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index c8bbaef1becb..3ab3865544bb 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,8 +1,9 @@
-infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS)	:= ib_addr.o rdma_cm.o
+infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS)	:= rdma_cm.o
 user_access-$(CONFIG_INFINIBAND_ADDR_TRANS)	:= rdma_ucm.o
 
 obj-$(CONFIG_INFINIBAND) +=		ib_core.o ib_mad.o ib_sa.o \
-					ib_cm.o iw_cm.o $(infiniband-y)
+					ib_cm.o iw_cm.o ib_addr.o \
+					$(infiniband-y)
 obj-$(CONFIG_INFINIBAND_USER_MAD) +=	ib_umad.o
 obj-$(CONFIG_INFINIBAND_USER_ACCESS) +=	ib_uverbs.o ib_ucm.o \
 					$(user_access-y)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e90f2b2eabd7..8172d37f9add 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -86,6 +86,8 @@ int rdma_addr_size(struct sockaddr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_size);
 
+static struct rdma_addr_client self;
+
 void rdma_addr_register_client(struct rdma_addr_client *client)
 {
 	atomic_set(&client->refcount, 1);
@@ -119,7 +121,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 }
 EXPORT_SYMBOL(rdma_copy_addr);
 
-int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
+int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
+		      u16 *vlan_id)
 {
 	struct net_device *dev;
 	int ret = -EADDRNOTAVAIL;
@@ -142,6 +145,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 			return ret;
 
 		ret = rdma_copy_addr(dev_addr, dev, NULL);
+		if (vlan_id)
+			*vlan_id = rdma_vlan_dev_vlan_id(dev);
 		dev_put(dev);
 		break;
 
@@ -153,6 +158,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 				  &((struct sockaddr_in6 *) addr)->sin6_addr,
 				  dev, 1)) {
 			ret = rdma_copy_addr(dev_addr, dev, NULL);
+			if (vlan_id)
+				*vlan_id = rdma_vlan_dev_vlan_id(dev);
 			break;
 		}
 	}
@@ -238,7 +245,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 	src_in->sin_addr.s_addr = fl4.saddr;
 
 	if (rt->dst.dev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+		ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
 		if (!ret)
 			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
 		goto put;
@@ -286,7 +293,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 	}
 
 	if (dst->dev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+		ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
 		if (!ret)
 			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
 		goto put;
@@ -437,6 +444,88 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_cancel);
 
+struct resolve_cb_context {
+	struct rdma_dev_addr *addr;
+	struct completion comp;
+};
+
+static void resolve_cb(int status, struct sockaddr *src_addr,
+		       struct rdma_dev_addr *addr, void *context)
+{
+	memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
+				rdma_dev_addr));
+	complete(&((struct resolve_cb_context *)context)->comp);
+}
+
+int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
+			       u16 *vlan_id)
+{
+	int ret = 0;
+	struct rdma_dev_addr dev_addr;
+	struct resolve_cb_context ctx;
+	struct net_device *dev;
+
+	union {
+		struct sockaddr     _sockaddr;
+		struct sockaddr_in  _sockaddr_in;
+		struct sockaddr_in6 _sockaddr_in6;
+	} sgid_addr, dgid_addr;
+
+
+	ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+	if (ret)
+		return ret;
+
+	ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+	if (ret)
+		return ret;
+
+	memset(&dev_addr, 0, sizeof(dev_addr));
+
+	ctx.addr = &dev_addr;
+	init_completion(&ctx.comp);
+	ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
+			&dev_addr, 1000, resolve_cb, &ctx);
+	if (ret)
+		return ret;
+
+	wait_for_completion(&ctx.comp);
+
+	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
+	dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
+	if (!dev)
+		return -ENODEV;
+	if (vlan_id)
+		*vlan_id = rdma_vlan_dev_vlan_id(dev);
+	dev_put(dev);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_dmac_by_grh);
+
+int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
+{
+	int ret = 0;
+	struct rdma_dev_addr dev_addr;
+	union {
+		struct sockaddr     _sockaddr;
+		struct sockaddr_in  _sockaddr_in;
+		struct sockaddr_in6 _sockaddr_in6;
+	} gid_addr;
+
+	ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+
+	if (ret)
+		return ret;
+	memset(&dev_addr, 0, sizeof(dev_addr));
+	ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
+	if (ret)
+		return ret;
+
+	memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
+
 static int netevent_callback(struct notifier_block *self, unsigned long event,
 			     void *ctx)
 {
@@ -461,11 +550,13 @@ static int __init addr_init(void)
 		return -ENOMEM;
 
 	register_netevent_notifier(&nb);
+	rdma_addr_register_client(&self);
 	return 0;
 }
 
 static void __exit addr_cleanup(void)
 {
+	rdma_addr_unregister_client(&self);
 	unregister_netevent_notifier(&nb);
 	destroy_workqueue(addr_wq);
 }
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f2ef7ef0f36f..0601b9daf840 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -47,6 +47,7 @@
 #include <linux/sysfs.h>
 #include <linux/workqueue.h>
 #include <linux/kdev_t.h>
+#include <linux/etherdevice.h>
 
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
@@ -177,6 +178,8 @@ struct cm_av {
 	struct ib_ah_attr ah_attr;
 	u16 pkey_index;
 	u8 timeout;
+	u8  valid;
+	u8  smac[ETH_ALEN];
 };
 
 struct cm_work {
@@ -346,6 +349,23 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
 			   grh, &av->ah_attr);
 }
 
+int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac)
+{
+	struct cm_id_private *cm_id_priv;
+
+	cm_id_priv = container_of(id, struct cm_id_private, id);
+
+	if (smac != NULL)
+		memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac));
+
+	if (alt_smac != NULL)
+		memcpy(cm_id_priv->alt_av.smac, alt_smac,
+		       sizeof(cm_id_priv->alt_av.smac));
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_update_cm_av);
+
 static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 {
 	struct cm_device *cm_dev;
@@ -376,6 +396,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
 			     &av->ah_attr);
 	av->timeout = path->packet_life_time + 1;
+	memcpy(av->smac, path->smac, sizeof(av->smac));
+
+	av->valid = 1;
 	return 0;
 }
 
@@ -1554,6 +1577,9 @@ static int cm_req_handler(struct cm_work *work)
 
 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
+
+	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
+	work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
 	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
 	if (ret) {
 		ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -3500,6 +3526,32 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
 		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
+		if (!cm_id_priv->av.valid) {
+			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+			return -EINVAL;
+		}
+		if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
+			qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
+			*qp_attr_mask |= IB_QP_VID;
+		}
+		if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
+			memcpy(qp_attr->smac, cm_id_priv->av.smac,
+			       sizeof(qp_attr->smac));
+			*qp_attr_mask |= IB_QP_SMAC;
+		}
+		if (cm_id_priv->alt_av.valid) {
+			if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
+				qp_attr->alt_vlan_id =
+					cm_id_priv->alt_av.ah_attr.vlan_id;
+				*qp_attr_mask |= IB_QP_ALT_VID;
+			}
+			if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
+				memcpy(qp_attr->alt_smac,
+				       cm_id_priv->alt_av.smac,
+				       sizeof(qp_attr->alt_smac));
+				*qp_attr_mask |= IB_QP_ALT_SMAC;
+			}
+		}
 		qp_attr->path_mtu = cm_id_priv->path_mtu;
 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 4173a2ad6d08..9b079a7ea29c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -340,7 +340,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
 	int ret;
 
 	if (addr->sa_family != AF_IB) {
-		ret = rdma_translate_ip(addr, dev_addr);
+		ret = rdma_translate_ip(addr, dev_addr, NULL);
 	} else {
 		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
 		ret = 0;
@@ -365,7 +365,9 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
 		return -EINVAL;
 
 	mutex_lock(&lock);
-	iboe_addr_get_sgid(dev_addr, &iboe_gid);
+	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+		    &iboe_gid);
+
 	memcpy(&gid, dev_addr->src_dev_addr +
 	       rdma_addr_gid_offset(dev_addr), sizeof gid);
 	if (listen_id_priv &&
@@ -603,6 +605,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 {
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;
+	union ib_gid sgid;
 
 	mutex_lock(&id_priv->qp_mutex);
 	if (!id_priv->id.qp) {
@@ -625,6 +628,20 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
+	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
+			   qp_attr.ah_attr.grh.sgid_index, &sgid);
+	if (ret)
+		goto out;
+
+	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
+	    == RDMA_TRANSPORT_IB &&
+	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
+	    == IB_LINK_LAYER_ETHERNET) {
+		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
+
+		if (ret)
+			goto out;
+	}
 	if (conn_param)
 		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
@@ -725,6 +742,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		else
 			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
 						 qp_attr_mask);
+
 		if (qp_attr->qp_state == IB_QPS_RTR)
 			qp_attr->rq_psn = id_priv->seq_num;
 		break;
@@ -1266,6 +1284,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_id_private *listen_id, *conn_id;
 	struct rdma_cm_event event;
 	int offset, ret;
+	u8 smac[ETH_ALEN];
+	u8 alt_smac[ETH_ALEN];
+	u8 *psmac = smac;
+	u8 *palt_smac = alt_smac;
+	int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
+			RDMA_TRANSPORT_IB) &&
+		       (rdma_port_get_link_layer(cm_id->device,
+			ib_event->param.req_rcvd.port) ==
+			IB_LINK_LAYER_ETHERNET));
 
 	listen_id = cm_id->context;
 	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
@@ -1310,12 +1337,29 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	if (ret)
 		goto err3;
 
+	if (is_iboe) {
+		if (ib_event->param.req_rcvd.primary_path != NULL)
+			rdma_addr_find_smac_by_sgid(
+				&ib_event->param.req_rcvd.primary_path->sgid,
+				psmac, NULL);
+		else
+			psmac = NULL;
+		if (ib_event->param.req_rcvd.alternate_path != NULL)
+			rdma_addr_find_smac_by_sgid(
+				&ib_event->param.req_rcvd.alternate_path->sgid,
+				palt_smac, NULL);
+		else
+			palt_smac = NULL;
+	}
 	/*
 	 * Acquire mutex to prevent user executing rdma_destroy_id()
 	 * while we're accessing the cm_id.
 	 */
 	mutex_lock(&lock);
-	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+	if (is_iboe)
+		ib_update_cm_av(cm_id, psmac, palt_smac);
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
+	    (conn_id->id.qp_type != IB_QPT_UD))
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 	mutex_unlock(&lock);
 	mutex_unlock(&conn_id->handler_mutex);
@@ -1474,7 +1518,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	conn_id->state = RDMA_CM_CONNECT;
 
-	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
+	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
 	if (ret) {
 		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
@@ -1873,7 +1917,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 	struct cma_work *work;
 	int ret;
 	struct net_device *ndev = NULL;
-	u16 vid;
+
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (!work)
@@ -1897,10 +1941,14 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	vid = rdma_vlan_dev_vlan_id(ndev);
+	route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
+	memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
+	memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
 
-	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
-	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+		    &route->path_rec->sgid);
+	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
+		    &route->path_rec->dgid);
 
 	route->path_rec->hop_limit = 1;
 	route->path_rec->reversible = 1;
@@ -2063,6 +2111,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 			   RDMA_CM_ADDR_RESOLVED))
 		goto out;
 
+	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv, NULL);
 
@@ -2072,10 +2121,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 			goto out;
 		event.event = RDMA_CM_EVENT_ADDR_ERROR;
 		event.status = status;
-	} else {
-		memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
+	} else
 		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
-	}
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
 		cma_exch(id_priv, RDMA_CM_DESTROYING);
@@ -2559,6 +2606,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 	if (ret)
 		goto err1;
 
+	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
 	if (!cma_any_addr(addr)) {
 		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
 		if (ret)
@@ -2569,7 +2617,6 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 			goto err1;
 	}
 
-	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
 	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
 		if (addr->sa_family == AF_INET)
 			id_priv->afonly = 1;
@@ -3298,7 +3345,8 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
 		err = -EINVAL;
 		goto out2;
 	}
-	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+		    &mc->multicast.ib->rec.port_gid);
 	work->id = id_priv;
 	work->mc = mc;
 	INIT_WORK(&work->work, iboe_mcast_work_handler);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a565af5c2d2e..87d1936f5c1c 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -49,4 +49,6 @@ void ib_sysfs_cleanup(void);
 int ib_cache_setup(void);
 void ib_cache_cleanup(void);
 
+int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
+			    struct ib_qp_attr *qp_attr, int *qp_attr_mask);
 #endif  /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 9838ca484389..f820958e4047 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -42,7 +42,7 @@
 #include <linux/kref.h>
 #include <linux/idr.h>
 #include <linux/workqueue.h>
-
+#include <uapi/linux/if_ether.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_cache.h>
 #include "sa.h"
@@ -556,6 +556,13 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 		ah_attr->grh.hop_limit     = rec->hop_limit;
 		ah_attr->grh.traffic_class = rec->traffic_class;
 	}
+	if (force_grh) {
+		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
+		ah_attr->vlan_id = rec->vlan_id;
+	} else {
+		ah_attr->vlan_id = 0xffff;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(ib_init_ah_from_path);
@@ -670,6 +677,9 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
 
 		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
 			  mad->data, &rec);
+		rec.vlan_id = 0xffff;
+		memset(rec.dmac, 0, ETH_ALEN);
+		memset(rec.smac, 0, ETH_ALEN);
 		query->callback(status, &rec, query->context);
 	} else
 		query->callback(status, NULL, query->context);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ab8b1c30b36b..56a4b7ca7ee3 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -655,24 +655,14 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
 static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
 				 struct rdma_route *route)
 {
-	struct rdma_dev_addr *dev_addr;
-	struct net_device *dev;
-	u16 vid = 0;
 
 	resp->num_paths = route->num_paths;
 	switch (route->num_paths) {
 	case 0:
-		dev_addr = &route->addr.dev_addr;
-		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
-		if (dev) {
-			vid = rdma_vlan_dev_vlan_id(dev);
-			dev_put(dev);
-		}
-
-		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
-				    dev_addr->dst_dev_addr, vid);
-		iboe_addr_get_sgid(dev_addr,
-				   (union ib_gid *) &resp->ib_route[0].sgid);
+		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
+			    (union ib_gid *)&resp->ib_route[0].dgid);
+		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
+			    (union ib_gid *)&resp->ib_route[0].sgid);
 		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
 		break;
 	case 2:
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f1cc83855af6..ea6203ee7bcc 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -40,6 +40,7 @@
 #include <asm/uaccess.h>
 
 #include "uverbs.h"
+#include "core_priv.h"
 
 struct uverbs_lock_class {
 	struct lock_class_key key;
@@ -1961,6 +1962,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;
 
 	if (qp->real_qp == qp) {
+		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
+		if (ret)
+			goto out;
 		ret = qp->device->modify_qp(qp, attr,
 			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
 	} else {
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 88590e3da8c0..3ac795115438 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -44,6 +44,9 @@
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
+
+#include "core_priv.h"
 
 int ib_rate_to_mult(enum ib_rate rate)
 {
@@ -195,8 +198,28 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
 	u32 flow_class;
 	u16 gid_index;
 	int ret;
+	int is_eth = (rdma_port_get_link_layer(device, port_num) ==
+			IB_LINK_LAYER_ETHERNET);
 
 	memset(ah_attr, 0, sizeof *ah_attr);
+	if (is_eth) {
+		if (!(wc->wc_flags & IB_WC_GRH))
+			return -EPROTOTYPE;
+
+		if (wc->wc_flags & IB_WC_WITH_SMAC &&
+		    wc->wc_flags & IB_WC_WITH_VLAN) {
+			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
+			ah_attr->vlan_id = wc->vlan_id;
+		} else {
+			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
+					ah_attr->dmac, &ah_attr->vlan_id);
+			if (ret)
+				return ret;
+		}
+	} else {
+		ah_attr->vlan_id = 0xffff;
+	}
+
 	ah_attr->dlid = wc->slid;
 	ah_attr->sl = wc->sl;
 	ah_attr->src_path_bits = wc->dlid_path_bits;
@@ -479,7 +502,9 @@ EXPORT_SYMBOL(ib_create_qp);
 static const struct {
 	int			valid;
 	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
+	enum ib_qp_attr_mask	req_param_add_eth[IB_QPT_MAX];
 	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
+	enum ib_qp_attr_mask	opt_param_add_eth[IB_QPT_MAX];
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = {
 		[IB_QPS_RESET] = { .valid = 1 },
@@ -560,6 +585,12 @@ static const struct {
 					IB_QP_MAX_DEST_RD_ATOMIC |
 					IB_QP_MIN_RNR_TIMER),
 			},
+			.req_param_add_eth = {
+				[IB_QPT_RC]  = (IB_QP_SMAC),
+				[IB_QPT_UC]  = (IB_QP_SMAC),
+				[IB_QPT_XRC_INI] = (IB_QP_SMAC),
+				[IB_QPT_XRC_TGT] = (IB_QP_SMAC)
+			},
 			.opt_param = {
 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
 						IB_QP_QKEY),
@@ -579,7 +610,21 @@ static const struct {
 						IB_QP_QKEY),
 				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
 						IB_QP_QKEY),
-			}
+			},
+			.opt_param_add_eth = {
+				[IB_QPT_RC]  = (IB_QP_ALT_SMAC |
+						IB_QP_VID |
+						IB_QP_ALT_VID),
+				[IB_QPT_UC]  = (IB_QP_ALT_SMAC |
+						IB_QP_VID |
+						IB_QP_ALT_VID),
+				[IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC |
+						IB_QP_VID |
+						IB_QP_ALT_VID),
+				[IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC |
+						IB_QP_VID |
+						IB_QP_ALT_VID)
+			}
 		}
 	},
 	[IB_QPS_RTR] = {
@@ -782,7 +827,8 @@ static const struct {
 };
 
 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
-		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
+		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
+		       enum rdma_link_layer ll)
 {
 	enum ib_qp_attr_mask req_param, opt_param;
 
@@ -801,6 +847,13 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 	req_param = qp_state_table[cur_state][next_state].req_param[type];
 	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
 
+	if (ll == IB_LINK_LAYER_ETHERNET) {
+		req_param |= qp_state_table[cur_state][next_state].
+			req_param_add_eth[type];
+		opt_param |= qp_state_table[cur_state][next_state].
+			opt_param_add_eth[type];
+	}
+
 	if ((mask & req_param) != req_param)
 		return 0;
 
@@ -811,10 +864,51 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 }
 EXPORT_SYMBOL(ib_modify_qp_is_ok);
 
+int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
+			    struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+{
+	int ret = 0;
+	union ib_gid sgid;
+
+	if ((*qp_attr_mask & IB_QP_AV)  &&
+	    (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
+		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
+				   qp_attr->ah_attr.grh.sgid_index, &sgid);
+		if (ret)
+			goto out;
+		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
+			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
+			rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
+			qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
+		} else {
+			ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
+					qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
+			if (ret)
+				goto out;
+			ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
+			if (ret)
+				goto out;
+		}
+		*qp_attr_mask |= IB_QP_SMAC;
+		if (qp_attr->vlan_id < 0xFFFF)
+			*qp_attr_mask |= IB_QP_VID;
+	}
+out:
+	return ret;
+}
+EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);
+
+
 int ib_modify_qp(struct ib_qp *qp,
 		 struct ib_qp_attr *qp_attr,
 		 int qp_attr_mask)
 {
+	int ret;
+
+	ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
+	if (ret)
+		return ret;
+
 	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 }
 EXPORT_SYMBOL(ib_modify_qp);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00d6861a6a18..2e89356c46fa 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1329,7 +1329,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
 	if (!smi_reset2init &&
 	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
-				attr_mask)) {
+				attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
 		ret = -EINVAL;
 		ehca_err(ibqp->device,
 			 "Invalid qp transition new_state=%x cur_state=%x "
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 0857a9c3cd3d..face87602dc1 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -463,7 +463,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-				attr_mask))
+				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
 		goto inval;
 
 	if (attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index 24ab11a9ad1e..fc01deac1d3c 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,6 @@
 config MLX4_INFINIBAND
 	tristate "Mellanox ConnectX HCA support"
-	depends on NETDEVICES && ETHERNET && PCI
+	depends on NETDEVICES && ETHERNET && PCI && INET
 	select NET_VENDOR_MELLANOX
 	select MLX4_CORE
 	---help---
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index a251becdaa98..170dca608042 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -39,25 +39,6 @@
 
 #include "mlx4_ib.h"
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-			u8 *mac, int *is_mcast, u8 port)
-{
-	struct in6_addr in6;
-
-	*is_mcast = 0;
-
-	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
-	if (rdma_link_local_addr(&in6))
-		rdma_get_ll_mac(&in6, mac);
-	else if (rdma_is_multicast_addr(&in6)) {
-		rdma_get_mcast_mac(&in6, mac);
-		*is_mcast = 1;
-	} else
-		return -EINVAL;
-
-	return 0;
-}
-
 static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 				  struct mlx4_ib_ah *ah)
 {
@@ -92,21 +73,18 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 {
 	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
 	struct mlx4_dev *dev = ibdev->dev;
-	union ib_gid sgid;
-	u8 mac[6];
-	int err;
 	int is_mcast;
+	struct in6_addr in6;
 	u16 vlan_tag;
 
-	err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
-	if (err)
-		return ERR_PTR(err);
-
-	memcpy(ah->av.eth.mac, mac, 6);
-	err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
-	if (err)
-		return ERR_PTR(err);
-	vlan_tag = rdma_get_vlan_id(&sgid);
+	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+	if (rdma_is_multicast_addr(&in6)) {
+		is_mcast = 1;
+		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
+	} else {
+		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
+	}
+	vlan_tag = ah_attr->vlan_id;
 	if (vlan_tag < 0x1000)
 		vlan_tag |= (ah_attr->sl & 7) << 13;
 	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 66dbf8062374..cc40f08ca8f1 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -798,6 +798,15 @@ repoll:
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
 		else
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
+		if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
+			wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+				MLX4_CQE_VID_MASK;
+		} else {
+			wc->vlan_id = 0xffff;
+		}
+		wc->wc_flags |= IB_WC_WITH_VLAN;
+		memcpy(wc->smac, cqe->smac, ETH_ALEN);
+		wc->wc_flags |= IB_WC_WITH_SMAC;
 	}
 
 	return 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ea5844e89b2a..c2702f549f10 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -39,6 +39,8 @@
 #include <linux/inetdevice.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
 
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
@@ -794,7 +796,6 @@ static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 		   union ib_gid *gid)
 {
-	u8 mac[6];
 	struct net_device *ndev;
 	int ret = 0;
 
@@ -808,11 +809,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 	spin_unlock(&mdev->iboe.lock);
 
 	if (ndev) {
-		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
-		rtnl_lock();
-		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
 		ret = 1;
-		rtnl_unlock();
 		dev_put(ndev);
 	}
 
@@ -1164,6 +1161,8 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
 	u64 reg_id;
 	struct mlx4_ib_steering *ib_steering = NULL;
+	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1175,7 +1174,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
 				    !!(mqp->flags &
 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-				    MLX4_PROT_IB_IPV6, &reg_id);
+				    prot, &reg_id);
 	if (err)
 		goto err_malloc;
 
@@ -1194,7 +1193,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 err_add:
 	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-			      MLX4_PROT_IB_IPV6, reg_id);
+			      prot, reg_id);
 err_malloc:
 	kfree(ib_steering);
 
@@ -1222,10 +1221,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	int err;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-	u8 mac[6];
 	struct net_device *ndev;
 	struct mlx4_ib_gid_entry *ge;
 	u64 reg_id = 0;
+	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1248,7 +1248,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	}
 
 	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-				    MLX4_PROT_IB_IPV6, reg_id);
+				    prot, reg_id);
 	if (err)
 		return err;
 
@@ -1260,13 +1260,8 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		if (ndev)
 			dev_hold(ndev);
 		spin_unlock(&mdev->iboe.lock);
-		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
-		if (ndev) {
-			rtnl_lock();
-			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
-			rtnl_unlock();
+		if (ndev)
 			dev_put(ndev);
-		}
 		list_del(&ge->list);
 		kfree(ge);
 	} else
@@ -1362,20 +1357,6 @@ static struct device_attribute *mlx4_class_attributes[] = {
 	&dev_attr_board_id
 };
 
-static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
-{
-	memcpy(eui, dev->dev_addr, 3);
-	memcpy(eui + 5, dev->dev_addr + 3, 3);
-	if (vlan_id < 0x1000) {
-		eui[3] = vlan_id >> 8;
-		eui[4] = vlan_id & 0xff;
-	} else {
-		eui[3] = 0xff;
-		eui[4] = 0xfe;
-	}
-	eui[0] ^= 2;
-}
-
 static void update_gids_task(struct work_struct *work)
 {
 	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
@@ -1398,161 +1379,318 @@ static void update_gids_task(struct work_struct *work)
1398 MLX4_CMD_WRAPPED); 1379 MLX4_CMD_WRAPPED);
1399 if (err) 1380 if (err)
1400 pr_warn("set port command failed\n"); 1381 pr_warn("set port command failed\n");
1401 else { 1382 else
1402 memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
1403 mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE); 1383 mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
1404 }
1405 1384
1406 mlx4_free_cmd_mailbox(dev, mailbox); 1385 mlx4_free_cmd_mailbox(dev, mailbox);
1407 kfree(gw); 1386 kfree(gw);
1408} 1387}
1409 1388
1410static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear) 1389static void reset_gids_task(struct work_struct *work)
1411{ 1390{
1412 struct net_device *ndev = dev->iboe.netdevs[port - 1]; 1391 struct update_gid_work *gw =
1413 struct update_gid_work *work; 1392 container_of(work, struct update_gid_work, work);
1414 struct net_device *tmp; 1393 struct mlx4_cmd_mailbox *mailbox;
1394 union ib_gid *gids;
1395 int err;
1415 int i; 1396 int i;
1416 u8 *hits; 1397 struct mlx4_dev *dev = gw->dev->dev;
1417 int ret;
1418 union ib_gid gid;
1419 int free;
1420 int found;
1421 int need_update = 0;
1422 u16 vid;
1423 1398
1424 work = kzalloc(sizeof *work, GFP_ATOMIC); 1399 mailbox = mlx4_alloc_cmd_mailbox(dev);
1425 if (!work) 1400 if (IS_ERR(mailbox)) {
1426 return -ENOMEM; 1401 pr_warn("reset gid table failed\n");
1402 goto free;
1403 }
1427 1404
1428 hits = kzalloc(128, GFP_ATOMIC); 1405 gids = mailbox->buf;
1429 if (!hits) { 1406 memcpy(gids, gw->gids, sizeof(gw->gids));
1430 ret = -ENOMEM; 1407
1431 goto out; 1408 for (i = 1; i < gw->dev->num_ports + 1; i++) {
1409 if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
1410 IB_LINK_LAYER_ETHERNET) {
1411 err = mlx4_cmd(dev, mailbox->dma,
1412 MLX4_SET_PORT_GID_TABLE << 8 | i,
1413 1, MLX4_CMD_SET_PORT,
1414 MLX4_CMD_TIME_CLASS_B,
1415 MLX4_CMD_WRAPPED);
1416 if (err)
1417 pr_warn(KERN_WARNING
1418 "set port %d command failed\n", i);
1419 }
1432 } 1420 }
1433 1421
1434 rcu_read_lock(); 1422 mlx4_free_cmd_mailbox(dev, mailbox);
1435 for_each_netdev_rcu(&init_net, tmp) { 1423free:
1436 if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) { 1424 kfree(gw);
1437 gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); 1425}
1438 vid = rdma_vlan_dev_vlan_id(tmp);
1439 mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
1440 found = 0;
1441 free = -1;
1442 for (i = 0; i < 128; ++i) {
1443 if (free < 0 &&
1444 !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
1445 free = i;
1446 if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
1447 hits[i] = 1;
1448 found = 1;
1449 break;
1450 }
1451 }
1452 1426
1453 if (!found) { 1427static int update_gid_table(struct mlx4_ib_dev *dev, int port,
1454 if (tmp == ndev && 1428 union ib_gid *gid, int clear)
1455 (memcmp(&dev->iboe.gid_table[port - 1][0], 1429{
1456 &gid, sizeof gid) || 1430 struct update_gid_work *work;
1457 !memcmp(&dev->iboe.gid_table[port - 1][0], 1431 int i;
1458 &zgid, sizeof gid))) { 1432 int need_update = 0;
1459 dev->iboe.gid_table[port - 1][0] = gid; 1433 int free = -1;
1460 ++need_update; 1434 int found = -1;
1461 hits[0] = 1; 1435 int max_gids;
1462 } else if (free >= 0) { 1436
1463 dev->iboe.gid_table[port - 1][free] = gid; 1437 max_gids = dev->dev->caps.gid_table_len[port];
1464 hits[free] = 1; 1438 for (i = 0; i < max_gids; ++i) {
1465 ++need_update; 1439 if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
1466 } 1440 sizeof(*gid)))
1441 found = i;
1442
1443 if (clear) {
1444 if (found >= 0) {
1445 need_update = 1;
1446 dev->iboe.gid_table[port - 1][found] = zgid;
1447 break;
1467 } 1448 }
1449 } else {
1450 if (found >= 0)
1451 break;
1452
1453 if (free < 0 &&
1454 !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
1455 sizeof(*gid)))
1456 free = i;
1468 } 1457 }
1469 } 1458 }
1470 rcu_read_unlock();
1471 1459
1472 for (i = 0; i < 128; ++i) 1460 if (found == -1 && !clear && free >= 0) {
1473 if (!hits[i]) { 1461 dev->iboe.gid_table[port - 1][free] = *gid;
1474 if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid)) 1462 need_update = 1;
1475 ++need_update; 1463 }
1476 dev->iboe.gid_table[port - 1][i] = zgid;
1477 }
1478 1464
1479 if (need_update) { 1465 if (!need_update)
1480 memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids); 1466 return 0;
1481 INIT_WORK(&work->work, update_gids_task); 1467
1482 work->port = port; 1468 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1483 work->dev = dev; 1469 if (!work)
1484 queue_work(wq, &work->work); 1470 return -ENOMEM;
1485 } else 1471
1486 kfree(work); 1472 memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
1473 INIT_WORK(&work->work, update_gids_task);
1474 work->port = port;
1475 work->dev = dev;
1476 queue_work(wq, &work->work);
1487 1477
1488 kfree(hits);
1489 return 0; 1478 return 0;
1479}
1490 1480
1491out: 1481static int reset_gid_table(struct mlx4_ib_dev *dev)
1492 kfree(work); 1482{
1493 return ret; 1483 struct update_gid_work *work;
1484
1485
1486 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1487 if (!work)
1488 return -ENOMEM;
1489 memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
1490 memset(work->gids, 0, sizeof(work->gids));
1491 INIT_WORK(&work->work, reset_gids_task);
1492 work->dev = dev;
1493 queue_work(wq, &work->work);
1494 return 0;
1494} 1495}
1495 1496
1496static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event) 1497static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
1498 struct mlx4_ib_dev *ibdev, union ib_gid *gid)
1497{ 1499{
1498 switch (event) { 1500 struct mlx4_ib_iboe *iboe;
1499 case NETDEV_UP: 1501 int port = 0;
1500 case NETDEV_CHANGEADDR: 1502 struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
1501 update_ipv6_gids(dev, port, 0); 1503 rdma_vlan_dev_real_dev(event_netdev) :
1502 break; 1504 event_netdev;
1505
1506 if (event != NETDEV_DOWN && event != NETDEV_UP)
1507 return 0;
1508
1509 if ((real_dev != event_netdev) &&
1510 (event == NETDEV_DOWN) &&
1511 rdma_link_local_addr((struct in6_addr *)gid))
1512 return 0;
1513
1514 iboe = &ibdev->iboe;
1515 spin_lock(&iboe->lock);
1516
1517 for (port = 1; port <= MLX4_MAX_PORTS; ++port)
1518 if ((netif_is_bond_master(real_dev) &&
1519 (real_dev == iboe->masters[port - 1])) ||
1520 (!netif_is_bond_master(real_dev) &&
1521 (real_dev == iboe->netdevs[port - 1])))
1522 update_gid_table(ibdev, port, gid,
1523 event == NETDEV_DOWN);
1524
1525 spin_unlock(&iboe->lock);
1526 return 0;
1503 1527
1504 case NETDEV_DOWN:
1505 update_ipv6_gids(dev, port, 1);
1506 dev->iboe.netdevs[port - 1] = NULL;
1507 }
1508} 1528}
1509 1529
1510static void netdev_added(struct mlx4_ib_dev *dev, int port) 1530static u8 mlx4_ib_get_dev_port(struct net_device *dev,
1531 struct mlx4_ib_dev *ibdev)
1511{ 1532{
1512 update_ipv6_gids(dev, port, 0); 1533 u8 port = 0;
1534 struct mlx4_ib_iboe *iboe;
1535 struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
1536 rdma_vlan_dev_real_dev(dev) : dev;
1537
1538 iboe = &ibdev->iboe;
1539 spin_lock(&iboe->lock);
1540
1541 for (port = 1; port <= MLX4_MAX_PORTS; ++port)
1542 if ((netif_is_bond_master(real_dev) &&
1543 (real_dev == iboe->masters[port - 1])) ||
1544 (!netif_is_bond_master(real_dev) &&
1545 (real_dev == iboe->netdevs[port - 1])))
1546 break;
1547
1548 spin_unlock(&iboe->lock);
1549
1550 if ((port == 0) || (port > MLX4_MAX_PORTS))
1551 return 0;
1552 else
1553 return port;
1513} 1554}
1514 1555
1515static void netdev_removed(struct mlx4_ib_dev *dev, int port) 1556static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
1557 void *ptr)
1516{ 1558{
1517 update_ipv6_gids(dev, port, 1); 1559 struct mlx4_ib_dev *ibdev;
1560 struct in_ifaddr *ifa = ptr;
1561 union ib_gid gid;
1562 struct net_device *event_netdev = ifa->ifa_dev->dev;
1563
1564 ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
1565
1566 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
1567
1568 mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
1569 return NOTIFY_DONE;
1518} 1570}
1519 1571
1520static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event, 1572#if IS_ENABLED(CONFIG_IPV6)
1573static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
1521 void *ptr) 1574 void *ptr)
1522{ 1575{
1523 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1524 struct mlx4_ib_dev *ibdev; 1576 struct mlx4_ib_dev *ibdev;
1525 struct net_device *oldnd; 1577 struct inet6_ifaddr *ifa = ptr;
1578 union ib_gid *gid = (union ib_gid *)&ifa->addr;
1579 struct net_device *event_netdev = ifa->idev->dev;
1580
1581 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);
1582
1583 mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
1584 return NOTIFY_DONE;
1585}
1586#endif
1587
1588static void mlx4_ib_get_dev_addr(struct net_device *dev,
1589 struct mlx4_ib_dev *ibdev, u8 port)
1590{
1591 struct in_device *in_dev;
1592#if IS_ENABLED(CONFIG_IPV6)
1593 struct inet6_dev *in6_dev;
1594 union ib_gid *pgid;
1595 struct inet6_ifaddr *ifp;
1596#endif
1597 union ib_gid gid;
1598
1599
1600 if ((port == 0) || (port > MLX4_MAX_PORTS))
1601 return;
1602
1603 /* IPv4 gids */
1604 in_dev = in_dev_get(dev);
1605 if (in_dev) {
1606 for_ifa(in_dev) {
1607 /*ifa->ifa_address;*/
1608 ipv6_addr_set_v4mapped(ifa->ifa_address,
1609 (struct in6_addr *)&gid);
1610 update_gid_table(ibdev, port, &gid, 0);
1611 }
1612 endfor_ifa(in_dev);
1613 in_dev_put(in_dev);
1614 }
1615#if IS_ENABLED(CONFIG_IPV6)
1616 /* IPv6 gids */
1617 in6_dev = in6_dev_get(dev);
1618 if (in6_dev) {
1619 read_lock_bh(&in6_dev->lock);
1620 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
1621 pgid = (union ib_gid *)&ifp->addr;
1622 update_gid_table(ibdev, port, pgid, 0);
1623 }
1624 read_unlock_bh(&in6_dev->lock);
1625 in6_dev_put(in6_dev);
1626 }
1627#endif
1628}
1629
1630static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
1631{
1632 struct net_device *dev;
1633
1634 if (reset_gid_table(ibdev))
1635 return -1;
1636
1637 read_lock(&dev_base_lock);
1638
1639 for_each_netdev(&init_net, dev) {
1640 u8 port = mlx4_ib_get_dev_port(dev, ibdev);
1641 if (port)
1642 mlx4_ib_get_dev_addr(dev, ibdev, port);
1643 }
1644
1645 read_unlock(&dev_base_lock);
1646
1647 return 0;
1648}
1649
1650static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
1651{
1526 struct mlx4_ib_iboe *iboe; 1652 struct mlx4_ib_iboe *iboe;
1527 int port; 1653 int port;
1528 1654
1529 if (!net_eq(dev_net(dev), &init_net))
1530 return NOTIFY_DONE;
1531
1532 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1533 iboe = &ibdev->iboe; 1655 iboe = &ibdev->iboe;
1534 1656
1535 spin_lock(&iboe->lock); 1657 spin_lock(&iboe->lock);
1536 mlx4_foreach_ib_transport_port(port, ibdev->dev) { 1658 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
1537 oldnd = iboe->netdevs[port - 1]; 1659 struct net_device *old_master = iboe->masters[port - 1];
1660 struct net_device *curr_master;
1538 iboe->netdevs[port - 1] = 1661 iboe->netdevs[port - 1] =
1539 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); 1662 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
1540 if (oldnd != iboe->netdevs[port - 1]) { 1663
1541 if (iboe->netdevs[port - 1]) 1664 if (iboe->netdevs[port - 1] &&
1542 netdev_added(ibdev, port); 1665 netif_is_bond_slave(iboe->netdevs[port - 1])) {
1543 else 1666 rtnl_lock();
1544 netdev_removed(ibdev, port); 1667 iboe->masters[port - 1] = netdev_master_upper_dev_get(
1668 iboe->netdevs[port - 1]);
1669 rtnl_unlock();
1545 } 1670 }
1546 } 1671 curr_master = iboe->masters[port - 1];
1547 1672
1548 if (dev == iboe->netdevs[0] || 1673 /* when bonding is used, the slave may be added to masters
1549 (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0])) 1674 only after an IP address has been assigned to the bonding
1550 handle_en_event(ibdev, 1, event); 1675 interface */
1551 else if (dev == iboe->netdevs[1] 1676 if (curr_master && (old_master != curr_master))
1552 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1])) 1677 mlx4_ib_get_dev_addr(curr_master, ibdev, port);
1553 handle_en_event(ibdev, 2, event); 1678 }
1554 1679
1555 spin_unlock(&iboe->lock); 1680 spin_unlock(&iboe->lock);
1681}
1682
1683static int mlx4_ib_netdev_event(struct notifier_block *this,
1684 unsigned long event, void *ptr)
1685{
1686 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1687 struct mlx4_ib_dev *ibdev;
1688
1689 if (!net_eq(dev_net(dev), &init_net))
1690 return NOTIFY_DONE;
1691
1692 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1693 mlx4_ib_scan_netdevs(ibdev);
1556 1694
1557 return NOTIFY_DONE; 1695 return NOTIFY_DONE;
1558} 1696}
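
Both address notifiers above reduce an interface address to a GID before handing it to mlx4_ib_addr_event(). For IPv4 this relies on ipv6_addr_set_v4mapped(), which produces the IPv4-mapped IPv6 form ::ffff:a.b.c.d. A minimal userspace sketch of that mapping (v4mapped_gid() is illustrative, not part of the patch):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Mirror of the kernel's ipv6_addr_set_v4mapped(): the IPv4 address
 * occupies the last four bytes, preceded by 0xffff at bytes 10-11. */
static void v4mapped_gid(uint32_t addr_be, unsigned char gid[16])
{
        memset(gid, 0, 16);
        gid[10] = 0xff;
        gid[11] = 0xff;
        memcpy(gid + 12, &addr_be, 4);
}

int main(void)
{
        unsigned char gid[16];
        char buf[INET6_ADDRSTRLEN];
        struct in_addr a;

        inet_pton(AF_INET, "192.0.2.1", &a);
        v4mapped_gid(a.s_addr, gid);
        inet_ntop(AF_INET6, gid, buf, sizeof(buf));
        printf("%s\n", buf);    /* prints ::ffff:192.0.2.1 */
        return 0;
}
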
@@ -1886,11 +2024,35 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1886 if (mlx4_ib_init_sriov(ibdev)) 2024 if (mlx4_ib_init_sriov(ibdev))
1887 goto err_mad; 2025 goto err_mad;
1888 2026
1889 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) { 2027 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
1890 iboe->nb.notifier_call = mlx4_ib_netdev_event; 2028 if (!iboe->nb.notifier_call) {
1891 err = register_netdevice_notifier(&iboe->nb); 2029 iboe->nb.notifier_call = mlx4_ib_netdev_event;
1892 if (err) 2030 err = register_netdevice_notifier(&iboe->nb);
1893 goto err_sriov; 2031 if (err) {
2032 iboe->nb.notifier_call = NULL;
2033 goto err_notif;
2034 }
2035 }
2036 if (!iboe->nb_inet.notifier_call) {
2037 iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
2038 err = register_inetaddr_notifier(&iboe->nb_inet);
2039 if (err) {
2040 iboe->nb_inet.notifier_call = NULL;
2041 goto err_notif;
2042 }
2043 }
2044#if IS_ENABLED(CONFIG_IPV6)
2045 if (!iboe->nb_inet6.notifier_call) {
2046 iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
2047 err = register_inet6addr_notifier(&iboe->nb_inet6);
2048 if (err) {
2049 iboe->nb_inet6.notifier_call = NULL;
2050 goto err_notif;
2051 }
2052 }
2053#endif
2054 mlx4_ib_scan_netdevs(ibdev);
2055 mlx4_ib_init_gid_table(ibdev);
1894 } 2056 }
1895 2057
1896 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { 2058 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -1916,11 +2078,25 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1916 return ibdev; 2078 return ibdev;
1917 2079
1918err_notif: 2080err_notif:
1919 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) 2081 if (ibdev->iboe.nb.notifier_call) {
1920 pr_warn("failure unregistering notifier\n"); 2082 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2083 pr_warn("failure unregistering notifier\n");
2084 ibdev->iboe.nb.notifier_call = NULL;
2085 }
2086 if (ibdev->iboe.nb_inet.notifier_call) {
2087 if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
2088 pr_warn("failure unregistering notifier\n");
2089 ibdev->iboe.nb_inet.notifier_call = NULL;
2090 }
2091#if IS_ENABLED(CONFIG_IPV6)
2092 if (ibdev->iboe.nb_inet6.notifier_call) {
2093 if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
2094 pr_warn("failure unregistering notifier\n");
2095 ibdev->iboe.nb_inet6.notifier_call = NULL;
2096 }
2097#endif
1921 flush_workqueue(wq); 2098 flush_workqueue(wq);
1922 2099
1923err_sriov:
1924 mlx4_ib_close_sriov(ibdev); 2100 mlx4_ib_close_sriov(ibdev);
1925 2101
1926err_mad: 2102err_mad:
@@ -2039,6 +2215,19 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2039 kfree(ibdev->ib_uc_qpns_bitmap); 2215 kfree(ibdev->ib_uc_qpns_bitmap);
2040 } 2216 }
2041 2217
2218 if (ibdev->iboe.nb_inet.notifier_call) {
2219 if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
2220 pr_warn("failure unregistering notifier\n");
2221 ibdev->iboe.nb_inet.notifier_call = NULL;
2222 }
2223#if IS_ENABLED(CONFIG_IPV6)
2224 if (ibdev->iboe.nb_inet6.notifier_call) {
2225 if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
2226 pr_warn("failure unregistering notifier\n");
2227 ibdev->iboe.nb_inet6.notifier_call = NULL;
2228 }
2229#endif
2230
2042 iounmap(ibdev->uar_map); 2231 iounmap(ibdev->uar_map);
2043 for (p = 0; p < ibdev->num_ports; ++p) 2232 for (p = 0; p < ibdev->num_ports; ++p)
2044 if (ibdev->counters[p] != -1) 2233 if (ibdev->counters[p] != -1)
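
Throughout the error and removal paths above, the notifier_call pointer doubles as an "is registered" flag: it is set just before registration, cleared again if registration fails, and checked before every unregister, so cleanup stays idempotent. A condensed sketch of the pattern (one notifier shown; the same shape repeats for nb, nb_inet and nb_inet6):

if (!iboe->nb_inet.notifier_call) {
        iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
        err = register_inetaddr_notifier(&iboe->nb_inet);
        if (err) {
                iboe->nb_inet.notifier_call = NULL;
                goto err_notif; /* unregisters only what did register */
        }
}
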
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 837f9aa3d2a2..a230683af940 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -432,7 +432,10 @@ struct mlx4_ib_sriov {
432struct mlx4_ib_iboe { 432struct mlx4_ib_iboe {
433 spinlock_t lock; 433 spinlock_t lock;
434 struct net_device *netdevs[MLX4_MAX_PORTS]; 434 struct net_device *netdevs[MLX4_MAX_PORTS];
435 struct net_device *masters[MLX4_MAX_PORTS];
435 struct notifier_block nb; 436 struct notifier_block nb;
437 struct notifier_block nb_inet;
438 struct notifier_block nb_inet6;
436 union ib_gid gid_table[MLX4_MAX_PORTS][128]; 439 union ib_gid gid_table[MLX4_MAX_PORTS][128];
437}; 440};
438 441
@@ -683,9 +686,6 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
683int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 686int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
684 union ib_gid *gid, int netw_view); 687 union ib_gid *gid, int netw_view);
685 688
686int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
687 u8 *mac, int *is_mcast, u8 port);
688
689static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah) 689static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
690{ 690{
691 u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3; 691 u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 387fbf274151..d8f4d1fe8494 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -90,6 +90,21 @@ enum {
90 MLX4_RAW_QP_MSGMAX = 31, 90 MLX4_RAW_QP_MSGMAX = 31,
91}; 91};
92 92
93#ifndef ETH_ALEN
94#define ETH_ALEN 6
95#endif
96static inline u64 mlx4_mac_to_u64(u8 *addr)
97{
98 u64 mac = 0;
99 int i;
100
101 for (i = 0; i < ETH_ALEN; i++) {
102 mac <<= 8;
103 mac |= addr[i];
104 }
105 return mac;
106}
107
93static const __be32 mlx4_ib_opcode[] = { 108static const __be32 mlx4_ib_opcode[] = {
94 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), 109 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
95 [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), 110 [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
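
mlx4_mac_to_u64() above folds the six MAC bytes, most significant first, into the low 48 bits of a u64, which is the representation mlx4_find_cached_mac() compares against the port's MAC table. A runnable userspace equivalent:

#include <stdio.h>
#include <stdint.h>

/* Userspace copy of the patch's mlx4_mac_to_u64(). */
static uint64_t mac_to_u64(const uint8_t addr[6])
{
        uint64_t mac = 0;
        int i;

        for (i = 0; i < 6; i++) {
                mac <<= 8;
                mac |= addr[i];
        }
        return mac;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };

        printf("0x%012llx\n", (unsigned long long)mac_to_u64(mac));
        /* prints 0x0002c9123456 */
        return 0;
}
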
@@ -1171,16 +1186,15 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
1171 path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6); 1186 path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
1172} 1187}
1173 1188
1174static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, 1189static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
1175 struct mlx4_qp_path *path, u8 port) 1190 u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
1191 u8 port)
1176{ 1192{
1177 int err;
1178 int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) == 1193 int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
1179 IB_LINK_LAYER_ETHERNET; 1194 IB_LINK_LAYER_ETHERNET;
1180 u8 mac[6];
1181 int is_mcast;
1182 u16 vlan_tag;
1183 int vidx; 1195 int vidx;
1196 int smac_index;
1197
1184 1198
1185 path->grh_mylmc = ah->src_path_bits & 0x7f; 1199 path->grh_mylmc = ah->src_path_bits & 0x7f;
1186 path->rlid = cpu_to_be16(ah->dlid); 1200 path->rlid = cpu_to_be16(ah->dlid);
@@ -1215,22 +1229,27 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
1215 if (!(ah->ah_flags & IB_AH_GRH)) 1229 if (!(ah->ah_flags & IB_AH_GRH))
1216 return -1; 1230 return -1;
1217 1231
1218 err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port); 1232 memcpy(path->dmac, ah->dmac, ETH_ALEN);
1219 if (err)
1220 return err;
1221
1222 memcpy(path->dmac, mac, 6);
1223 path->ackto = MLX4_IB_LINK_TYPE_ETH; 1233 path->ackto = MLX4_IB_LINK_TYPE_ETH;
1224 /* use index 0 into MAC table for IBoE */ 1234 /* find the index in the MAC table for IBoE */
1225 path->grh_mylmc &= 0x80; 1235 if (!is_zero_ether_addr((const u8 *)&smac)) {
1236 if (mlx4_find_cached_mac(dev->dev, port, smac,
1237 &smac_index))
1238 return -ENOENT;
1239 } else {
1240 smac_index = 0;
1241 }
1226 1242
1227 vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]); 1243 path->grh_mylmc &= 0x80 | smac_index;
1244
1245 path->feup |= MLX4_FEUP_FORCE_ETH_UP;
1228 if (vlan_tag < 0x1000) { 1246 if (vlan_tag < 0x1000) {
1229 if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx)) 1247 if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
1230 return -ENOENT; 1248 return -ENOENT;
1231 1249
1232 path->vlan_index = vidx; 1250 path->vlan_index = vidx;
1233 path->fl = 1 << 6; 1251 path->fl = 1 << 6;
1252 path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
1234 } 1253 }
1235 } else 1254 } else
1236 path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | 1255 path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
@@ -1239,6 +1258,28 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
1239 return 0; 1258 return 0;
1240} 1259}
1241 1260
1261static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
1262 enum ib_qp_attr_mask qp_attr_mask,
1263 struct mlx4_qp_path *path, u8 port)
1264{
1265 return _mlx4_set_path(dev, &qp->ah_attr,
1266 mlx4_mac_to_u64((u8 *)qp->smac),
1267 (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
1268 path, port);
1269}
1270
1271static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
1272 const struct ib_qp_attr *qp,
1273 enum ib_qp_attr_mask qp_attr_mask,
1274 struct mlx4_qp_path *path, u8 port)
1275{
1276 return _mlx4_set_path(dev, &qp->alt_ah_attr,
1277 mlx4_mac_to_u64((u8 *)qp->alt_smac),
1278 (qp_attr_mask & IB_QP_ALT_VID) ?
1279 qp->alt_vlan_id : 0xffff,
1280 path, port);
1281}
1282
1242static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) 1283static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1243{ 1284{
1244 struct mlx4_ib_gid_entry *ge, *tmp; 1285 struct mlx4_ib_gid_entry *ge, *tmp;
@@ -1362,7 +1403,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1362 } 1403 }
1363 1404
1364 if (attr_mask & IB_QP_AV) { 1405 if (attr_mask & IB_QP_AV) {
1365 if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path, 1406 if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
1366 attr_mask & IB_QP_PORT ? 1407 attr_mask & IB_QP_PORT ?
1367 attr->port_num : qp->port)) 1408 attr->port_num : qp->port))
1368 goto out; 1409 goto out;
@@ -1385,8 +1426,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1385 dev->dev->caps.pkey_table_len[attr->alt_port_num]) 1426 dev->dev->caps.pkey_table_len[attr->alt_port_num])
1386 goto out; 1427 goto out;
1387 1428
1388 if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path, 1429 if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
1389 attr->alt_port_num)) 1430 attr->alt_port_num))
1390 goto out; 1431 goto out;
1391 1432
1392 context->alt_path.pkey_index = attr->alt_pkey_index; 1433 context->alt_path.pkey_index = attr->alt_pkey_index;
@@ -1497,6 +1538,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1497 context->pri_path.ackto = (context->pri_path.ackto & 0xf8) | 1538 context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
1498 MLX4_IB_LINK_TYPE_ETH; 1539 MLX4_IB_LINK_TYPE_ETH;
1499 1540
1541 if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
1542 int is_eth = rdma_port_get_link_layer(
1543 &dev->ib_dev, qp->port) ==
1544 IB_LINK_LAYER_ETHERNET;
1545 if (is_eth) {
1546 context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
1547 optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
1548 }
1549 }
1550
1551
1500 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && 1552 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1501 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) 1553 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1502 sqd_event = 1; 1554 sqd_event = 1;
@@ -1599,13 +1651,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1599 struct mlx4_ib_qp *qp = to_mqp(ibqp); 1651 struct mlx4_ib_qp *qp = to_mqp(ibqp);
1600 enum ib_qp_state cur_state, new_state; 1652 enum ib_qp_state cur_state, new_state;
1601 int err = -EINVAL; 1653 int err = -EINVAL;
1602 1654 int ll;
1603 mutex_lock(&qp->mutex); 1655 mutex_lock(&qp->mutex);
1604 1656
1605 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; 1657 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1606 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 1658 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1607 1659
1608 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { 1660 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1661 ll = IB_LINK_LAYER_UNSPECIFIED;
1662 } else {
1663 int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1664 ll = rdma_port_get_link_layer(&dev->ib_dev, port);
1665 }
1666
1667 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1668 attr_mask, ll)) {
1609 pr_debug("qpn 0x%x: invalid attribute mask specified " 1669 pr_debug("qpn 0x%x: invalid attribute mask specified "
1610 "for transition %d to %d. qp_type %d," 1670 "for transition %d to %d. qp_type %d,"
1611 " attr_mask 0x%x\n", 1671 " attr_mask 0x%x\n",
@@ -1822,8 +1882,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1822 return err; 1882 return err;
1823 } 1883 }
1824 1884
1825 vlan = rdma_get_vlan_id(&sgid); 1885 if (ah->av.eth.vlan != 0xffff) {
1826 is_vlan = vlan < 0x1000; 1886 vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
1887 is_vlan = 1;
1888 }
1827 } 1889 }
1828 ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header); 1890 ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
1829 1891
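
The build_mlx_header() change completes the move of L2 resolution out of the driver: the address handle now arrives with dmac and vlan_id already filled in by the core (see the ib_ah_attr additions in ib_verbs.h below), and 0xffff in ah->av.eth.vlan means "no VLAN". A hedged sketch of what a core-side caller does before handing the attr to the driver; the actual call sites are in core hunks not shown here, and the error handling is illustrative:

struct ib_ah_attr ah_attr;

/* ... fill GRH fields from the path record ... */
if (rdma_addr_find_dmac_by_grh(&sgid, &ah_attr.grh.dgid,
                               ah_attr.dmac, &ah_attr.vlan_id))
        return -EHOSTUNREACH;   /* L2 resolution failed */
/* the driver then just copies ah_attr.dmac into the QP path */
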
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 70dd77085db6..ae37fb9bf262 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1664,7 +1664,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1664 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 1664 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1665 1665
1666 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR && 1666 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1667 !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) 1667 !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
1668 IB_LINK_LAYER_UNSPECIFIED))
1668 goto out; 1669 goto out;
1669 1670
1670 if ((attr_mask & IB_QP_PORT) && 1671 if ((attr_mask & IB_QP_PORT) &&
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 26a684536109..e354b2f04ad9 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -860,7 +860,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
860 860
861 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 861 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
862 862
863 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { 863 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
864 IB_LINK_LAYER_UNSPECIFIED)) {
864 mthca_dbg(dev, "Bad QP transition (transport %d) " 865 mthca_dbg(dev, "Bad QP transition (transport %d) "
865 "%d->%d with attr 0x%08x\n", 866 "%d->%d with attr 0x%08x\n",
866 qp->transport, cur_state, new_state, 867 qp->transport, cur_state, new_state,
diff --git a/drivers/infiniband/hw/ocrdma/Kconfig b/drivers/infiniband/hw/ocrdma/Kconfig
index b5b6056c8518..c0cddc0192d1 100644
--- a/drivers/infiniband/hw/ocrdma/Kconfig
+++ b/drivers/infiniband/hw/ocrdma/Kconfig
@@ -1,6 +1,6 @@
1config INFINIBAND_OCRDMA 1config INFINIBAND_OCRDMA
2 tristate "Emulex One Connect HCA support" 2 tristate "Emulex One Connect HCA support"
3 depends on ETHERNET && NETDEVICES && PCI && (IPV6 || IPV6=n) 3 depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n)
4 select NET_VENDOR_EMULEX 4 select NET_VENDOR_EMULEX
5 select BE2NET 5 select BE2NET
6 ---help--- 6 ---help---
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 294dd27b601e..7c001b97b23f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -423,5 +423,17 @@ static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
423 OCRDMA_CQE_WRITE_IMM) ? 1 : 0; 423 OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
424} 424}
425 425
426static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
427 struct ib_ah_attr *ah_attr, u8 *mac_addr)
428{
429 struct in6_addr in6;
430
431 memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
432 if (rdma_is_multicast_addr(&in6))
433 rdma_get_mcast_mac(&in6, mac_addr);
434 else
435 memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
436 return 0;
437}
426 438
427#endif 439#endif
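
ocrdma_resolve_dmac() above keeps only the multicast branch of the old resolver; unicast destinations take the pre-resolved ah_attr->dmac. The multicast branch uses the standard IPv6-multicast-to-Ethernet mapping of rdma_get_mcast_mac(): 33:33 followed by the last four bytes of the GID. A runnable userspace sketch:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Mirror of rdma_get_mcast_mac(): 33:33:xx:xx:xx:xx from the GID tail. */
static void mcast_gid_to_mac(const unsigned char gid[16], unsigned char mac[6])
{
        mac[0] = 0x33;
        mac[1] = 0x33;
        memcpy(mac + 2, gid + 12, 4);
}

int main(void)
{
        unsigned char gid[16], mac[6];

        inet_pton(AF_INET6, "ff02::1:ff12:3456", gid);
        mcast_gid_to_mac(gid, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        /* prints 33:33:ff:12:34:56 */
        return 0;
}
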
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 69da5dd1ca8a..34071143006e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -49,7 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
49 49
50 ah->sgid_index = attr->grh.sgid_index; 50 ah->sgid_index = attr->grh.sgid_index;
51 51
52 vlan_tag = rdma_get_vlan_id(&attr->grh.dgid); 52 vlan_tag = attr->vlan_id;
53 if (!vlan_tag || (vlan_tag > 0xFFF)) 53 if (!vlan_tag || (vlan_tag > 0xFFF))
54 vlan_tag = dev->pvid; 54 vlan_tag = dev->pvid;
55 if (vlan_tag && (vlan_tag < 0x1000)) { 55 if (vlan_tag && (vlan_tag < 0x1000)) {
@@ -64,7 +64,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
64 eth_sz = sizeof(struct ocrdma_eth_basic); 64 eth_sz = sizeof(struct ocrdma_eth_basic);
65 } 65 }
66 memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN); 66 memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
67 status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, &eth.dmac[0]); 67 memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
68 status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
68 if (status) 69 if (status)
69 return status; 70 return status;
70 status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, 71 status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 56bf32fcb62c..1664d648cbfc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -2076,23 +2076,6 @@ mbx_err:
2076 return status; 2076 return status;
2077} 2077}
2078 2078
2079int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
2080 u8 *mac_addr)
2081{
2082 struct in6_addr in6;
2083
2084 memcpy(&in6, dgid, sizeof in6);
2085 if (rdma_is_multicast_addr(&in6)) {
2086 rdma_get_mcast_mac(&in6, mac_addr);
2087 } else if (rdma_link_local_addr(&in6)) {
2088 rdma_get_ll_mac(&in6, mac_addr);
2089 } else {
2090 pr_err("%s() fail to resolve mac_addr.\n", __func__);
2091 return -EINVAL;
2092 }
2093 return 0;
2094}
2095
2096static int ocrdma_set_av_params(struct ocrdma_qp *qp, 2079static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2097 struct ocrdma_modify_qp *cmd, 2080 struct ocrdma_modify_qp *cmd,
2098 struct ib_qp_attr *attrs) 2081 struct ib_qp_attr *attrs)
@@ -2126,14 +2109,14 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2126 2109
2127 qp->sgid_idx = ah_attr->grh.sgid_index; 2110 qp->sgid_idx = ah_attr->grh.sgid_index;
2128 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); 2111 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
2129 ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]); 2112 ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
2130 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | 2113 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2131 (mac_addr[2] << 16) | (mac_addr[3] << 24); 2114 (mac_addr[2] << 16) | (mac_addr[3] << 24);
2132 /* convert them to LE format. */ 2115 /* convert them to LE format. */
2133 ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid)); 2116 ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
2134 ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid)); 2117 ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
2135 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); 2118 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
2136 vlan_id = rdma_get_vlan_id(&sgid); 2119 vlan_id = ah_attr->vlan_id;
2137 if (vlan_id && (vlan_id < 0x1000)) { 2120 if (vlan_id && (vlan_id < 0x1000)) {
2138 cmd->params.vlan_dmac_b4_to_b5 |= 2121 cmd->params.vlan_dmac_b4_to_b5 |=
2139 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; 2122 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index f2a89d4cc7c4..82fe332ae6c6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -94,7 +94,6 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
94int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed); 94int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
95int ocrdma_query_config(struct ocrdma_dev *, 95int ocrdma_query_config(struct ocrdma_dev *,
96 struct ocrdma_mbx_query_config *config); 96 struct ocrdma_mbx_query_config *config);
97int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr);
98 97
99int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *); 98int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
100int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *); 99int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 91443bcb9e0e..2ca86ca818bd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -67,46 +67,24 @@ void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
67 guid[7] = mac_addr[5]; 67 guid[7] = mac_addr[5];
68} 68}
69 69
70static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr, 70static bool ocrdma_add_sgid(struct ocrdma_dev *dev, union ib_gid *new_sgid)
71 bool is_vlan, u16 vlan_id)
72{
73 sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
74 sgid->raw[8] = mac_addr[0] ^ 2;
75 sgid->raw[9] = mac_addr[1];
76 sgid->raw[10] = mac_addr[2];
77 if (is_vlan) {
78 sgid->raw[11] = vlan_id >> 8;
79 sgid->raw[12] = vlan_id & 0xff;
80 } else {
81 sgid->raw[11] = 0xff;
82 sgid->raw[12] = 0xfe;
83 }
84 sgid->raw[13] = mac_addr[3];
85 sgid->raw[14] = mac_addr[4];
86 sgid->raw[15] = mac_addr[5];
87}
88
89static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
90 bool is_vlan, u16 vlan_id)
91{ 71{
92 int i; 72 int i;
93 union ib_gid new_sgid;
94 unsigned long flags; 73 unsigned long flags;
95 74
96 memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid)); 75 memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
97 76
98 ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id);
99 77
100 spin_lock_irqsave(&dev->sgid_lock, flags); 78 spin_lock_irqsave(&dev->sgid_lock, flags);
101 for (i = 0; i < OCRDMA_MAX_SGID; i++) { 79 for (i = 0; i < OCRDMA_MAX_SGID; i++) {
102 if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid, 80 if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
103 sizeof(union ib_gid))) { 81 sizeof(union ib_gid))) {
104 /* found free entry */ 82 /* found free entry */
105 memcpy(&dev->sgid_tbl[i], &new_sgid, 83 memcpy(&dev->sgid_tbl[i], new_sgid,
106 sizeof(union ib_gid)); 84 sizeof(union ib_gid));
107 spin_unlock_irqrestore(&dev->sgid_lock, flags); 85 spin_unlock_irqrestore(&dev->sgid_lock, flags);
108 return true; 86 return true;
109 } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid, 87 } else if (!memcmp(&dev->sgid_tbl[i], new_sgid,
110 sizeof(union ib_gid))) { 88 sizeof(union ib_gid))) {
111 /* entry already present, no addition is required. */ 89 /* entry already present, no addition is required. */
112 spin_unlock_irqrestore(&dev->sgid_lock, flags); 90 spin_unlock_irqrestore(&dev->sgid_lock, flags);
@@ -117,20 +95,17 @@ static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
117 return false; 95 return false;
118} 96}
119 97
120static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, 98static bool ocrdma_del_sgid(struct ocrdma_dev *dev, union ib_gid *sgid)
121 bool is_vlan, u16 vlan_id)
122{ 99{
123 int found = false; 100 int found = false;
124 int i; 101 int i;
125 union ib_gid sgid;
126 unsigned long flags; 102 unsigned long flags;
127 103
128 ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id);
129 104
130 spin_lock_irqsave(&dev->sgid_lock, flags); 105 spin_lock_irqsave(&dev->sgid_lock, flags);
131 /* first is default sgid, which cannot be deleted. */ 106 /* first is default sgid, which cannot be deleted. */
132 for (i = 1; i < OCRDMA_MAX_SGID; i++) { 107 for (i = 1; i < OCRDMA_MAX_SGID; i++) {
133 if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) { 108 if (!memcmp(&dev->sgid_tbl[i], sgid, sizeof(union ib_gid))) {
134 /* found matching entry */ 109 /* found matching entry */
135 memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid)); 110 memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
136 found = true; 111 found = true;
@@ -141,75 +116,18 @@ static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
141 return found; 116 return found;
142} 117}
143 118
144static void ocrdma_add_default_sgid(struct ocrdma_dev *dev) 119static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
145{ 120 union ib_gid *gid)
146 /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
147 union ib_gid *sgid = &dev->sgid_tbl[0];
148
149 sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
150 ocrdma_get_guid(dev, &sgid->raw[8]);
151}
152
153#if IS_ENABLED(CONFIG_VLAN_8021Q)
154static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
155{
156 struct net_device *netdev, *tmp;
157 u16 vlan_id;
158 bool is_vlan;
159
160 netdev = dev->nic_info.netdev;
161
162 rcu_read_lock();
163 for_each_netdev_rcu(&init_net, tmp) {
164 if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
165 if (!netif_running(tmp) || !netif_oper_up(tmp))
166 continue;
167 if (netdev != tmp) {
168 vlan_id = vlan_dev_vlan_id(tmp);
169 is_vlan = true;
170 } else {
171 is_vlan = false;
172 vlan_id = 0;
173 tmp = netdev;
174 }
175 ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id);
176 }
177 }
178 rcu_read_unlock();
179}
180#else
181static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
182{
183
184}
185#endif /* VLAN */
186
187static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
188{ 121{
189 ocrdma_add_default_sgid(dev);
190 ocrdma_add_vlan_sgids(dev);
191 return 0;
192}
193
194#if IS_ENABLED(CONFIG_IPV6)
195
196static int ocrdma_inet6addr_event(struct notifier_block *notifier,
197 unsigned long event, void *ptr)
198{
199 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
200 struct net_device *netdev = ifa->idev->dev;
201 struct ib_event gid_event; 122 struct ib_event gid_event;
202 struct ocrdma_dev *dev; 123 struct ocrdma_dev *dev;
203 bool found = false; 124 bool found = false;
204 bool updated = false; 125 bool updated = false;
205 bool is_vlan = false; 126 bool is_vlan = false;
206 u16 vid = 0;
207 127
208 is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; 128 is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
209 if (is_vlan) { 129 if (is_vlan)
210 vid = vlan_dev_vlan_id(netdev);
211 netdev = vlan_dev_real_dev(netdev); 130 netdev = vlan_dev_real_dev(netdev);
212 }
213 131
214 rcu_read_lock(); 132 rcu_read_lock();
215 list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { 133 list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
@@ -222,16 +140,14 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
222 140
223 if (!found) 141 if (!found)
224 return NOTIFY_DONE; 142 return NOTIFY_DONE;
225 if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr))
226 return NOTIFY_DONE;
227 143
228 mutex_lock(&dev->dev_lock); 144 mutex_lock(&dev->dev_lock);
229 switch (event) { 145 switch (event) {
230 case NETDEV_UP: 146 case NETDEV_UP:
231 updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid); 147 updated = ocrdma_add_sgid(dev, gid);
232 break; 148 break;
233 case NETDEV_DOWN: 149 case NETDEV_DOWN:
234 updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid); 150 updated = ocrdma_del_sgid(dev, gid);
235 break; 151 break;
236 default: 152 default:
237 break; 153 break;
@@ -247,6 +163,32 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
247 return NOTIFY_OK; 163 return NOTIFY_OK;
248} 164}
249 165
166static int ocrdma_inetaddr_event(struct notifier_block *notifier,
167 unsigned long event, void *ptr)
168{
169 struct in_ifaddr *ifa = ptr;
170 union ib_gid gid;
171 struct net_device *netdev = ifa->ifa_dev->dev;
172
173 ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
174 return ocrdma_addr_event(event, netdev, &gid);
175}
176
177static struct notifier_block ocrdma_inetaddr_notifier = {
178 .notifier_call = ocrdma_inetaddr_event
179};
180
181#if IS_ENABLED(CONFIG_IPV6)
182
183static int ocrdma_inet6addr_event(struct notifier_block *notifier,
184 unsigned long event, void *ptr)
185{
186 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
187 union ib_gid *gid = (union ib_gid *)&ifa->addr;
188 struct net_device *netdev = ifa->idev->dev;
189 return ocrdma_addr_event(event, netdev, gid);
190}
191
250static struct notifier_block ocrdma_inet6addr_notifier = { 192static struct notifier_block ocrdma_inet6addr_notifier = {
251 .notifier_call = ocrdma_inet6addr_event 193 .notifier_call = ocrdma_inet6addr_event
252}; 194};
@@ -423,10 +365,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
423 if (status) 365 if (status)
424 goto alloc_err; 366 goto alloc_err;
425 367
426 status = ocrdma_build_sgid_tbl(dev);
427 if (status)
428 goto alloc_err;
429
430 status = ocrdma_register_device(dev); 368 status = ocrdma_register_device(dev);
431 if (status) 369 if (status)
432 goto alloc_err; 370 goto alloc_err;
@@ -553,6 +491,10 @@ static int __init ocrdma_init_module(void)
553{ 491{
554 int status; 492 int status;
555 493
494 status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
495 if (status)
496 return status;
497
556#if IS_ENABLED(CONFIG_IPV6) 498#if IS_ENABLED(CONFIG_IPV6)
557 status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); 499 status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
558 if (status) 500 if (status)
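
Module init now registers the IPv4 notifier before the IPv6 one. The matching teardown is not part of this hunk; presumably the exit path mirrors init in reverse. A sketch under that assumption:

/* Assumed shape of the exit path (not shown in this hunk). */
static void __exit ocrdma_exit_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
#endif
        unregister_inetaddr_notifier(&ocrdma_inetaddr_notifier);
        /* ... existing driver teardown ... */
}
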
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 7686dceadd29..aa92f40c9d50 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1326,7 +1326,8 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1326 new_qps = old_qps; 1326 new_qps = old_qps;
1327 spin_unlock_irqrestore(&qp->q_lock, flags); 1327 spin_unlock_irqrestore(&qp->q_lock, flags);
1328 1328
1329 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) { 1329 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
1330 IB_LINK_LAYER_ETHERNET)) {
1330 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n" 1331 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1331 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", 1332 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1332 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, 1333 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 3cca55b51e54..0cad0c40d742 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -585,7 +585,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
585 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 585 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
586 586
587 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, 587 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
588 attr_mask)) 588 attr_mask, IB_LINK_LAYER_UNSPECIFIED))
589 goto inval; 589 goto inval;
590 590
591 if (attr_mask & IB_QP_AV) { 591 if (attr_mask & IB_QP_AV) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 97d342fa5032..f50ef6a5ee5e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -123,6 +123,26 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
123 return err; 123 return err;
124} 124}
125 125
126int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
127{
128 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
129 struct mlx4_mac_table *table = &info->mac_table;
130 int i;
131
132 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
133 if (!table->refs[i])
134 continue;
135
136 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
137 *idx = i;
138 return 0;
139 }
140 }
141
142 return -ENOENT;
143}
144EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
145
126int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) 146int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
127{ 147{
128 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 148 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
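
mlx4_find_cached_mac() is a read-only scan of the port's cached MAC table, returning -ENOENT on a miss. Its consumer is _mlx4_set_path() in the qp.c hunk above: a zero smac keeps the legacy table index 0, anything else must already be programmed on the port. Condensed from that hunk:

int smac_index = 0;

if (!is_zero_ether_addr((const u8 *)&smac)) {
        if (mlx4_find_cached_mac(dev->dev, port, smac, &smac_index))
                return -ENOENT; /* MAC not in the port's table */
}
path->grh_mylmc &= 0x80 | smac_index;
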
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 98fa492cf406..e1862997f933 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -34,6 +34,7 @@
34#define MLX4_CQ_H 34#define MLX4_CQ_H
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <uapi/linux/if_ether.h>
37 38
38#include <linux/mlx4/device.h> 39#include <linux/mlx4/device.h>
39#include <linux/mlx4/doorbell.h> 40#include <linux/mlx4/doorbell.h>
@@ -43,10 +44,15 @@ struct mlx4_cqe {
43 __be32 immed_rss_invalid; 44 __be32 immed_rss_invalid;
44 __be32 g_mlpath_rqpn; 45 __be32 g_mlpath_rqpn;
45 __be16 sl_vid; 46 __be16 sl_vid;
46 __be16 rlid; 47 union {
47 __be16 status; 48 struct {
48 u8 ipv6_ext_mask; 49 __be16 rlid;
49 u8 badfcs_enc; 50 __be16 status;
51 u8 ipv6_ext_mask;
52 u8 badfcs_enc;
53 };
54 u8 smac[ETH_ALEN];
55 };
50 __be32 byte_cnt; 56 __be32 byte_cnt;
51 __be16 wqe_index; 57 __be16 wqe_index;
52 __be16 checksum; 58 __be16 checksum;
@@ -83,6 +89,7 @@ struct mlx4_ts_cqe {
83enum { 89enum {
84 MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29, 90 MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
85 MLX4_CQE_QPN_MASK = 0xffffff, 91 MLX4_CQE_QPN_MASK = 0xffffff,
92 MLX4_CQE_VID_MASK = 0xfff,
86}; 93};
87 94
88enum { 95enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7de9fde3a9dd..ac5cb1d92487 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1096,6 +1096,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
1096int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); 1096int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
1097int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, 1097int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
1098 u8 *pg, u16 *ratelimit); 1098 u8 *pg, u16 *ratelimit);
1099int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
1099int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 1100int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
1100int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1101int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1101void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); 1102void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index f3ac0f2c4c66..ce55906b54a0 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -38,10 +38,15 @@
38#include <linux/in6.h> 38#include <linux/in6.h>
39#include <linux/if_arp.h> 39#include <linux/if_arp.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/inetdevice.h>
41#include <linux/socket.h> 42#include <linux/socket.h>
42#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
44#include <net/ipv6.h>
45#include <net/if_inet6.h>
46#include <net/ip.h>
43#include <rdma/ib_verbs.h> 47#include <rdma/ib_verbs.h>
44#include <rdma/ib_pack.h> 48#include <rdma/ib_pack.h>
49#include <net/ipv6.h>
45 50
46struct rdma_addr_client { 51struct rdma_addr_client {
47 atomic_t refcount; 52 atomic_t refcount;
@@ -72,7 +77,8 @@ struct rdma_dev_addr {
72 * rdma_translate_ip - Translate a local IP address to an RDMA hardware 77 * rdma_translate_ip - Translate a local IP address to an RDMA hardware
73 * address. 78 * address.
74 */ 79 */
75int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr); 80int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
81 u16 *vlan_id);
76 82
77/** 83/**
78 * rdma_resolve_ip - Resolve source and destination IP addresses to 84 * rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -104,6 +110,10 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
104 110
105int rdma_addr_size(struct sockaddr *addr); 111int rdma_addr_size(struct sockaddr *addr);
106 112
113int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
114int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *smac,
115 u16 *vlan_id);
116
107static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr) 117static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
108{ 118{
109 return ((u16)dev_addr->broadcast[8] << 8) | (u16)dev_addr->broadcast[9]; 119 return ((u16)dev_addr->broadcast[8] << 8) | (u16)dev_addr->broadcast[9];
@@ -126,41 +136,60 @@ static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr)
126 return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0; 136 return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0;
127} 137}
128 138
129static inline void iboe_mac_vlan_to_ll(union ib_gid *gid, u8 *mac, u16 vid) 139static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev)
130{ 140{
131 memset(gid->raw, 0, 16); 141 return dev->priv_flags & IFF_802_1Q_VLAN ?
132 *((__be32 *) gid->raw) = cpu_to_be32(0xfe800000); 142 vlan_dev_vlan_id(dev) : 0xffff;
133 if (vid < 0x1000) { 143}
134 gid->raw[12] = vid & 0xff; 144
135 gid->raw[11] = vid >> 8; 145static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
136 } else { 146{
137 gid->raw[12] = 0xfe; 147 switch (addr->sa_family) {
138 gid->raw[11] = 0xff; 148 case AF_INET:
149 ipv6_addr_set_v4mapped(((struct sockaddr_in *)
150 addr)->sin_addr.s_addr,
151 (struct in6_addr *)gid);
152 break;
153 case AF_INET6:
154 memcpy(gid->raw, &((struct sockaddr_in6 *)addr)->sin6_addr, 16);
155 break;
156 default:
157 return -EINVAL;
139 } 158 }
140 memcpy(gid->raw + 13, mac + 3, 3); 159 return 0;
141 memcpy(gid->raw + 8, mac, 3);
142 gid->raw[8] ^= 2;
143} 160}
144 161
145static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev) 162/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
163static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
146{ 164{
147 return dev->priv_flags & IFF_802_1Q_VLAN ? 165 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
148 vlan_dev_vlan_id(dev) : 0xffff; 166 struct sockaddr_in *out_in = (struct sockaddr_in *)out;
167 memset(out_in, 0, sizeof(*out_in));
168 out_in->sin_family = AF_INET;
169 memcpy(&out_in->sin_addr.s_addr, gid->raw + 12, 4);
170 } else {
171 struct sockaddr_in6 *out_in = (struct sockaddr_in6 *)out;
172 memset(out_in, 0, sizeof(*out_in));
173 out_in->sin6_family = AF_INET6;
174 memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16);
175 }
176 return 0;
149} 177}
150 178
151static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, 179static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
152 union ib_gid *gid) 180 union ib_gid *gid)
153{ 181{
154 struct net_device *dev; 182 struct net_device *dev;
155 u16 vid = 0xffff; 183 struct in_device *ip4;
156 184
157 dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); 185 dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
158 if (dev) { 186 if (dev) {
159 vid = rdma_vlan_dev_vlan_id(dev); 187 ip4 = (struct in_device *)dev->ip_ptr;
188 if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
189 ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
190 (struct in6_addr *)gid);
160 dev_put(dev); 191 dev_put(dev);
161 } 192 }
162
163 iboe_mac_vlan_to_ll(gid, dev_addr->src_dev_addr, vid);
164} 193}
165 194
166static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) 195static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
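
rdma_ip2gid() encodes AF_INET addresses as IPv4-mapped GIDs and copies AF_INET6 addresses verbatim; rdma_gid2ip() inverts the encoding, which is why its out parameter must be a union large enough for a sockaddr_in6, as the comment above it warns. (Incidentally, the hunk leaves <net/ipv6.h> included twice, at new lines 44 and 49.) A runnable userspace mirror of the decoding direction:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Mirror of rdma_gid2ip(): a v4-mapped GID decodes back to AF_INET,
 * anything else to AF_INET6. */
static int gid_is_v4mapped(const unsigned char gid[16])
{
        static const unsigned char prefix[12] =
                { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };

        return memcmp(gid, prefix, 12) == 0;
}

static void gid2ip(struct sockaddr_storage *out, const unsigned char gid[16])
{
        memset(out, 0, sizeof(*out));
        if (gid_is_v4mapped(gid)) {
                struct sockaddr_in *in4 = (struct sockaddr_in *)out;

                in4->sin_family = AF_INET;
                memcpy(&in4->sin_addr.s_addr, gid + 12, 4);
        } else {
                struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)out;

                in6->sin6_family = AF_INET6;
                memcpy(&in6->sin6_addr, gid, 16);
        }
}

int main(void)
{
        unsigned char gid[16];
        struct sockaddr_storage ss;
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "::ffff:10.0.0.1", gid);
        gid2ip(&ss, gid);
        inet_ntop(AF_INET, &((struct sockaddr_in *)&ss)->sin_addr,
                  buf, sizeof(buf));
        printf("AF_INET %s\n", buf);    /* prints AF_INET 10.0.0.1 */
        return 0;
}
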
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 0e3ff30647d5..f29e3a27c2cc 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -601,4 +601,5 @@ struct ib_cm_sidr_rep_param {
601int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 601int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
602 struct ib_cm_sidr_rep_param *param); 602 struct ib_cm_sidr_rep_param *param);
603 603
604int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac);
604#endif /* IB_CM_H */ 605#endif /* IB_CM_H */
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b37fe3b10a9d..b1f7592e02e4 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -34,6 +34,7 @@
34#define IB_PACK_H 34#define IB_PACK_H
35 35
36#include <rdma/ib_verbs.h> 36#include <rdma/ib_verbs.h>
37#include <uapi/linux/if_ether.h>
37 38
38enum { 39enum {
39 IB_LRH_BYTES = 8, 40 IB_LRH_BYTES = 8,
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 125f8714301d..7e071a6abb34 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -154,6 +154,9 @@ struct ib_sa_path_rec {
154 u8 packet_life_time_selector; 154 u8 packet_life_time_selector;
155 u8 packet_life_time; 155 u8 packet_life_time;
156 u8 preference; 156 u8 preference;
157 u8 smac[ETH_ALEN];
158 u8 dmac[ETH_ALEN];
159 u16 vlan_id;
157}; 160};
158 161
159#define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0) 162#define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index c6d343078617..8d4a1c06f7e4 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -48,6 +48,7 @@
48#include <linux/rwsem.h> 48#include <linux/rwsem.h>
49#include <linux/scatterlist.h> 49#include <linux/scatterlist.h>
50#include <linux/workqueue.h> 50#include <linux/workqueue.h>
51#include <uapi/linux/if_ether.h>
51 52
52#include <linux/atomic.h> 53#include <linux/atomic.h>
53#include <asm/uaccess.h> 54#include <asm/uaccess.h>
@@ -474,6 +475,8 @@ struct ib_ah_attr {
474 u8 static_rate; 475 u8 static_rate;
475 u8 ah_flags; 476 u8 ah_flags;
476 u8 port_num; 477 u8 port_num;
478 u8 dmac[ETH_ALEN];
479 u16 vlan_id;
477}; 480};
478 481
479enum ib_wc_status { 482enum ib_wc_status {
@@ -526,6 +529,8 @@ enum ib_wc_flags {
526 IB_WC_WITH_IMM = (1<<1), 529 IB_WC_WITH_IMM = (1<<1),
527 IB_WC_WITH_INVALIDATE = (1<<2), 530 IB_WC_WITH_INVALIDATE = (1<<2),
528 IB_WC_IP_CSUM_OK = (1<<3), 531 IB_WC_IP_CSUM_OK = (1<<3),
532 IB_WC_WITH_SMAC = (1<<4),
533 IB_WC_WITH_VLAN = (1<<5),
529}; 534};
530 535
531struct ib_wc { 536struct ib_wc {
@@ -546,6 +551,8 @@ struct ib_wc {
546 u8 sl; 551 u8 sl;
547 u8 dlid_path_bits; 552 u8 dlid_path_bits;
548 u8 port_num; /* valid only for DR SMPs on switches */ 553 u8 port_num; /* valid only for DR SMPs on switches */
554 u8 smac[ETH_ALEN];
555 u16 vlan_id;
549}; 556};
550 557
551enum ib_cq_notify_flags { 558enum ib_cq_notify_flags {
@@ -724,7 +731,11 @@ enum ib_qp_attr_mask {
724 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 731 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
725 IB_QP_PATH_MIG_STATE = (1<<18), 732 IB_QP_PATH_MIG_STATE = (1<<18),
726 IB_QP_CAP = (1<<19), 733 IB_QP_CAP = (1<<19),
727 IB_QP_DEST_QPN = (1<<20) 734 IB_QP_DEST_QPN = (1<<20),
735 IB_QP_SMAC = (1<<21),
736 IB_QP_ALT_SMAC = (1<<22),
737 IB_QP_VID = (1<<23),
738 IB_QP_ALT_VID = (1<<24),
728}; 739};
729 740
730enum ib_qp_state { 741enum ib_qp_state {
@@ -774,6 +785,10 @@ struct ib_qp_attr {
774 u8 rnr_retry; 785 u8 rnr_retry;
775 u8 alt_port_num; 786 u8 alt_port_num;
776 u8 alt_timeout; 787 u8 alt_timeout;
788 u8 smac[ETH_ALEN];
789 u8 alt_smac[ETH_ALEN];
790 u16 vlan_id;
791 u16 alt_vlan_id;
777}; 792};
778 793
779enum ib_wr_opcode { 794enum ib_wr_opcode {
@@ -1505,6 +1520,7 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
1505 * @next_state: Next QP state 1520 * @next_state: Next QP state
1506 * @type: QP type 1521 * @type: QP type
1507 * @mask: Mask of supplied QP attributes 1522 * @mask: Mask of supplied QP attributes
1523 * @ll: link layer of port
1508 * 1524 *
1509 * This function is a helper function that a low-level driver's 1525 * This function is a helper function that a low-level driver's
1510 * modify_qp method can use to validate the consumer's input. It 1526 * modify_qp method can use to validate the consumer's input. It
@@ -1513,7 +1529,8 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
1513 * and that the attribute mask supplied is allowed for the transition. 1529 * and that the attribute mask supplied is allowed for the transition.
1514 */ 1530 */
1515int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 1531int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1516 enum ib_qp_type type, enum ib_qp_attr_mask mask); 1532 enum ib_qp_type type, enum ib_qp_attr_mask mask,
1533 enum rdma_link_layer ll);
1517 1534
1518int ib_register_event_handler (struct ib_event_handler *event_handler); 1535int ib_register_event_handler (struct ib_event_handler *event_handler);
1519int ib_unregister_event_handler(struct ib_event_handler *event_handler); 1536int ib_unregister_event_handler(struct ib_event_handler *event_handler);
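
Callers that cannot have a port at hand, such as the RESET->RESET transition the mlx4 hunk special-cases, pass IB_LINK_LAYER_UNSPECIFIED; everyone else derives the link layer from the port the transition targets. Condensed caller sketch, with names as in the mlx4 hunk:

enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;

if (!(cur_state == IB_QPS_RESET && new_state == IB_QPS_RESET)) {
        u8 port = (attr_mask & IB_QP_PORT) ? attr->port_num : qp->port;

        ll = rdma_port_get_link_layer(&dev->ib_dev, port);
}
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, ll))
        return -EINVAL;
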