-rw-r--r--  drivers/infiniband/core/addr.c               | 97
-rw-r--r--  drivers/infiniband/core/cm.c                 | 50
-rw-r--r--  drivers/infiniband/core/cma.c                | 60
-rw-r--r--  drivers/infiniband/core/sa_query.c           | 12
-rw-r--r--  drivers/infiniband/core/verbs.c              | 55
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c         |  2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c       |  2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c              |  9
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c              |  3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c       |  3
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  |  3
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c           |  2
-rw-r--r--  include/linux/mlx4/device.h                  |  1
-rw-r--r--  include/rdma/ib_addr.h                       | 42
-rw-r--r--  include/rdma/ib_cm.h                         |  1
-rw-r--r--  include/rdma/ib_pack.h                       |  1
-rw-r--r--  include/rdma/ib_sa.h                         |  3
-rw-r--r--  include/rdma/ib_verbs.h                      | 21
18 files changed, 343 insertions(+), 24 deletions(-)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e90f2b2eabd7..8172d37f9add 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -86,6 +86,8 @@ int rdma_addr_size(struct sockaddr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_size);
 
+static struct rdma_addr_client self;
+
 void rdma_addr_register_client(struct rdma_addr_client *client)
 {
 	atomic_set(&client->refcount, 1);
@@ -119,7 +121,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 }
 EXPORT_SYMBOL(rdma_copy_addr);
 
-int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
+int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
+		      u16 *vlan_id)
 {
 	struct net_device *dev;
 	int ret = -EADDRNOTAVAIL;
@@ -142,6 +145,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 			return ret;
 
 		ret = rdma_copy_addr(dev_addr, dev, NULL);
+		if (vlan_id)
+			*vlan_id = rdma_vlan_dev_vlan_id(dev);
 		dev_put(dev);
 		break;
 
@@ -153,6 +158,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 					  &((struct sockaddr_in6 *) addr)->sin6_addr,
 					  dev, 1)) {
 				ret = rdma_copy_addr(dev_addr, dev, NULL);
+				if (vlan_id)
+					*vlan_id = rdma_vlan_dev_vlan_id(dev);
 				break;
 			}
 		}
@@ -238,7 +245,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 	src_in->sin_addr.s_addr = fl4.saddr;
 
 	if (rt->dst.dev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+		ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
 		if (!ret)
 			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
 		goto put;
@@ -286,7 +293,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 	}
 
 	if (dst->dev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+		ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
 		if (!ret)
 			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
 		goto put;
@@ -437,6 +444,88 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_cancel);
 
+struct resolve_cb_context {
+	struct rdma_dev_addr *addr;
+	struct completion comp;
+};
+
+static void resolve_cb(int status, struct sockaddr *src_addr,
+		       struct rdma_dev_addr *addr, void *context)
+{
+	memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
+				rdma_dev_addr));
+	complete(&((struct resolve_cb_context *)context)->comp);
+}
+
+int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
+			       u16 *vlan_id)
+{
+	int ret = 0;
+	struct rdma_dev_addr dev_addr;
+	struct resolve_cb_context ctx;
+	struct net_device *dev;
+
+	union {
+		struct sockaddr     _sockaddr;
+		struct sockaddr_in  _sockaddr_in;
+		struct sockaddr_in6 _sockaddr_in6;
+	} sgid_addr, dgid_addr;
+
+
+	ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+	if (ret)
+		return ret;
+
+	ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+	if (ret)
+		return ret;
+
+	memset(&dev_addr, 0, sizeof(dev_addr));
+
+	ctx.addr = &dev_addr;
+	init_completion(&ctx.comp);
+	ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
+			      &dev_addr, 1000, resolve_cb, &ctx);
+	if (ret)
+		return ret;
+
+	wait_for_completion(&ctx.comp);
+
+	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
+	dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
+	if (!dev)
+		return -ENODEV;
+	if (vlan_id)
+		*vlan_id = rdma_vlan_dev_vlan_id(dev);
+	dev_put(dev);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_dmac_by_grh);
+
+int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
+{
+	int ret = 0;
+	struct rdma_dev_addr dev_addr;
+	union {
+		struct sockaddr     _sockaddr;
+		struct sockaddr_in  _sockaddr_in;
+		struct sockaddr_in6 _sockaddr_in6;
+	} gid_addr;
+
+	ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+
+	if (ret)
+		return ret;
+	memset(&dev_addr, 0, sizeof(dev_addr));
+	ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
+	if (ret)
+		return ret;
+
+	memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
+
 static int netevent_callback(struct notifier_block *self, unsigned long event,
 			     void *ctx)
 {
@@ -461,11 +550,13 @@ static int __init addr_init(void)
 		return -ENOMEM;
 
 	register_netevent_notifier(&nb);
+	rdma_addr_register_client(&self);
 	return 0;
 }
 
 static void __exit addr_cleanup(void)
 {
+	rdma_addr_unregister_client(&self);
 	unregister_netevent_notifier(&nb);
 	destroy_workqueue(addr_wq);
 }
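
The two exported helpers above are the building blocks for RoCE L2 resolution: rdma_addr_find_smac_by_sgid() derives the source MAC/VLAN from the local netdev that owns the GID, and rdma_addr_find_dmac_by_grh() resolves the destination MAC through the IP stack. A minimal usage sketch (hypothetical caller, assuming process context, since the dmac lookup blocks on rdma_resolve_ip() with its hard-coded 1000 ms timeout):

#include <rdma/ib_addr.h>

/* Hypothetical helper: fill in the Ethernet L2 attributes for a RoCE
 * path whose endpoints are given as (IPv4-mapped or IPv6) GIDs. */
static int example_resolve_l2(union ib_gid *sgid, union ib_gid *dgid,
			      u8 *smac, u8 *dmac, u16 *vlan_id)
{
	int ret;

	/* Source side: MAC and VLAN of the netdev bound to sgid. */
	ret = rdma_addr_find_smac_by_sgid(sgid, smac, vlan_id);
	if (ret)
		return ret;

	/* Destination side: ARP/ND resolution via rdma_resolve_ip();
	 * sleeps until the callback completes or the timeout expires. */
	return rdma_addr_find_dmac_by_grh(sgid, dgid, dmac, vlan_id);
}
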
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f2ef7ef0f36f..c6d543c3ef52 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -47,6 +47,7 @@
 #include <linux/sysfs.h>
 #include <linux/workqueue.h>
 #include <linux/kdev_t.h>
+#include <linux/etherdevice.h>
 
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
@@ -177,6 +178,8 @@ struct cm_av {
 	struct ib_ah_attr ah_attr;
 	u16 pkey_index;
 	u8 timeout;
+	u8 valid;
+	u8 smac[ETH_ALEN];
 };
 
 struct cm_work {
@@ -346,6 +349,23 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
 			   grh, &av->ah_attr);
 }
 
+int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac)
+{
+	struct cm_id_private *cm_id_priv;
+
+	cm_id_priv = container_of(id, struct cm_id_private, id);
+
+	if (smac != NULL)
+		memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac));
+
+	if (alt_smac != NULL)
+		memcpy(cm_id_priv->alt_av.smac, alt_smac,
+		       sizeof(cm_id_priv->alt_av.smac));
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_update_cm_av);
+
 static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 {
 	struct cm_device *cm_dev;
@@ -376,6 +396,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
 			     &av->ah_attr);
 	av->timeout = path->packet_life_time + 1;
+	memcpy(av->smac, path->smac, sizeof(av->smac));
+
+	av->valid = 1;
 	return 0;
 }
 
@@ -1554,6 +1577,9 @@ static int cm_req_handler(struct cm_work *work)
 
 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
+
+	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
+	work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
 	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
 	if (ret) {
 		ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -3500,6 +3526,30 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
 		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
+		if (!cm_id_priv->av.valid)
+			return -EINVAL;
+		if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
+			qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
+			*qp_attr_mask |= IB_QP_VID;
+		}
+		if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
+			memcpy(qp_attr->smac, cm_id_priv->av.smac,
+			       sizeof(qp_attr->smac));
+			*qp_attr_mask |= IB_QP_SMAC;
+		}
+		if (cm_id_priv->alt_av.valid) {
+			if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
+				qp_attr->alt_vlan_id =
+					cm_id_priv->alt_av.ah_attr.vlan_id;
+				*qp_attr_mask |= IB_QP_ALT_VID;
+			}
+			if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
+				memcpy(qp_attr->alt_smac,
+				       cm_id_priv->alt_av.smac,
+				       sizeof(qp_attr->alt_smac));
+				*qp_attr_mask |= IB_QP_ALT_SMAC;
+			}
+		}
 		qp_attr->path_mtu = cm_id_priv->path_mtu;
 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
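
Taken together, the cm_av and cm_init_qp_rtr_attr() changes mean that on RoCE ports ib_cm_init_qp_attr() now hands the ULP the L2 attributes for the INIT->RTR transition: -EINVAL if the av was never validated, IB_QP_SMAC when a source MAC was recorded, IB_QP_VID when the path is tagged (vlan_id != 0xffff), plus the IB_QP_ALT_* variants for a valid alternate path. The consumer pattern is unchanged; a sketch (hypothetical cm_id/qp variables, error handling elided):

	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	/* On RoCE, qp_attr.smac/vlan_id are now populated and
	 * qp_attr_mask may carry IB_QP_SMAC/IB_QP_VID; ib_modify_qp()
	 * passes them through to the driver unchanged. */
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
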
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 8e49db690f33..0caf465f317c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -340,7 +340,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 	int ret;
 
 	if (addr->sa_family != AF_IB) {
-		ret = rdma_translate_ip(addr, dev_addr);
+		ret = rdma_translate_ip(addr, dev_addr, NULL);
 	} else {
 		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
 		ret = 0;
@@ -603,6 +603,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 {
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;
+	union ib_gid sgid;
 
 	mutex_lock(&id_priv->qp_mutex);
 	if (!id_priv->id.qp) {
@@ -625,6 +626,20 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
+	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
+			   qp_attr.ah_attr.grh.sgid_index, &sgid);
+	if (ret)
+		goto out;
+
+	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
+	    == RDMA_TRANSPORT_IB &&
+	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
+	    == IB_LINK_LAYER_ETHERNET) {
+		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
+
+		if (ret)
+			goto out;
+	}
 	if (conn_param)
 		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
@@ -725,6 +740,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		else
 			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
 						 qp_attr_mask);
+
 		if (qp_attr->qp_state == IB_QPS_RTR)
 			qp_attr->rq_psn = id_priv->seq_num;
 		break;
@@ -1266,6 +1282,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_id_private *listen_id, *conn_id;
 	struct rdma_cm_event event;
 	int offset, ret;
+	u8 smac[ETH_ALEN];
+	u8 alt_smac[ETH_ALEN];
+	u8 *psmac = smac;
+	u8 *palt_smac = alt_smac;
+	int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
+			RDMA_TRANSPORT_IB) &&
+		       (rdma_port_get_link_layer(cm_id->device,
+			ib_event->param.req_rcvd.port) ==
+			IB_LINK_LAYER_ETHERNET));
 
 	listen_id = cm_id->context;
 	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
@@ -1310,12 +1335,29 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	if (ret)
 		goto err3;
 
+	if (is_iboe) {
+		if (ib_event->param.req_rcvd.primary_path != NULL)
+			rdma_addr_find_smac_by_sgid(
+				&ib_event->param.req_rcvd.primary_path->sgid,
+				psmac, NULL);
+		else
+			psmac = NULL;
+		if (ib_event->param.req_rcvd.alternate_path != NULL)
+			rdma_addr_find_smac_by_sgid(
+				&ib_event->param.req_rcvd.alternate_path->sgid,
+				palt_smac, NULL);
+		else
+			palt_smac = NULL;
+	}
 	/*
 	 * Acquire mutex to prevent user executing rdma_destroy_id()
 	 * while we're accessing the cm_id.
 	 */
 	mutex_lock(&lock);
-	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+	if (is_iboe)
+		ib_update_cm_av(cm_id, psmac, palt_smac);
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
+	    (conn_id->id.qp_type != IB_QPT_UD))
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 	mutex_unlock(&lock);
 	mutex_unlock(&conn_id->handler_mutex);
@@ -1474,7 +1516,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	conn_id->state = RDMA_CM_CONNECT;
 
-	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
+	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
 	if (ret) {
 		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
@@ -1873,7 +1915,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 	struct cma_work *work;
 	int ret;
 	struct net_device *ndev = NULL;
-	u16 vid;
+
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (!work)
@@ -1897,10 +1939,14 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	vid = rdma_vlan_dev_vlan_id(ndev);
+	route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
+	memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
+	memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
 
-	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
-	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr,
+			    route->path_rec->vlan_id);
+	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr,
+			    route->path_rec->vlan_id);
 
 	route->path_rec->hop_limit = 1;
 	route->path_rec->reversible = 1;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 9838ca484389..f820958e4047 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -42,7 +42,7 @@
 #include <linux/kref.h>
 #include <linux/idr.h>
 #include <linux/workqueue.h>
-
+#include <uapi/linux/if_ether.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_cache.h>
 #include "sa.h"
@@ -556,6 +556,13 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 		ah_attr->grh.hop_limit     = rec->hop_limit;
 		ah_attr->grh.traffic_class = rec->traffic_class;
 	}
+	if (force_grh) {
+		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
+		ah_attr->vlan_id = rec->vlan_id;
+	} else {
+		ah_attr->vlan_id = 0xffff;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(ib_init_ah_from_path);
@@ -670,6 +677,9 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
 
 		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
 			  mad->data, &rec);
+		rec.vlan_id = 0xffff;
+		memset(rec.dmac, 0, ETH_ALEN);
+		memset(rec.smac, 0, ETH_ALEN);
 		query->callback(status, &rec, query->context);
 	} else
 		query->callback(status, NULL, query->context);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index d4f6ddf72ffa..7978394738a3 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -44,6 +44,7 @@
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
 
 int ib_rate_to_mult(enum ib_rate rate)
 {
@@ -192,8 +193,28 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
 	u32 flow_class;
 	u16 gid_index;
 	int ret;
+	int is_eth = (rdma_port_get_link_layer(device, port_num) ==
+			IB_LINK_LAYER_ETHERNET);
 
 	memset(ah_attr, 0, sizeof *ah_attr);
+	if (is_eth) {
+		if (!(wc->wc_flags & IB_WC_GRH))
+			return -EPROTOTYPE;
+
+		if (wc->wc_flags & IB_WC_WITH_SMAC &&
+		    wc->wc_flags & IB_WC_WITH_VLAN) {
+			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
+			ah_attr->vlan_id = wc->vlan_id;
+		} else {
+			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
+					ah_attr->dmac, &ah_attr->vlan_id);
+			if (ret)
+				return ret;
+		}
+	} else {
+		ah_attr->vlan_id = 0xffff;
+	}
+
 	ah_attr->dlid = wc->slid;
 	ah_attr->sl = wc->sl;
 	ah_attr->src_path_bits = wc->dlid_path_bits;
@@ -476,7 +497,9 @@ EXPORT_SYMBOL(ib_create_qp);
 static const struct {
 	int			valid;
 	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
+	enum ib_qp_attr_mask	req_param_add_eth[IB_QPT_MAX];
 	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
+	enum ib_qp_attr_mask	opt_param_add_eth[IB_QPT_MAX];
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = {
 		[IB_QPS_RESET] = { .valid = 1 },
@@ -557,6 +580,12 @@ static const struct {
 						IB_QP_MAX_DEST_RD_ATOMIC	|
 						IB_QP_MIN_RNR_TIMER),
 		},
+		.req_param_add_eth = {
+			[IB_QPT_RC]  = (IB_QP_SMAC),
+			[IB_QPT_UC]  = (IB_QP_SMAC),
+			[IB_QPT_XRC_INI] = (IB_QP_SMAC),
+			[IB_QPT_XRC_TGT] = (IB_QP_SMAC)
+		},
 		.opt_param = {
 			[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
 					IB_QP_QKEY),
@@ -576,7 +605,21 @@ static const struct {
 					IB_QP_QKEY),
 			[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
 					IB_QP_QKEY),
-		}
+		},
+		.opt_param_add_eth = {
+			[IB_QPT_RC]  = (IB_QP_ALT_SMAC			|
+					IB_QP_VID			|
+					IB_QP_ALT_VID),
+			[IB_QPT_UC]  = (IB_QP_ALT_SMAC			|
+					IB_QP_VID			|
+					IB_QP_ALT_VID),
+			[IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC		|
+					IB_QP_VID			|
+					IB_QP_ALT_VID),
+			[IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC		|
+					IB_QP_VID			|
+					IB_QP_ALT_VID)
+		}
 	}
 },
 [IB_QPS_RTR] = {
@@ -779,7 +822,8 @@ static const struct {
 };
 
 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
-		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
+		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
+		       enum rdma_link_layer ll)
 {
 	enum ib_qp_attr_mask req_param, opt_param;
 
@@ -798,6 +842,13 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 	req_param = qp_state_table[cur_state][next_state].req_param[type];
 	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
 
+	if (ll == IB_LINK_LAYER_ETHERNET) {
+		req_param |= qp_state_table[cur_state][next_state].
+			req_param_add_eth[type];
+		opt_param |= qp_state_table[cur_state][next_state].
+			opt_param_add_eth[type];
+	}
+
 	if ((mask & req_param) != req_param)
 		return 0;
 
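
One behavioral consequence of the ib_init_ah_from_wc() hunk above is worth calling out: on an Ethernet port the function now fails with -EPROTOTYPE if the completion carries no GRH, and when the HCA did not report IB_WC_WITH_SMAC/IB_WC_WITH_VLAN it falls back to rdma_addr_find_dmac_by_grh(), which sleeps. A sketch of the usual UD-responder call site under those assumptions (hypothetical surrounding function):

	/* Reply path for a UD datagram: derive an AH from the completion
	 * and the received GRH. May now sleep on RoCE (address
	 * resolution), so this belongs in process context rather than
	 * in the CQ completion handler itself. */
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);	/* -EPROTOTYPE: RoCE WC without GRH */
	return ib_create_ah(pd, &ah_attr);
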
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00d6861a6a18..2e89356c46fa 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1329,7 +1329,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
 	if (!smi_reset2init &&
 	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
-				attr_mask)) {
+				attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
 		ret = -EINVAL;
 		ehca_err(ibqp->device,
 			 "Invalid qp transition new_state=%x cur_state=%x "
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 0857a9c3cd3d..face87602dc1 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -463,7 +463,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-				attr_mask))
+				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
 		goto inval;
 
 	if (attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4f10af2905b5..da6f5fa0c328 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1561,13 +1561,18 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	int err = -EINVAL;
-
+	int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+	if (cur_state == new_state && cur_state == IB_QPS_RESET)
+		p = IB_LINK_LAYER_UNSPECIFIED;
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+				attr_mask,
+				rdma_port_get_link_layer(&dev->ib_dev, p))) {
 		pr_debug("qpn 0x%x: invalid attribute mask specified "
 			 "for transition %d to %d. qp_type %d,"
 			 " attr_mask 0x%x\n",
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7c6b4ba49bec..ca2936233713 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1616,7 +1616,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
 	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
-	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+				IB_LINK_LAYER_UNSPECIFIED))
 		goto out;
 
 	if ((attr_mask & IB_QP_PORT) &&
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 26a684536109..e354b2f04ad9 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -860,7 +860,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+				IB_LINK_LAYER_UNSPECIFIED)) {
 		mthca_dbg(dev, "Bad QP transition (transport %d) "
 			  "%d->%d with attr 0x%08x\n",
 			  qp->transport, cur_state, new_state,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 7686dceadd29..a0f1c47c333f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1326,7 +1326,8 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		new_qps = old_qps;
 	spin_unlock_irqrestore(&qp->q_lock, flags);
 
-	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
+	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
+				IB_LINK_LAYER_UNSPECIFIED)) {
 		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
 		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
 		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 3cca55b51e54..0cad0c40d742 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -585,7 +585,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-				attr_mask))
+				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
 		goto inval;
 
 	if (attr_mask & IB_QP_AV) {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7d3a523160ba..9347a78f7872 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1095,6 +1095,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
 			    u8 *pg, u16 *ratelimit);
+int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index f3ac0f2c4c66..a0715606ebb2 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -42,6 +42,7 @@
 #include <linux/if_vlan.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
+#include <net/ipv6.h>
 
 struct rdma_addr_client {
 	atomic_t refcount;
@@ -72,7 +73,8 @@ struct rdma_dev_addr {
  * rdma_translate_ip - Translate a local IP address to an RDMA hardware
  *   address.
  */
-int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr);
+int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
+		      u16 *vlan_id);
 
 /**
  * rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -104,6 +106,10 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 
 int rdma_addr_size(struct sockaddr *addr);
 
+int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
+int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *smac,
+			       u16 *vlan_id);
+
 static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
 {
 	return ((u16)dev_addr->broadcast[8] << 8) | (u16)dev_addr->broadcast[9];
@@ -142,6 +148,40 @@ static inline void iboe_mac_vlan_to_ll(union ib_gid *gid, u8 *mac, u16 vid)
 	gid->raw[8] ^= 2;
 }
 
+static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
+{
+	switch (addr->sa_family) {
+	case AF_INET:
+		ipv6_addr_set_v4mapped(((struct sockaddr_in *)
+					addr)->sin_addr.s_addr,
+				       (struct in6_addr *)gid);
+		break;
+	case AF_INET6:
+		memcpy(gid->raw, &((struct sockaddr_in6 *)addr)->sin6_addr, 16);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
+static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
+{
+	if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
+		struct sockaddr_in *out_in = (struct sockaddr_in *)out;
+		memset(out_in, 0, sizeof(*out_in));
+		out_in->sin_family = AF_INET;
+		memcpy(&out_in->sin_addr.s_addr, gid->raw + 12, 4);
+	} else {
+		struct sockaddr_in6 *out_in = (struct sockaddr_in6 *)out;
+		memset(out_in, 0, sizeof(*out_in));
+		out_in->sin6_family = AF_INET6;
+		memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16);
+	}
+	return 0;
+}
+
 static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_802_1Q_VLAN ?
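
The rdma_ip2gid()/rdma_gid2ip() pair added to ib_addr.h encodes the RoCE convention that an IPv4 address travels as an IPv4-mapped IPv6 GID. A worked example of the round trip (illustrative values), honoring the "sockaddr should be a union" rule:

	/* 192.168.1.20 maps to the GID ::ffff:192.168.1.20, i.e. raw
	 * bytes 00 .. 00 ff ff c0 a8 01 14. */
	struct sockaddr_in in = {
		.sin_family = AF_INET,
		.sin_addr   = { .s_addr = cpu_to_be32(0xc0a80114) },
	};
	union {
		struct sockaddr     sa;
		struct sockaddr_in  in4;
		struct sockaddr_in6 in6;
	} out;
	union ib_gid gid;

	rdma_ip2gid((struct sockaddr *)&in, &gid);  /* v4-mapped GID */
	rdma_gid2ip(&out.sa, &gid);  /* out.in4.sin_addr is 192.168.1.20 again */
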
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 0e3ff30647d5..f29e3a27c2cc 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -601,4 +601,5 @@ struct ib_cm_sidr_rep_param {
 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
 			struct ib_cm_sidr_rep_param *param);
 
+int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac);
 #endif /* IB_CM_H */
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b37fe3b10a9d..b1f7592e02e4 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -34,6 +34,7 @@
 #define IB_PACK_H
 
 #include <rdma/ib_verbs.h>
+#include <uapi/linux/if_ether.h>
 
 enum {
 	IB_LRH_BYTES  = 8,
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 125f8714301d..7e071a6abb34 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -154,6 +154,9 @@ struct ib_sa_path_rec {
 	u8           packet_life_time_selector;
 	u8           packet_life_time;
 	u8           preference;
+	u8           smac[ETH_ALEN];
+	u8           dmac[ETH_ALEN];
+	u16          vlan_id;
 };
 
 #define IB_SA_MCMEMBER_REC_MGID				IB_SA_COMP_MASK( 0)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 61e1935c91b1..ea0f6eed7863 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -48,6 +48,7 @@
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/if_ether.h>
 
 #include <linux/atomic.h>
 #include <asm/uaccess.h>
@@ -472,6 +473,8 @@ struct ib_ah_attr {
 	u8			static_rate;
 	u8			ah_flags;
 	u8			port_num;
+	u8			dmac[ETH_ALEN];
+	u16			vlan_id;
 };
 
 enum ib_wc_status {
@@ -524,6 +527,8 @@ enum ib_wc_flags {
 	IB_WC_WITH_IMM		= (1<<1),
 	IB_WC_WITH_INVALIDATE	= (1<<2),
 	IB_WC_IP_CSUM_OK	= (1<<3),
+	IB_WC_WITH_SMAC		= (1<<4),
+	IB_WC_WITH_VLAN		= (1<<5),
 };
 
 struct ib_wc {
@@ -544,6 +549,8 @@ struct ib_wc {
 	u8			sl;
 	u8			dlid_path_bits;
 	u8			port_num;	/* valid only for DR SMPs on switches */
+	u8			smac[ETH_ALEN];
+	u16			vlan_id;
 };
 
 enum ib_cq_notify_flags {
@@ -721,7 +728,11 @@ enum ib_qp_attr_mask {
 	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
 	IB_QP_PATH_MIG_STATE		= (1<<18),
 	IB_QP_CAP			= (1<<19),
-	IB_QP_DEST_QPN			= (1<<20)
+	IB_QP_DEST_QPN			= (1<<20),
+	IB_QP_SMAC			= (1<<21),
+	IB_QP_ALT_SMAC			= (1<<22),
+	IB_QP_VID			= (1<<23),
+	IB_QP_ALT_VID			= (1<<24),
 };
 
 enum ib_qp_state {
@@ -771,6 +782,10 @@ struct ib_qp_attr {
 	u8			rnr_retry;
 	u8			alt_port_num;
 	u8			alt_timeout;
+	u8			smac[ETH_ALEN];
+	u8			alt_smac[ETH_ALEN];
+	u16			vlan_id;
+	u16			alt_vlan_id;
 };
 
 enum ib_wr_opcode {
@@ -1488,6 +1503,7 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
  * @next_state: Next QP state
  * @type: QP type
  * @mask: Mask of supplied QP attributes
+ * @ll : link layer of port
  *
  * This function is a helper function that a low-level driver's
  * modify_qp method can use to validate the consumer's input. It
@@ -1496,7 +1512,8 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
  * and that the attribute mask supplied is allowed for the transition.
  */
 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
-		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
+		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
+		       enum rdma_link_layer ll);
 
 int ib_register_event_handler  (struct ib_event_handler *event_handler);
 int ib_unregister_event_handler(struct ib_event_handler *event_handler);