path: root/drivers/infiniband/hw/mlx4
author		Roland Dreier <roland@purestorage.com>	2014-01-23 02:24:21 -0500
committer	Roland Dreier <roland@purestorage.com>	2014-01-23 02:24:21 -0500
commit		fb1b5034e4987b158179a62732fb6dfb8f7ec88e (patch)
tree		93d02ff7f0f530286fd54d03b632c6eaabc9dcc9 /drivers/infiniband/hw/mlx4
parent		8f399921ea9a562bc8221258c4b8a7bd69577939 (diff)
parent		27cdef637c25705b433d5c4deeef4cf8dcb75d6a (diff)
Merge branch 'ip-roce' into for-next
Conflicts: drivers/infiniband/hw/mlx4/main.c
Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r--	drivers/infiniband/hw/mlx4/Kconfig	|   2
-rw-r--r--	drivers/infiniband/hw/mlx4/ah.c		|  40
-rw-r--r--	drivers/infiniband/hw/mlx4/cq.c		|   9
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	| 475
-rw-r--r--	drivers/infiniband/hw/mlx4/mlx4_ib.h	|   6
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c		| 104
6 files changed, 437 insertions, 199 deletions
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index 24ab11a9ad1e..fc01deac1d3c 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,6 @@
 config MLX4_INFINIBAND
 	tristate "Mellanox ConnectX HCA support"
-	depends on NETDEVICES && ETHERNET && PCI
+	depends on NETDEVICES && ETHERNET && PCI && INET
 	select NET_VENDOR_MELLANOX
 	select MLX4_CORE
 	---help---
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index a251becdaa98..170dca608042 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -39,25 +39,6 @@
 
 #include "mlx4_ib.h"
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-			u8 *mac, int *is_mcast, u8 port)
-{
-	struct in6_addr in6;
-
-	*is_mcast = 0;
-
-	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
-	if (rdma_link_local_addr(&in6))
-		rdma_get_ll_mac(&in6, mac);
-	else if (rdma_is_multicast_addr(&in6)) {
-		rdma_get_mcast_mac(&in6, mac);
-		*is_mcast = 1;
-	} else
-		return -EINVAL;
-
-	return 0;
-}
-
 static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 				  struct mlx4_ib_ah *ah)
 {
@@ -92,21 +73,18 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 {
 	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
 	struct mlx4_dev *dev = ibdev->dev;
-	union ib_gid sgid;
-	u8 mac[6];
-	int err;
 	int is_mcast;
+	struct in6_addr in6;
 	u16 vlan_tag;
 
-	err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
-	if (err)
-		return ERR_PTR(err);
-
-	memcpy(ah->av.eth.mac, mac, 6);
-	err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
-	if (err)
-		return ERR_PTR(err);
-	vlan_tag = rdma_get_vlan_id(&sgid);
+	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+	if (rdma_is_multicast_addr(&in6)) {
+		is_mcast = 1;
+		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
+	} else {
+		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
+	}
+	vlan_tag = ah_attr->vlan_id;
 	if (vlan_tag < 0x1000)
 		vlan_tag |= (ah_attr->sl & 7) << 13;
 	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 66dbf8062374..cc40f08ca8f1 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -798,6 +798,15 @@ repoll:
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
 		else
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
+		if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
+			wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+				MLX4_CQE_VID_MASK;
+		} else {
+			wc->vlan_id = 0xffff;
+		}
+		wc->wc_flags |= IB_WC_WITH_VLAN;
+		memcpy(wc->smac, cqe->smac, ETH_ALEN);
+		wc->wc_flags |= IB_WC_WITH_SMAC;
 	}
 
 	return 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ea5844e89b2a..c2702f549f10 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -39,6 +39,8 @@
 #include <linux/inetdevice.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
+#include <net/ipv6.h>
+#include <net/addrconf.h>
 
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
@@ -794,7 +796,6 @@ static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 		   union ib_gid *gid)
 {
-	u8 mac[6];
 	struct net_device *ndev;
 	int ret = 0;
 
@@ -808,11 +809,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 	spin_unlock(&mdev->iboe.lock);
 
 	if (ndev) {
-		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
-		rtnl_lock();
-		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
 		ret = 1;
-		rtnl_unlock();
 		dev_put(ndev);
 	}
 
@@ -1164,6 +1161,8 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
 	u64 reg_id;
 	struct mlx4_ib_steering *ib_steering = NULL;
+	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1175,7 +1174,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
 				    !!(mqp->flags &
 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-				    MLX4_PROT_IB_IPV6, &reg_id);
+				    prot, &reg_id);
 	if (err)
 		goto err_malloc;
 
@@ -1194,7 +1193,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 err_add:
 	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-			      MLX4_PROT_IB_IPV6, reg_id);
+			      prot, reg_id);
 err_malloc:
 	kfree(ib_steering);
 
@@ -1222,10 +1221,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	int err;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-	u8 mac[6];
 	struct net_device *ndev;
 	struct mlx4_ib_gid_entry *ge;
 	u64 reg_id = 0;
+	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1248,7 +1248,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	}
 
 	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-				    MLX4_PROT_IB_IPV6, reg_id);
+				    prot, reg_id);
 	if (err)
 		return err;
 
@@ -1260,13 +1260,8 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		if (ndev)
 			dev_hold(ndev);
 		spin_unlock(&mdev->iboe.lock);
-		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
-		if (ndev) {
-			rtnl_lock();
-			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
-			rtnl_unlock();
+		if (ndev)
 			dev_put(ndev);
-		}
 		list_del(&ge->list);
 		kfree(ge);
 	} else
@@ -1362,20 +1357,6 @@ static struct device_attribute *mlx4_class_attributes[] = {
 	&dev_attr_board_id
 };
 
-static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
-{
-	memcpy(eui, dev->dev_addr, 3);
-	memcpy(eui + 5, dev->dev_addr + 3, 3);
-	if (vlan_id < 0x1000) {
-		eui[3] = vlan_id >> 8;
-		eui[4] = vlan_id & 0xff;
-	} else {
-		eui[3] = 0xff;
-		eui[4] = 0xfe;
-	}
-	eui[0] ^= 2;
-}
-
 static void update_gids_task(struct work_struct *work)
 {
 	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
@@ -1398,161 +1379,318 @@ static void update_gids_task(struct work_struct *work)
 				    MLX4_CMD_WRAPPED);
 	if (err)
 		pr_warn("set port command failed\n");
-	else {
-		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
+	else
 		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
-	}
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	kfree(gw);
 }
 
-static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
+static void reset_gids_task(struct work_struct *work)
 {
-	struct net_device *ndev = dev->iboe.netdevs[port - 1];
-	struct update_gid_work *work;
-	struct net_device *tmp;
+	struct update_gid_work *gw =
+		container_of(work, struct update_gid_work, work);
+	struct mlx4_cmd_mailbox *mailbox;
+	union ib_gid *gids;
+	int err;
 	int i;
-	u8 *hits;
-	int ret;
-	union ib_gid gid;
-	int free;
-	int found;
-	int need_update = 0;
-	u16 vid;
+	struct mlx4_dev *dev = gw->dev->dev;
 
-	work = kzalloc(sizeof *work, GFP_ATOMIC);
-	if (!work)
-		return -ENOMEM;
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox)) {
+		pr_warn("reset gid table failed\n");
+		goto free;
+	}
 
-	hits = kzalloc(128, GFP_ATOMIC);
-	if (!hits) {
-		ret = -ENOMEM;
-		goto out;
+	gids = mailbox->buf;
+	memcpy(gids, gw->gids, sizeof(gw->gids));
+
+	for (i = 1; i < gw->dev->num_ports + 1; i++) {
+		if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
+					    IB_LINK_LAYER_ETHERNET) {
+			err = mlx4_cmd(dev, mailbox->dma,
+				       MLX4_SET_PORT_GID_TABLE << 8 | i,
+				       1, MLX4_CMD_SET_PORT,
+				       MLX4_CMD_TIME_CLASS_B,
+				       MLX4_CMD_WRAPPED);
+			if (err)
+				pr_warn(KERN_WARNING
+					"set port %d command failed\n", i);
+		}
 	}
 
-	rcu_read_lock();
-	for_each_netdev_rcu(&init_net, tmp) {
-		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
-			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
-			vid = rdma_vlan_dev_vlan_id(tmp);
-			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
-			found = 0;
-			free = -1;
-			for (i = 0; i < 128; ++i) {
-				if (free < 0 &&
-				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
-					free = i;
-				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
-					hits[i] = 1;
-					found = 1;
-					break;
-				}
-			}
+	mlx4_free_cmd_mailbox(dev, mailbox);
+free:
+	kfree(gw);
+}
 
-			if (!found) {
-				if (tmp == ndev &&
-				    (memcmp(&dev->iboe.gid_table[port - 1][0],
-					    &gid, sizeof gid) ||
-				     !memcmp(&dev->iboe.gid_table[port - 1][0],
-					     &zgid, sizeof gid))) {
-					dev->iboe.gid_table[port - 1][0] = gid;
-					++need_update;
-					hits[0] = 1;
-				} else if (free >= 0) {
-					dev->iboe.gid_table[port - 1][free] = gid;
-					hits[free] = 1;
-					++need_update;
-				}
+static int update_gid_table(struct mlx4_ib_dev *dev, int port,
+			    union ib_gid *gid, int clear)
+{
+	struct update_gid_work *work;
+	int i;
+	int need_update = 0;
+	int free = -1;
+	int found = -1;
+	int max_gids;
+
+	max_gids = dev->dev->caps.gid_table_len[port];
+	for (i = 0; i < max_gids; ++i) {
+		if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
+			    sizeof(*gid)))
+			found = i;
+
+		if (clear) {
+			if (found >= 0) {
+				need_update = 1;
+				dev->iboe.gid_table[port - 1][found] = zgid;
+				break;
 			}
+		} else {
+			if (found >= 0)
+				break;
+
+			if (free < 0 &&
+			    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
+				    sizeof(*gid)))
+				free = i;
 		}
 	}
-	rcu_read_unlock();
 
-	for (i = 0; i < 128; ++i)
-		if (!hits[i]) {
-			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
-				++need_update;
-			dev->iboe.gid_table[port - 1][i] = zgid;
-		}
+	if (found == -1 && !clear && free >= 0) {
+		dev->iboe.gid_table[port - 1][free] = *gid;
+		need_update = 1;
+	}
 
-	if (need_update) {
-		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
-		INIT_WORK(&work->work, update_gids_task);
-		work->port = port;
-		work->dev = dev;
-		queue_work(wq, &work->work);
-	} else
-		kfree(work);
+	if (!need_update)
+		return 0;
+
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return -ENOMEM;
+
+	memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
+	INIT_WORK(&work->work, update_gids_task);
+	work->port = port;
+	work->dev = dev;
+	queue_work(wq, &work->work);
 
-	kfree(hits);
 	return 0;
+}
 
-out:
-	kfree(work);
-	return ret;
+static int reset_gid_table(struct mlx4_ib_dev *dev)
+{
+	struct update_gid_work *work;
+
+
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return -ENOMEM;
+	memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
+	memset(work->gids, 0, sizeof(work->gids));
+	INIT_WORK(&work->work, reset_gids_task);
+	work->dev = dev;
+	queue_work(wq, &work->work);
+	return 0;
 }
 
-static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
+static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
+			      struct mlx4_ib_dev *ibdev, union ib_gid *gid)
 {
-	switch (event) {
-	case NETDEV_UP:
-	case NETDEV_CHANGEADDR:
-		update_ipv6_gids(dev, port, 0);
-		break;
+	struct mlx4_ib_iboe *iboe;
+	int port = 0;
+	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
+				rdma_vlan_dev_real_dev(event_netdev) :
+				event_netdev;
+
+	if (event != NETDEV_DOWN && event != NETDEV_UP)
+		return 0;
+
+	if ((real_dev != event_netdev) &&
+	    (event == NETDEV_DOWN) &&
+	    rdma_link_local_addr((struct in6_addr *)gid))
+		return 0;
+
+	iboe = &ibdev->iboe;
+	spin_lock(&iboe->lock);
+
+	for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+		if ((netif_is_bond_master(real_dev) &&
+		     (real_dev == iboe->masters[port - 1])) ||
+		     (!netif_is_bond_master(real_dev) &&
+		     (real_dev == iboe->netdevs[port - 1])))
+			update_gid_table(ibdev, port, gid,
+					 event == NETDEV_DOWN);
+
+	spin_unlock(&iboe->lock);
+	return 0;
 
-	case NETDEV_DOWN:
-		update_ipv6_gids(dev, port, 1);
-		dev->iboe.netdevs[port - 1] = NULL;
-	}
 }
 
-static void netdev_added(struct mlx4_ib_dev *dev, int port)
+static u8 mlx4_ib_get_dev_port(struct net_device *dev,
+			       struct mlx4_ib_dev *ibdev)
 {
-	update_ipv6_gids(dev, port, 0);
+	u8 port = 0;
+	struct mlx4_ib_iboe *iboe;
+	struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
+				rdma_vlan_dev_real_dev(dev) : dev;
+
+	iboe = &ibdev->iboe;
+	spin_lock(&iboe->lock);
+
+	for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+		if ((netif_is_bond_master(real_dev) &&
+		     (real_dev == iboe->masters[port - 1])) ||
+		     (!netif_is_bond_master(real_dev) &&
+		     (real_dev == iboe->netdevs[port - 1])))
+			break;
+
+	spin_unlock(&iboe->lock);
+
+	if ((port == 0) || (port > MLX4_MAX_PORTS))
+		return 0;
+	else
+		return port;
 }
 
-static void netdev_removed(struct mlx4_ib_dev *dev, int port)
+static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
+			      void *ptr)
 {
-	update_ipv6_gids(dev, port, 1);
+	struct mlx4_ib_dev *ibdev;
+	struct in_ifaddr *ifa = ptr;
+	union ib_gid gid;
+	struct net_device *event_netdev = ifa->ifa_dev->dev;
+
+	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
+
+	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
+
+	mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
+	return NOTIFY_DONE;
 }
 
-static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
+#if IS_ENABLED(CONFIG_IPV6)
+static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
 				void *ptr)
 {
-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct mlx4_ib_dev *ibdev;
-	struct net_device *oldnd;
+	struct inet6_ifaddr *ifa = ptr;
+	union ib_gid *gid = (union ib_gid *)&ifa->addr;
+	struct net_device *event_netdev = ifa->idev->dev;
+
+	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);
+
+	mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
+	return NOTIFY_DONE;
+}
+#endif
+
+static void mlx4_ib_get_dev_addr(struct net_device *dev,
+				 struct mlx4_ib_dev *ibdev, u8 port)
+{
+	struct in_device *in_dev;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct inet6_dev *in6_dev;
+	union ib_gid  *pgid;
+	struct inet6_ifaddr *ifp;
+#endif
+	union ib_gid gid;
+
+
+	if ((port == 0) || (port > MLX4_MAX_PORTS))
+		return;
+
+	/* IPv4 gids */
+	in_dev = in_dev_get(dev);
+	if (in_dev) {
+		for_ifa(in_dev) {
+			/*ifa->ifa_address;*/
+			ipv6_addr_set_v4mapped(ifa->ifa_address,
+					       (struct in6_addr *)&gid);
+			update_gid_table(ibdev, port, &gid, 0);
+		}
+		endfor_ifa(in_dev);
+		in_dev_put(in_dev);
+	}
+#if IS_ENABLED(CONFIG_IPV6)
+	/* IPv6 gids */
+	in6_dev = in6_dev_get(dev);
+	if (in6_dev) {
+		read_lock_bh(&in6_dev->lock);
+		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+			pgid = (union ib_gid *)&ifp->addr;
+			update_gid_table(ibdev, port, pgid, 0);
+		}
+		read_unlock_bh(&in6_dev->lock);
+		in6_dev_put(in6_dev);
+	}
+#endif
+}
+
+static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
+{
+	struct net_device *dev;
+
+	if (reset_gid_table(ibdev))
+		return -1;
+
+	read_lock(&dev_base_lock);
+
+	for_each_netdev(&init_net, dev) {
+		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
+		if (port)
+			mlx4_ib_get_dev_addr(dev, ibdev, port);
+	}
+
+	read_unlock(&dev_base_lock);
+
+	return 0;
+}
+
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+{
 	struct mlx4_ib_iboe *iboe;
 	int port;
 
-	if (!net_eq(dev_net(dev), &init_net))
-		return NOTIFY_DONE;
-
-	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
 	iboe = &ibdev->iboe;
 
 	spin_lock(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
-		oldnd = iboe->netdevs[port - 1];
+		struct net_device *old_master = iboe->masters[port - 1];
+		struct net_device *curr_master;
 		iboe->netdevs[port - 1] =
 			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
-		if (oldnd != iboe->netdevs[port - 1]) {
-			if (iboe->netdevs[port - 1])
-				netdev_added(ibdev, port);
-			else
-				netdev_removed(ibdev, port);
+
+		if (iboe->netdevs[port - 1] &&
+		    netif_is_bond_slave(iboe->netdevs[port - 1])) {
+			rtnl_lock();
+			iboe->masters[port - 1] = netdev_master_upper_dev_get(
+				iboe->netdevs[port - 1]);
+			rtnl_unlock();
 		}
-	}
+		curr_master = iboe->masters[port - 1];
 
-	if (dev == iboe->netdevs[0] ||
-	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
-		handle_en_event(ibdev, 1, event);
-	else if (dev == iboe->netdevs[1]
-		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
-		handle_en_event(ibdev, 2, event);
+		/* if bonding is used it is possible that we add it to masters
+		    only after IP address is assigned to the net bonding
+		    interface */
+		if (curr_master && (old_master != curr_master))
+			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+	}
 
 	spin_unlock(&iboe->lock);
+}
+
+static int mlx4_ib_netdev_event(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct mlx4_ib_dev *ibdev;
+
+	if (!net_eq(dev_net(dev), &init_net))
+		return NOTIFY_DONE;
+
+	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
+	mlx4_ib_scan_netdevs(ibdev);
 
 	return NOTIFY_DONE;
 }
@@ -1886,11 +2024,35 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	if (mlx4_ib_init_sriov(ibdev))
 		goto err_mad;
 
-	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
-		iboe->nb.notifier_call = mlx4_ib_netdev_event;
-		err = register_netdevice_notifier(&iboe->nb);
-		if (err)
-			goto err_sriov;
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
+		if (!iboe->nb.notifier_call) {
+			iboe->nb.notifier_call = mlx4_ib_netdev_event;
+			err = register_netdevice_notifier(&iboe->nb);
+			if (err) {
+				iboe->nb.notifier_call = NULL;
+				goto err_notif;
+			}
+		}
+		if (!iboe->nb_inet.notifier_call) {
+			iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
+			err = register_inetaddr_notifier(&iboe->nb_inet);
+			if (err) {
+				iboe->nb_inet.notifier_call = NULL;
+				goto err_notif;
+			}
+		}
+#if IS_ENABLED(CONFIG_IPV6)
+		if (!iboe->nb_inet6.notifier_call) {
+			iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
+			err = register_inet6addr_notifier(&iboe->nb_inet6);
+			if (err) {
+				iboe->nb_inet6.notifier_call = NULL;
+				goto err_notif;
+			}
+		}
+#endif
+		mlx4_ib_scan_netdevs(ibdev);
+		mlx4_ib_init_gid_table(ibdev);
 	}
 
 	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -1916,11 +2078,25 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	return ibdev;
 
 err_notif:
-	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-		pr_warn("failure unregistering notifier\n");
+	if (ibdev->iboe.nb.notifier_call) {
+		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+			pr_warn("failure unregistering notifier\n");
+		ibdev->iboe.nb.notifier_call = NULL;
+	}
+	if (ibdev->iboe.nb_inet.notifier_call) {
+		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
+			pr_warn("failure unregistering notifier\n");
+		ibdev->iboe.nb_inet.notifier_call = NULL;
+	}
+#if IS_ENABLED(CONFIG_IPV6)
+	if (ibdev->iboe.nb_inet6.notifier_call) {
+		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
+			pr_warn("failure unregistering notifier\n");
+		ibdev->iboe.nb_inet6.notifier_call = NULL;
+	}
+#endif
 	flush_workqueue(wq);
 
-err_sriov:
 	mlx4_ib_close_sriov(ibdev);
 
 err_mad:
@@ -2039,6 +2215,19 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 		kfree(ibdev->ib_uc_qpns_bitmap);
 	}
 
+	if (ibdev->iboe.nb_inet.notifier_call) {
+		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
+			pr_warn("failure unregistering notifier\n");
+		ibdev->iboe.nb_inet.notifier_call = NULL;
+	}
+#if IS_ENABLED(CONFIG_IPV6)
+	if (ibdev->iboe.nb_inet6.notifier_call) {
+		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
+			pr_warn("failure unregistering notifier\n");
+		ibdev->iboe.nb_inet6.notifier_call = NULL;
+	}
+#endif
+
 	iounmap(ibdev->uar_map);
 	for (p = 0; p < ibdev->num_ports; ++p)
 		if (ibdev->counters[p] != -1)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 837f9aa3d2a2..a230683af940 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -432,7 +432,10 @@ struct mlx4_ib_sriov {
 struct mlx4_ib_iboe {
 	spinlock_t		lock;
 	struct net_device      *netdevs[MLX4_MAX_PORTS];
+	struct net_device      *masters[MLX4_MAX_PORTS];
 	struct notifier_block 	nb;
+	struct notifier_block	nb_inet;
+	struct notifier_block	nb_inet6;
 	union ib_gid		gid_table[MLX4_MAX_PORTS][128];
 };
 
@@ -683,9 +686,6 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 			union ib_gid *gid, int netw_view);
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-			u8 *mac, int *is_mcast, u8 port);
-
 static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 {
 	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 387fbf274151..d8f4d1fe8494 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -90,6 +90,21 @@ enum {
 	MLX4_RAW_QP_MSGMAX	= 31,
 };
 
+#ifndef ETH_ALEN
+#define ETH_ALEN        6
+#endif
+static inline u64 mlx4_mac_to_u64(u8 *addr)
+{
+	u64 mac = 0;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		mac <<= 8;
+		mac |= addr[i];
+	}
+	return mac;
+}
+
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
 	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
@@ -1171,16 +1186,15 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
 	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
 }
 
-static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
-			 struct mlx4_qp_path *path, u8 port)
+static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
+			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
+			  u8 port)
 {
-	int err;
 	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
 		IB_LINK_LAYER_ETHERNET;
-	u8 mac[6];
-	int is_mcast;
-	u16 vlan_tag;
 	int vidx;
+	int smac_index;
+
 
 	path->grh_mylmc	= ah->src_path_bits & 0x7f;
 	path->rlid	= cpu_to_be16(ah->dlid);
@@ -1215,22 +1229,27 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 		if (!(ah->ah_flags & IB_AH_GRH))
 			return -1;
 
-		err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
-		if (err)
-			return err;
-
-		memcpy(path->dmac, mac, 6);
+		memcpy(path->dmac, ah->dmac, ETH_ALEN);
 		path->ackto = MLX4_IB_LINK_TYPE_ETH;
-		/* use index 0 into MAC table for IBoE */
-		path->grh_mylmc &= 0x80;
+		/* find the index into MAC table for IBoE */
+		if (!is_zero_ether_addr((const u8 *)&smac)) {
+			if (mlx4_find_cached_mac(dev->dev, port, smac,
+						 &smac_index))
+				return -ENOENT;
+		} else {
+			smac_index = 0;
+		}
 
-		vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
+		path->grh_mylmc &= 0x80 | smac_index;
+
+		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
 		if (vlan_tag < 0x1000) {
 			if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
 				return -ENOENT;
 
 			path->vlan_index = vidx;
 			path->fl = 1 << 6;
+			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
 		}
 	} else
 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
@@ -1239,6 +1258,28 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 	return 0;
 }
 
+static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
+			 enum ib_qp_attr_mask qp_attr_mask,
+			 struct mlx4_qp_path *path, u8 port)
+{
+	return _mlx4_set_path(dev, &qp->ah_attr,
+			      mlx4_mac_to_u64((u8 *)qp->smac),
+			      (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
+			      path, port);
+}
+
+static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
+			     const struct ib_qp_attr *qp,
+			     enum ib_qp_attr_mask qp_attr_mask,
+			     struct mlx4_qp_path *path, u8 port)
+{
+	return _mlx4_set_path(dev, &qp->alt_ah_attr,
+			      mlx4_mac_to_u64((u8 *)qp->alt_smac),
+			      (qp_attr_mask & IB_QP_ALT_VID) ?
+			      qp->alt_vlan_id : 0xffff,
+			      path, port);
+}
+
 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
 	struct mlx4_ib_gid_entry *ge, *tmp;
@@ -1362,7 +1403,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	}
 
 	if (attr_mask & IB_QP_AV) {
-		if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
+		if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
 				  attr_mask & IB_QP_PORT ?
 				  attr->port_num : qp->port))
 			goto out;
@@ -1385,8 +1426,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
 			goto out;
 
-		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
-				  attr->alt_port_num))
+		if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
+				      attr->alt_port_num))
 			goto out;
 
 		context->alt_path.pkey_index = attr->alt_pkey_index;
@@ -1497,6 +1538,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
 					MLX4_IB_LINK_TYPE_ETH;
 
+	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
+		int is_eth = rdma_port_get_link_layer(
+				&dev->ib_dev, qp->port) ==
+				IB_LINK_LAYER_ETHERNET;
+		if (is_eth) {
+			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
+			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
+		}
+	}
+
+
 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
 		sqd_event = 1;
@@ -1599,13 +1651,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	int err = -EINVAL;
-
+	int ll;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+		ll = IB_LINK_LAYER_UNSPECIFIED;
+	} else {
+		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
+	}
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+				attr_mask, ll)) {
 		pr_debug("qpn 0x%x: invalid attribute mask specified "
 			 "for transition %d to %d. qp_type %d,"
 			 " attr_mask 0x%x\n",
@@ -1822,8 +1882,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			return err;
 		}
 
-		vlan = rdma_get_vlan_id(&sgid);
-		is_vlan = vlan < 0x1000;
+		if (ah->av.eth.vlan != 0xffff) {
+			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
+			is_vlan = 1;
+		}
 	}
 	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
 