author     Linus Torvalds <torvalds@linux-foundation.org>   2014-09-23 19:47:34 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-09-23 19:47:34 -0400
commit     452b6361c4d9baf6940adb7b1316e0f386c39799 (patch)
tree       9b2ad0dac3ea58fe9c29893d0d0eb80005974344 /drivers/infiniband/hw/mlx4
parent     ffd4341d6af49ad899144b5a02f2eba65be03f6c (diff)
parent     3bdad2d13fa62bcb59ca2506e74ce467ea436586 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband/rdma fixes from Roland Dreier:
"Last late set of InfiniBand/RDMA fixes for 3.17:
- fixes for the new memory region re-registration support
- iSER initiator error path fixes
- grab bag of small fixes for the qib and ocrdma hardware drivers
- larger set of fixes for mlx4, especially in RoCE mode"
* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (26 commits)
IB/mlx4: Fix VF mac handling in RoCE
IB/mlx4: Do not allow APM under RoCE
IB/mlx4: Don't update QP1 in native mode
IB/mlx4: Avoid accessing netdevice when building RoCE qp1 header
mlx4: Fix mlx4 reg/unreg mac to work properly with 0-mac addresses
IB/core: When marshaling uverbs path, clear unused fields
IB/mlx4: Avoid executing gid task when device is being removed
IB/mlx4: Fix lockdep splat for the iboe lock
IB/mlx4: Get upper dev addresses as RoCE GIDs when port comes up
IB/mlx4: Reorder steps in RoCE GID table initialization
IB/mlx4: Don't duplicate the default RoCE GID
IB/mlx4: Avoid null pointer dereference in mlx4_ib_scan_netdevs()
IB/iser: Bump version to 1.4.1
IB/iser: Allow bind only when connection state is UP
IB/iser: Fix RX/TX CQ resource leak on error flow
RDMA/ocrdma: Use right macro in query AH
RDMA/ocrdma: Resolve L2 address when creating user AH
mlx4: Correct error flows in rereg_mr
IB/qib: Correct reference counting in debugfs qp_stats
IPoIB: Remove unnecessary port query
...
Diffstat (limited to 'drivers/infiniband/hw/mlx4')
 drivers/infiniband/hw/mlx4/main.c    | 169
 drivers/infiniband/hw/mlx4/mlx4_ib.h |   1
 drivers/infiniband/hw/mlx4/mr.c      |   7
 drivers/infiniband/hw/mlx4/qp.c      |  60
 4 files changed, 159 insertions(+), 78 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 162b82c1dde4..bda5994ceb68 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -59,6 +59,7 @@
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0   0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
 	return dmfs;
 }
 
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+	int ib_ports = 0;
+	int i;
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_ports++;
+
+	return ib_ports;
+}
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
 				struct ib_device_attr *props)
 {
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
 	int err = -ENOMEM;
+	int have_ib_ports;
 
 	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
 	memset(props, 0, sizeof *props);
 
+	have_ib_ports = num_ib_ports(dev->dev);
+
 	props->fw_ver = dev->dev->caps.fw_ver;
 	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT |
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+	if (dev->dev->caps.max_gso_sz &&
+	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
 		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state		= IB_PORT_DOWN;
 	props->phys_state	= state_to_phys_state(props->state);
 	props->active_mtu	= IB_MTU_256;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
 	if (!ndev)
 		goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 					IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state	= state_to_phys_state(props->state);
 out_unlock:
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 	if (!mqp->port)
 		return 0;
 
-	spin_lock(&mdev->iboe.lock);
+	spin_lock_bh(&mdev->iboe.lock);
 	ndev = mdev->iboe.netdevs[mqp->port - 1];
 	if (ndev)
 		dev_hold(ndev);
-	spin_unlock(&mdev->iboe.lock);
+	spin_unlock_bh(&mdev->iboe.lock);
 
 	if (ndev) {
 		ret = 1;
@@ -1292,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	mutex_lock(&mqp->mutex);
 	ge = find_gid_entry(mqp, gid->raw);
 	if (ge) {
-		spin_lock(&mdev->iboe.lock);
+		spin_lock_bh(&mdev->iboe.lock);
 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
 		if (ndev)
 			dev_hold(ndev);
-		spin_unlock(&mdev->iboe.lock);
+		spin_unlock_bh(&mdev->iboe.lock);
 		if (ndev)
 			dev_put(ndev);
 		list_del(&ge->list);
@@ -1417,6 +1434,9 @@ static void update_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev	*dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1447,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev	*dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("reset gid table failed\n");
@@ -1581,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 		return 0;
 
 	iboe = &ibdev->iboe;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
 		if ((netif_is_bond_master(real_dev) &&
@@ -1591,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 			update_gid_table(ibdev, port, gid,
 					 event == NETDEV_DOWN, 0);
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	return 0;
 
 }
@@ -1664,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 	new_smac = mlx4_mac_to_u64(dev->dev_addr);
 	read_unlock(&dev_base_lock);
 
+	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+	/* no need for update QP1 and mac registration in non-SRIOV */
+	if (!mlx4_is_mfunc(ibdev->dev))
+		return;
+
 	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
 	qp = ibdev->qp1_proxy[port - 1];
 	if (qp) {
 		int new_smac_index;
-		u64 old_smac = qp->pri.smac;
+		u64 old_smac;
 		struct mlx4_update_qp_params update_params;
 
+		mutex_lock(&qp->mutex);
+		old_smac = qp->pri.smac;
 		if (new_smac == old_smac)
 			goto unlock;
 
@@ -1685,17 +1716,20 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 			release_mac = new_smac;
 			goto unlock;
 		}
-
+		/* if old port was zero, no mac was yet registered for this QP */
+		if (qp->pri.smac_port)
+			release_mac = old_smac;
 		qp->pri.smac = new_smac;
+		qp->pri.smac_port = port;
 		qp->pri.smac_index = new_smac_index;
-
-		release_mac = old_smac;
 	}
 
 unlock:
-	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 	if (release_mac != MLX4_IB_INVALID_MAC)
 		mlx4_unregister_mac(ibdev->dev, port, release_mac);
+	if (qp)
+		mutex_unlock(&qp->mutex);
+	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 }
 
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
@@ -1706,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 	struct inet6_dev *in6_dev;
 	union ib_gid  *pgid;
 	struct inet6_ifaddr *ifp;
+	union ib_gid default_gid;
 #endif
 	union ib_gid gid;
 
@@ -1726,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 		in_dev_put(in_dev);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
+	mlx4_make_default_gid(dev, &default_gid);
 	/* IPv6 gids */
 	in6_dev = in6_dev_get(dev);
 	if (in6_dev) {
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
+			if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+				continue;
 			update_gid_table(ibdev, port, pgid, 0, 0);
 		}
 		read_unlock_bh(&in6_dev->lock);
@@ -1753,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 	struct net_device *dev;
 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
 	int i;
+	int err = 0;
 
-	for (i = 1; i <= ibdev->num_ports; ++i)
-		if (reset_gid_table(ibdev, i))
-			return -1;
+	for (i = 1; i <= ibdev->num_ports; ++i) {
+		if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+		    IB_LINK_LAYER_ETHERNET) {
+			err = reset_gid_table(ibdev, i);
+			if (err)
+				goto out;
+		}
+	}
 
 	read_lock(&dev_base_lock);
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for_each_netdev(&init_net, dev) {
 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-		if (port)
+		/* port will be non-zero only for ETH ports */
+		if (port) {
+			mlx4_ib_set_default_gid(ibdev, dev, port);
 			mlx4_ib_get_dev_addr(dev, ibdev, port);
+		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	read_unlock(&dev_base_lock);
-
-	return 0;
+out:
+	return err;
 }
 
 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1784,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 
 	iboe = &ibdev->iboe;
 
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
 		enum ib_port_state	port_state = IB_PORT_NOP;
 		struct net_device *old_master = iboe->masters[port - 1];
@@ -1816,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
 						IB_PORT_ACTIVE : IB_PORT_DOWN;
 			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		} else {
-			reset_gid_table(ibdev, port);
-		}
-		/* if using bonding/team and a slave port is down, we don't the bond IP
-		 * based gids in the table since flows that select port by gid may get
-		 * the down port.
-		 */
-		if (curr_master && (port_state == IB_PORT_DOWN)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		}
-		/* if bonding is used it is possible that we add it to masters
-		 * only after IP address is assigned to the net bonding
-		 * interface.
-		 */
-		if (curr_master && (old_master != curr_master)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
-		}
+			if (curr_master) {
+				/* if using bonding/team and a slave port is down, we
+				 * don't want the bond IP based gids in the table since
+				 * flows that select port by gid may get the down port.
+				 */
+				if (port_state == IB_PORT_DOWN) {
+					reset_gid_table(ibdev, port);
+					mlx4_ib_set_default_gid(ibdev,
+								curr_netdev,
+								port);
+				} else {
+					/* gids from the upper dev (bond/team)
+					 * should appear in port's gid table
+					 */
+					mlx4_ib_get_dev_addr(curr_master,
+							     ibdev, port);
+				}
+			}
+			/* if bonding is used it is possible that we add it to
+			 * masters only after IP address is assigned to the
+			 * net bonding interface.
+			 */
+			if (curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
+							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+			}
 
 			if (!curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
+							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+			}
+		} else {
 			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
 		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 
 	if (update_qps_port > 0)
 		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2186,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			goto err_steer_free_bitmap;
 	}
 
+	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
 	if (ib_register_device(&ibdev->ib_dev, NULL))
 		goto err_steer_free_bitmap;
 
@@ -2222,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			}
 		}
 #endif
-		for (i = 1 ; i <= ibdev->num_ports ; ++i)
-			reset_gid_table(ibdev, i);
-		rtnl_lock();
-		mlx4_ib_scan_netdevs(ibdev, NULL, 0);
-		rtnl_unlock();
-		mlx4_ib_init_gid_table(ibdev);
+		if (mlx4_ib_init_gid_table(ibdev))
+			goto err_notif;
 	}
 
 	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2375,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
+	ibdev->ib_active = false;
+	flush_workqueue(wq);
+
 	mlx4_ib_close_sriov(ibdev);
 	mlx4_ib_mad_cleanup(ibdev);
 	ib_unregister_device(&ibdev->ib_dev);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e8cad3926bfc..6eb743f65f6f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -451,6 +451,7 @@ struct mlx4_ib_iboe {
 	spinlock_t		lock;
 	struct net_device      *netdevs[MLX4_MAX_PORTS];
 	struct net_device      *masters[MLX4_MAX_PORTS];
+	atomic64_t		mac[MLX4_MAX_PORTS];
 	struct notifier_block	nb;
 	struct notifier_block	nb_inet;
 	struct notifier_block	nb_inet6;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 9b0e80e59b08..8f9325cfc85d 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -234,14 +234,13 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 					   0);
 		if (IS_ERR(mmr->umem)) {
 			err = PTR_ERR(mmr->umem);
+			/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
 			mmr->umem = NULL;
 			goto release_mpt_entry;
 		}
 		n = ib_umem_page_count(mmr->umem);
 		shift = ilog2(mmr->umem->page_size);
 
-		mmr->mmr.iova       = virt_addr;
-		mmr->mmr.size       = length;
 		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
 					      virt_addr, length, n, shift,
 					      *pmpt_entry);
@@ -249,6 +248,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 			ib_umem_release(mmr->umem);
 			goto release_mpt_entry;
 		}
+		mmr->mmr.iova       = virt_addr;
+		mmr->mmr.size       = length;
 
 		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
 		if (err) {
@@ -262,6 +263,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 	 * return a failure. But dereg_mr will free the resources.
 	 */
 	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+	if (!err && flags & IB_MR_REREG_ACCESS)
+		mmr->mmr.access = mr_access_flags;
 
 release_mpt_entry:
 	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
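Note: the mr.c hunks reorder the rereg flow so cached MR attributes (iova, size, access) are only updated after the corresponding hardware write succeeds, leaving consistent state on error. A hedged, driver-agnostic sketch of that error-flow pattern (all names here are made up for illustration):

#include <errno.h>
#include <stdint.h>

struct mr_state {
	uint64_t iova;
	uint64_t size;
	int access;
};

/* stand-in for the hardware update; may fail (e.g. return -EIO) */
static int hw_write_mpt(uint64_t iova, uint64_t size, int access)
{
	(void)iova; (void)size; (void)access;
	return 0;
}

static int rereg_mr(struct mr_state *mr, uint64_t iova, uint64_t size, int access)
{
	int err = hw_write_mpt(iova, size, access);

	if (err)
		return err;	/* cached state still matches the hardware */

	/* commit to the software copy only after the hardware accepted it */
	mr->iova = iova;
	mr->size = size;
	mr->access = access;
	return 0;
}

int main(void)
{
	struct mr_state mr = { 0, 0, 0 };

	return rereg_mr(&mr, 0x1000, 4096, 1) ? 1 : 0;
}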
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 836503927dea..9c5150c3cb31 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 				     MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
 			pr_warn("modify QP %06x to RESET failed.\n",
 				qp->mqp.qpn);
-		if (qp->pri.smac) {
+		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = 0;
+			qp->pri.smac_port = 0;
 		}
 		if (qp->alt.smac) {
 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 		 * If one was already assigned, but the new mac differs,
 		 * unregister the old one and register the new one.
 		 */
-		if (!smac_info->smac || smac_info->smac != smac) {
+		if ((!smac_info->smac && !smac_info->smac_port) ||
+		    smac_info->smac != smac) {
 			/* register candidate now, unreg if needed, after success */
 			smac_index = mlx4_register_mac(dev->dev, port, smac);
 			if (smac_index >= 0) {
@@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
 				    struct mlx4_qp_context *context)
 {
-	struct net_device *ndev;
 	u64 u64_mac;
 	int smac_index;
 
-
-	ndev = dev->iboe.netdevs[qp->port - 1];
-	if (ndev) {
-		smac = ndev->dev_addr;
-		u64_mac = mlx4_mac_to_u64(smac);
-	} else {
-		u64_mac = dev->dev->caps.def_mac[qp->port];
-	}
+	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
 
 	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
-	if (!qp->pri.smac) {
+	if (!qp->pri.smac && !qp->pri.smac_port) {
 		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
 		if (smac_index >= 0) {
 			qp->pri.candidate_smac_index = smac_index;
@@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	int steer_qp = 0;
 	int err = -EINVAL;
 
+	/* APM is not supported under RoCE */
+	if (attr_mask & IB_QP_ALT_PATH &&
+	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+	    IB_LINK_LAYER_ETHERNET)
+		return -ENOTSUPP;
+
 	context = kzalloc(sizeof *context, GFP_KERNEL);
 	if (!context)
 		return -ENOMEM;
@@ -1786,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		if (qp->flags & MLX4_IB_QP_NETIF)
 			mlx4_ib_steer_qp_reg(dev, qp, 0);
 	}
-	if (qp->pri.smac) {
+	if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
 		mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 		qp->pri.smac = 0;
+		qp->pri.smac_port = 0;
 	}
 	if (qp->alt.smac) {
 		mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1812,11 +1813,12 @@ out:
 	if (err && steer_qp)
 		mlx4_ib_steer_qp_reg(dev, qp, 0);
 	kfree(context);
-	if (qp->pri.candidate_smac) {
+	if (qp->pri.candidate_smac ||
+	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
 		if (err) {
 			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
 		} else {
-			if (qp->pri.smac)
+			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
 				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = qp->pri.candidate_smac;
 			qp->pri.smac_index = qp->pri.candidate_smac_index;
@@ -2089,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	return 0;
 }
 
+static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
+{
+	int i;
+
+	for (i = ETH_ALEN; i; i--) {
+		dst_mac[i - 1] = src_mac & 0xff;
+		src_mac >>= 8;
+	}
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			    void *wqe, unsigned *mlx_seg_len)
 {
@@ -2203,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	}
 
 	if (is_eth) {
-		u8 *smac;
 		struct in6_addr in6;
 
 		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -2216,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
 		memcpy(&in6, sgid.raw, sizeof(in6));
 
-		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
-			smac = to_mdev(sqp->qp.ibqp.device)->
-				iboe.netdevs[sqp->qp.port - 1]->dev_addr;
-		else	/* use the src mac of the tunnel */
-			smac = ah->av.eth.s_mac;
-		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+			u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
+			u8 smac[ETH_ALEN];
+
+			mlx4_u64_to_smac(smac, mac);
+			memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
+		} else {
+			/* use the src mac of the tunnel */
+			memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
+		}
+
 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
 		if (!is_vlan) {
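Note: the qp.c hunks above build the QP1 source MAC from the cached 64-bit value rather than the netdev, converting it back to six octets with the new mlx4_u64_to_smac() helper. A standalone round-trip sketch of that byte ordering (plain user-space C, not kernel code; the helper names below are local to this example), matching mlx4_mac_to_u64() packing addr[0] as the most significant byte:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static uint64_t mac_to_u64(const uint8_t *addr)
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];		/* addr[0] ends up most significant */
	}
	return mac;
}

static void u64_to_mac(uint8_t *dst, uint64_t src)
{
	int i;

	for (i = ETH_ALEN; i; i--) {
		dst[i - 1] = src & 0xff;	/* low byte -> last octet */
		src >>= 8;
	}
}

int main(void)
{
	uint8_t mac[ETH_ALEN] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
	uint8_t out[ETH_ALEN];

	u64_to_mac(out, mac_to_u64(mac));	/* round trip preserves the address */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}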