Diffstat (limited to 'drivers/infiniband/hw/mlx4/main.c')
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	201
1 file changed, 146 insertions(+), 55 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e1e558a3d692..bda5994ceb68 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -59,6 +59,7 @@
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0 0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
 	return dmfs;
 }
 
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+	int ib_ports = 0;
+	int i;
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_ports++;
+
+	return ib_ports;
+}
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
 				struct ib_device_attr *props)
 {
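
Reviewer note: num_ib_ports() counts how many ports are configured for the
InfiniBand link layer, so IB-only capabilities can be gated on their presence
(see the APM change below). For context, mlx4_foreach_port() is an existing
iterator in include/linux/mlx4/driver.h roughly along these lines (a sketch
from memory, not part of this patch):

	#define mlx4_foreach_port(port, dev, type)				\
		for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
			if ((type) == (dev)->caps.port_mask[(port)])
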
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
 	int err = -ENOMEM;
+	int have_ib_ports;
 
 	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
 	memset(props, 0, sizeof *props);
 
+	have_ib_ports = num_ib_ports(dev->dev);
+
 	props->fw_ver = dev->dev->caps.fw_ver;
 	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT |
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+	if (dev->dev->caps.max_gso_sz &&
+	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
 		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
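
Reviewer note: after this hunk, IB_DEVICE_AUTO_PATH_MIG is only advertised
when the HCA actually has an IB port (APM is meaningless on a RoCE-only
configuration), and UD TSO is no longer advertised on A0-revision silicon.
A minimal user-space sketch of how the resulting capability bit is observed
through libibverbs; device selection and error handling are trimmed, and the
choice of list[0] is purely illustrative:

	#include <stdio.h>
	#include <infiniband/verbs.h>

	int main(void)
	{
		struct ibv_device **list = ibv_get_device_list(NULL);
		struct ibv_context *ctx =
			(list && list[0]) ? ibv_open_device(list[0]) : NULL;
		struct ibv_device_attr attr;

		if (ctx && !ibv_query_device(ctx, &attr))
			/* reflects the IB_DEVICE_AUTO_PATH_MIG gating above */
			printf("APM supported: %s\n",
			       (attr.device_cap_flags & IBV_DEVICE_AUTO_PATH_MIG) ?
			       "yes" : "no");
		if (ctx)
			ibv_close_device(ctx);
		if (list)
			ibv_free_device_list(list);
		return 0;
	}
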
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state = IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 	props->active_mtu = IB_MTU_256;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
 	if (!ndev)
 		goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 					IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 out_unlock:
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 	if (!mqp->port)
 		return 0;
 
-	spin_lock(&mdev->iboe.lock);
+	spin_lock_bh(&mdev->iboe.lock);
 	ndev = mdev->iboe.netdevs[mqp->port - 1];
 	if (ndev)
 		dev_hold(ndev);
-	spin_unlock(&mdev->iboe.lock);
+	spin_unlock_bh(&mdev->iboe.lock);
 
 	if (ndev) {
 		ret = 1;
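
Reviewer note: this and the other spin_lock() -> spin_lock_bh() conversions
in this patch follow the standard rule: once iboe->lock can also be taken
from bottom-half (softirq) context, process-context holders must disable BHs,
or a softirq arriving on the same CPU can deadlock on the held lock. A generic
sketch of the pairing (illustrative names, not from this driver):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Process context: disable bottom halves while holding the lock,
	 * because the softirq path below takes the same lock.
	 */
	static void demo_process_ctx(void)
	{
		spin_lock_bh(&demo_lock);
		/* ... touch shared state ... */
		spin_unlock_bh(&demo_lock);
	}

	/* Softirq context (e.g. a timer callback): plain spin_lock is
	 * enough here, since another BH cannot preempt a running one on
	 * the same CPU.
	 */
	static void demo_softirq_ctx(void)
	{
		spin_lock(&demo_lock);
		/* ... touch shared state ... */
		spin_unlock(&demo_lock);
	}
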
@@ -1089,6 +1106,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
 	return err;
 }
 
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+				    u64 *reg_id)
+{
+	void *ib_flow;
+	union ib_flow_spec *ib_spec;
+	struct mlx4_dev *dev = to_mdev(qp->device)->dev;
+	int err = 0;
+
+	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+		return 0; /* do nothing */
+
+	ib_flow = flow_attr + 1;
+	ib_spec = (union ib_flow_spec *)ib_flow;
+
+	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+		return 0; /* do nothing */
+
+	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+				    flow_attr->port, qp->qp_num,
+				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+				    reg_id);
+	return err;
+}
+
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 					   struct ib_flow_attr *flow_attr,
 					   int domain)
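
Reviewer note: mlx4_ib_tunnel_steer_add() depends on the uverbs flow-rule
memory layout, where the specs are packed immediately after struct
ib_flow_attr; that is what the "flow_attr + 1" pointer arithmetic walks to.
A hypothetical caller-side layout for the single case the helper accepts
(exactly one IB_FLOW_SPEC_ETH spec); the struct and helper names here are
illustrative, not from this patch:

	/* one Ethernet spec directly after the attribute header */
	struct vxlan_steer_rule {
		struct ib_flow_attr     attr;
		struct ib_flow_spec_eth eth;
	};

	static void init_rule(struct vxlan_steer_rule *rule, u8 port)
	{
		memset(rule, 0, sizeof(*rule));
		rule->attr.type         = IB_FLOW_ATTR_NORMAL;
		rule->attr.num_of_specs = 1;	/* the "== 1" check above */
		rule->attr.port         = port;
		rule->eth.type          = IB_FLOW_SPEC_ETH;
		rule->eth.size          = sizeof(rule->eth);
	}
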
@@ -1136,6 +1177,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 		i++;
 	}
 
+	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+		if (err)
+			goto err_free;
+	}
+
 	return &mflow->ibflow;
 
 err_free:
@@ -1262,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	mutex_lock(&mqp->mutex);
 	ge = find_gid_entry(mqp, gid->raw);
 	if (ge) {
-		spin_lock(&mdev->iboe.lock);
+		spin_lock_bh(&mdev->iboe.lock);
 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
 		if (ndev)
 			dev_hold(ndev);
-		spin_unlock(&mdev->iboe.lock);
+		spin_unlock_bh(&mdev->iboe.lock);
 		if (ndev)
 			dev_put(ndev);
 		list_del(&ge->list);
@@ -1387,6 +1434,9 @@ static void update_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev *dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1417,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev *dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("reset gid table failed\n");
@@ -1551,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 		return 0;
 
 	iboe = &ibdev->iboe;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
 		if ((netif_is_bond_master(real_dev) &&
@@ -1561,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 			update_gid_table(ibdev, port, gid,
 					 event == NETDEV_DOWN, 0);
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	return 0;
 
 }
@@ -1634,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 	new_smac = mlx4_mac_to_u64(dev->dev_addr);
 	read_unlock(&dev_base_lock);
 
+	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+	/* no need for update QP1 and mac registration in non-SRIOV */
+	if (!mlx4_is_mfunc(ibdev->dev))
+		return;
+
 	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
 	qp = ibdev->qp1_proxy[port - 1];
 	if (qp) {
 		int new_smac_index;
-		u64 old_smac = qp->pri.smac;
+		u64 old_smac;
 		struct mlx4_update_qp_params update_params;
 
+		mutex_lock(&qp->mutex);
+		old_smac = qp->pri.smac;
 		if (new_smac == old_smac)
 			goto unlock;
 
@@ -1650,22 +1711,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 			goto unlock;
 
 		update_params.smac_index = new_smac_index;
-		if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
 				   &update_params)) {
 			release_mac = new_smac;
 			goto unlock;
 		}
-
+		/* if old port was zero, no mac was yet registered for this QP */
+		if (qp->pri.smac_port)
+			release_mac = old_smac;
 		qp->pri.smac = new_smac;
+		qp->pri.smac_port = port;
 		qp->pri.smac_index = new_smac_index;
-
-		release_mac = old_smac;
 	}
 
 unlock:
-	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 	if (release_mac != MLX4_IB_INVALID_MAC)
 		mlx4_unregister_mac(ibdev->dev, port, release_mac);
+	if (qp)
+		mutex_unlock(&qp->mutex);
+	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 }
 
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
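
Reviewer note: caching the active port MAC with atomic64_set() lets hot-path
code read it without taking iboe->lock; the reader side is just (a sketch,
assuming the same 0-based array and 1-based port numbering used above):

	u64 smac = atomic64_read(&ibdev->iboe.mac[port - 1]);

Note also the new lock ordering: qp->mutex is acquired after
qp1_proxy_lock[port - 1] and released before it, and the stale MAC is
unregistered while both are still held.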
@@ -1676,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 	struct inet6_dev *in6_dev;
 	union ib_gid *pgid;
 	struct inet6_ifaddr *ifp;
+	union ib_gid default_gid;
 #endif
 	union ib_gid gid;
 
@@ -1696,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 		in_dev_put(in_dev);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
+	mlx4_make_default_gid(dev, &default_gid);
 	/* IPv6 gids */
 	in6_dev = in6_dev_get(dev);
 	if (in6_dev) {
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
+			if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+				continue;
 			update_gid_table(ibdev, port, pgid, 0, 0);
 		}
 		read_unlock_bh(&in6_dev->lock);
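
Reviewer note: the new memcmp() skip avoids re-adding the port's default RoCE
GID, since the IPv6 link-local address and the default GID are both derived
from the port MAC. For context, the standard EUI-64 derivation that a helper
like mlx4_make_default_gid() performs (a sketch, not this driver's code):

	/* MAC aa:bb:cc:dd:ee:ff -> GID fe80::a8bb:ccff:fedd:eeff */
	static void mac_to_default_gid(const u8 *mac, u8 gid[16])
	{
		memset(gid, 0, 16);
		gid[0] = 0xfe;		/* fe80::/64 link-local prefix */
		gid[1] = 0x80;
		memcpy(&gid[8], mac, 3);
		gid[8] ^= 0x02;		/* flip the universal/local bit */
		gid[11] = 0xff;		/* insert ff:fe in the middle */
		gid[12] = 0xfe;
		memcpy(&gid[13], mac + 3, 3);
	}
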
@@ -1723,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 	struct net_device *dev;
 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
 	int i;
+	int err = 0;
 
-	for (i = 1; i <= ibdev->num_ports; ++i)
-		if (reset_gid_table(ibdev, i))
-			return -1;
+	for (i = 1; i <= ibdev->num_ports; ++i) {
+		if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+		    IB_LINK_LAYER_ETHERNET) {
+			err = reset_gid_table(ibdev, i);
+			if (err)
+				goto out;
+		}
+	}
 
 	read_lock(&dev_base_lock);
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for_each_netdev(&init_net, dev) {
 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-		if (port)
+		/* port will be non-zero only for ETH ports */
+		if (port) {
+			mlx4_ib_set_default_gid(ibdev, dev, port);
 			mlx4_ib_get_dev_addr(dev, ibdev, port);
+		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	read_unlock(&dev_base_lock);
-
-	return 0;
+out:
+	return err;
 }
 
 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1754,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 
 	iboe = &ibdev->iboe;
 
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
 		enum ib_port_state port_state = IB_PORT_NOP;
 		struct net_device *old_master = iboe->masters[port - 1];
@@ -1786,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
 						IB_PORT_ACTIVE : IB_PORT_DOWN;
 			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		} else {
-			reset_gid_table(ibdev, port);
-		}
-		/* if using bonding/team and a slave port is down, we don't the bond IP
-		 * based gids in the table since flows that select port by gid may get
-		 * the down port.
-		 */
-		if (curr_master && (port_state == IB_PORT_DOWN)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		}
-		/* if bonding is used it is possible that we add it to masters
-		 * only after IP address is assigned to the net bonding
-		 * interface.
-		 */
-		if (curr_master && (old_master != curr_master)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
-		}
+			if (curr_master) {
+				/* if using bonding/team and a slave port is down, we
+				 * don't want the bond IP based gids in the table since
+				 * flows that select port by gid may get the down port.
+				 */
+				if (port_state == IB_PORT_DOWN) {
+					reset_gid_table(ibdev, port);
+					mlx4_ib_set_default_gid(ibdev,
+								curr_netdev,
+								port);
+				} else {
+					/* gids from the upper dev (bond/team)
+					 * should appear in port's gid table
+					 */
+					mlx4_ib_get_dev_addr(curr_master,
+							     ibdev, port);
+				}
+			}
+			/* if bonding is used it is possible that we add it to
+			 * masters only after IP address is assigned to the
+			 * net bonding interface.
+			 */
+			if (curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
+							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+			}
 
-		if (!curr_master && (old_master != curr_master)) {
+			if (!curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
+							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+			}
+		} else {
 			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
 		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 
 	if (update_qps_port > 0)
 		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2156,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			goto err_steer_free_bitmap;
 	}
 
+	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
 	if (ib_register_device(&ibdev->ib_dev, NULL))
 		goto err_steer_free_bitmap;
 
@@ -2192,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			}
 		}
 #endif
-		for (i = 1 ; i <= ibdev->num_ports ; ++i)
-			reset_gid_table(ibdev, i);
-		rtnl_lock();
-		mlx4_ib_scan_netdevs(ibdev, NULL, 0);
-		rtnl_unlock();
-		mlx4_ib_init_gid_table(ibdev);
+		if (mlx4_ib_init_gid_table(ibdev))
+			goto err_notif;
 	}
 
 	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2345,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
+	ibdev->ib_active = false;
+	flush_workqueue(wq);
+
 	mlx4_ib_close_sriov(ibdev);
 	mlx4_ib_mad_cleanup(ibdev);
 	ib_unregister_device(&ibdev->ib_dev);
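
Reviewer note: clearing ib_active before flush_workqueue() pairs with the new
early-return checks added to update_gids_task()/reset_gids_task() above: work
already queued either completes during the flush or observes !ib_active and
bails out, so no GID work touches the device past this point. The generic
shape of the pattern (a sketch with illustrative names):

	#include <linux/workqueue.h>

	struct demo_dev {
		bool active;
		struct workqueue_struct *wq;
		struct work_struct work;
	};

	static void demo_work_fn(struct work_struct *work)
	{
		struct demo_dev *d = container_of(work, struct demo_dev, work);

		if (!d->active)		/* mirrors the checks in the gid tasks */
			return;
		/* ... safe to touch the device ... */
	}

	static void demo_remove(struct demo_dev *d)
	{
		d->active = false;	/* later work bails out early ...    */
		flush_workqueue(d->wq);	/* ... and in-flight work is drained */
		/* ... now safe to tear down ... */
	}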