Diffstat (limited to 'drivers/infiniband/hw/mlx4/main.c')
 drivers/infiniband/hw/mlx4/main.c | 171
 1 file changed, 116 insertions(+), 55 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index af8256353c7d..bda5994ceb68 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -59,6 +59,7 @@
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0 0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
 	return dmfs;
 }
 
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+	int ib_ports = 0;
+	int i;
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_ports++;
+
+	return ib_ports;
+}
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
 				struct ib_device_attr *props)
 {
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
 	int err = -ENOMEM;
+	int have_ib_ports;
 
 	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
 	memset(props, 0, sizeof *props);
 
+	have_ib_ports = num_ib_ports(dev->dev);
+
 	props->fw_ver = dev->dev->caps.fw_ver;
 	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT |
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+	if (dev->dev->caps.max_gso_sz &&
+	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
 		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state = IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 	props->active_mtu = IB_MTU_256;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
 	if (!ndev)
 		goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 					IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 out_unlock:
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 	if (!mqp->port)
 		return 0;
 
-	spin_lock(&mdev->iboe.lock);
+	spin_lock_bh(&mdev->iboe.lock);
 	ndev = mdev->iboe.netdevs[mqp->port - 1];
 	if (ndev)
 		dev_hold(ndev);
-	spin_unlock(&mdev->iboe.lock);
+	spin_unlock_bh(&mdev->iboe.lock);
 
 	if (ndev) {
 		ret = 1;
@@ -1292,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	mutex_lock(&mqp->mutex);
 	ge = find_gid_entry(mqp, gid->raw);
 	if (ge) {
-		spin_lock(&mdev->iboe.lock);
+		spin_lock_bh(&mdev->iboe.lock);
 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
 		if (ndev)
 			dev_hold(ndev);
-		spin_unlock(&mdev->iboe.lock);
+		spin_unlock_bh(&mdev->iboe.lock);
 		if (ndev)
 			dev_put(ndev);
 		list_del(&ge->list);
@@ -1417,6 +1434,9 @@ static void update_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev *dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1447,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev *dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("reset gid table failed\n");
@@ -1581,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 		return 0;
 
 	iboe = &ibdev->iboe;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
 		if ((netif_is_bond_master(real_dev) &&
@@ -1591,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 			update_gid_table(ibdev, port, gid,
 					 event == NETDEV_DOWN, 0);
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	return 0;
 
 }
@@ -1664,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 	new_smac = mlx4_mac_to_u64(dev->dev_addr);
 	read_unlock(&dev_base_lock);
 
+	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+	/* no need for update QP1 and mac registration in non-SRIOV */
+	if (!mlx4_is_mfunc(ibdev->dev))
+		return;
+
 	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
 	qp = ibdev->qp1_proxy[port - 1];
 	if (qp) {
 		int new_smac_index;
-		u64 old_smac = qp->pri.smac;
+		u64 old_smac;
 		struct mlx4_update_qp_params update_params;
 
+		mutex_lock(&qp->mutex);
+		old_smac = qp->pri.smac;
 		if (new_smac == old_smac)
 			goto unlock;
 
@@ -1680,22 +1711,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 			goto unlock;
 
 		update_params.smac_index = new_smac_index;
-		if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
 				   &update_params)) {
 			release_mac = new_smac;
 			goto unlock;
 		}
-
+		/* if old port was zero, no mac was yet registered for this QP */
+		if (qp->pri.smac_port)
+			release_mac = old_smac;
 		qp->pri.smac = new_smac;
+		qp->pri.smac_port = port;
 		qp->pri.smac_index = new_smac_index;
-
-		release_mac = old_smac;
 	}
 
 unlock:
-	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 	if (release_mac != MLX4_IB_INVALID_MAC)
 		mlx4_unregister_mac(ibdev->dev, port, release_mac);
+	if (qp)
+		mutex_unlock(&qp->mutex);
+	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 }
 
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
@@ -1706,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 	struct inet6_dev *in6_dev;
 	union ib_gid *pgid;
 	struct inet6_ifaddr *ifp;
+	union ib_gid default_gid;
 #endif
 	union ib_gid gid;
 
@@ -1726,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 		in_dev_put(in_dev);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
+	mlx4_make_default_gid(dev, &default_gid);
 	/* IPv6 gids */
 	in6_dev = in6_dev_get(dev);
 	if (in6_dev) {
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
+			if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+				continue;
 			update_gid_table(ibdev, port, pgid, 0, 0);
 		}
 		read_unlock_bh(&in6_dev->lock);
@@ -1753,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 	struct net_device *dev;
 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
 	int i;
+	int err = 0;
 
-	for (i = 1; i <= ibdev->num_ports; ++i)
-		if (reset_gid_table(ibdev, i))
-			return -1;
+	for (i = 1; i <= ibdev->num_ports; ++i) {
+		if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+		    IB_LINK_LAYER_ETHERNET) {
+			err = reset_gid_table(ibdev, i);
+			if (err)
+				goto out;
+		}
+	}
 
 	read_lock(&dev_base_lock);
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for_each_netdev(&init_net, dev) {
 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-		if (port)
+		/* port will be non-zero only for ETH ports */
+		if (port) {
+			mlx4_ib_set_default_gid(ibdev, dev, port);
 			mlx4_ib_get_dev_addr(dev, ibdev, port);
+		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	read_unlock(&dev_base_lock);
-
-	return 0;
+out:
+	return err;
 }
 
 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1784,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 
 	iboe = &ibdev->iboe;
 
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
 		enum ib_port_state port_state = IB_PORT_NOP;
 		struct net_device *old_master = iboe->masters[port - 1];
@@ -1816,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
 						IB_PORT_ACTIVE : IB_PORT_DOWN;
 			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		} else {
-			reset_gid_table(ibdev, port);
-		}
-		/* if using bonding/team and a slave port is down, we don't the bond IP
-		 * based gids in the table since flows that select port by gid may get
-		 * the down port.
-		 */
-		if (curr_master && (port_state == IB_PORT_DOWN)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		}
-		/* if bonding is used it is possible that we add it to masters
-		 * only after IP address is assigned to the net bonding
-		 * interface.
-		 */
-		if (curr_master && (old_master != curr_master)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
-		}
+			if (curr_master) {
+				/* if using bonding/team and a slave port is down, we
+				 * don't want the bond IP based gids in the table since
+				 * flows that select port by gid may get the down port.
+				 */
+				if (port_state == IB_PORT_DOWN) {
+					reset_gid_table(ibdev, port);
+					mlx4_ib_set_default_gid(ibdev,
+								curr_netdev,
+								port);
+				} else {
+					/* gids from the upper dev (bond/team)
+					 * should appear in port's gid table
+					 */
+					mlx4_ib_get_dev_addr(curr_master,
+							     ibdev, port);
+				}
+			}
+			/* if bonding is used it is possible that we add it to
+			 * masters only after IP address is assigned to the
+			 * net bonding interface.
+			 */
+			if (curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
+							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+			}
 
 			if (!curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
+							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+			}
+		} else {
 			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
 		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 
 	if (update_qps_port > 0)
 		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2186,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		goto err_steer_free_bitmap;
 	}
 
+	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
 	if (ib_register_device(&ibdev->ib_dev, NULL))
 		goto err_steer_free_bitmap;
 
@@ -2222,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			}
 		}
 #endif
-		for (i = 1 ; i <= ibdev->num_ports ; ++i)
-			reset_gid_table(ibdev, i);
-		rtnl_lock();
-		mlx4_ib_scan_netdevs(ibdev, NULL, 0);
-		rtnl_unlock();
-		mlx4_ib_init_gid_table(ibdev);
+		if (mlx4_ib_init_gid_table(ibdev))
+			goto err_notif;
 	}
 
 	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2375,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
+	ibdev->ib_active = false;
+	flush_workqueue(wq);
+
 	mlx4_ib_close_sriov(ibdev);
 	mlx4_ib_mad_cleanup(ibdev);
 	ib_unregister_device(&ibdev->ib_dev);