aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c29
-rw-r--r--drivers/infiniband/hw/mlx4/main.c23
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c129
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c148
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c158
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c143
-rw-r--r--include/linux/mlx4/device.h35
12 files changed, 676 insertions, 114 deletions
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 2c572aed3f6f..fd36ec672632 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1245,21 +1245,9 @@ out:
1245 1245
1246static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port) 1246static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
1247{ 1247{
1248 int gids;
1249 int vfs;
1250
1251 if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) 1248 if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1252 return slave; 1249 return slave;
1253 1250 return mlx4_get_base_gid_ix(dev->dev, slave, port);
1254 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1255 vfs = dev->dev->num_vfs;
1256
1257 if (slave == 0)
1258 return 0;
1259 if (slave <= gids % vfs)
1260 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave - 1);
1261
1262 return MLX4_ROCE_PF_GIDS + (gids % vfs) + ((gids / vfs) * (slave - 1));
1263} 1251}
1264 1252
1265static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port, 1253static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
@@ -1281,6 +1269,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
1281 struct ib_ah_attr ah_attr; 1269 struct ib_ah_attr ah_attr;
1282 u8 *slave_id; 1270 u8 *slave_id;
1283 int slave; 1271 int slave;
1272 int port;
1284 1273
1285 /* Get slave that sent this packet */ 1274 /* Get slave that sent this packet */
1286 if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn || 1275 if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1360,6 +1349,10 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
1360 if (ah_attr.ah_flags & IB_AH_GRH) 1349 if (ah_attr.ah_flags & IB_AH_GRH)
1361 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr); 1350 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
1362 1351
1352 port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
1353 if (port < 0)
1354 return;
1355 ah_attr.port_num = port;
1363 memcpy(ah_attr.dmac, tunnel->hdr.mac, 6); 1356 memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
1364 ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan); 1357 ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
1365 /* if slave have default vlan use it */ 1358 /* if slave have default vlan use it */
@@ -1949,7 +1942,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
1949 ctx->port = port; 1942 ctx->port = port;
1950 ctx->ib_dev = &dev->ib_dev; 1943 ctx->ib_dev = &dev->ib_dev;
1951 1944
1952 for (i = 0; i < dev->dev->caps.sqp_demux; i++) { 1945 for (i = 0;
1946 i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
1947 i++) {
1948 struct mlx4_active_ports actv_ports =
1949 mlx4_get_active_ports(dev->dev, i);
1950
1951 if (!test_bit(port - 1, actv_ports.ports))
1952 continue;
1953
1953 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]); 1954 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
1954 if (ret) { 1955 if (ret) {
1955 ret = -ENOMEM; 1956 ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1d1750ef000a..6cb85467dde7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1546,7 +1546,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
1546 iboe = &ibdev->iboe; 1546 iboe = &ibdev->iboe;
1547 spin_lock(&iboe->lock); 1547 spin_lock(&iboe->lock);
1548 1548
1549 for (port = 1; port <= MLX4_MAX_PORTS; ++port) 1549 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
1550 if ((netif_is_bond_master(real_dev) && 1550 if ((netif_is_bond_master(real_dev) &&
1551 (real_dev == iboe->masters[port - 1])) || 1551 (real_dev == iboe->masters[port - 1])) ||
1552 (!netif_is_bond_master(real_dev) && 1552 (!netif_is_bond_master(real_dev) &&
@@ -1569,14 +1569,14 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
1569 1569
1570 iboe = &ibdev->iboe; 1570 iboe = &ibdev->iboe;
1571 1571
1572 for (port = 1; port <= MLX4_MAX_PORTS; ++port) 1572 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
1573 if ((netif_is_bond_master(real_dev) && 1573 if ((netif_is_bond_master(real_dev) &&
1574 (real_dev == iboe->masters[port - 1])) || 1574 (real_dev == iboe->masters[port - 1])) ||
1575 (!netif_is_bond_master(real_dev) && 1575 (!netif_is_bond_master(real_dev) &&
1576 (real_dev == iboe->netdevs[port - 1]))) 1576 (real_dev == iboe->netdevs[port - 1])))
1577 break; 1577 break;
1578 1578
1579 if ((port == 0) || (port > MLX4_MAX_PORTS)) 1579 if ((port == 0) || (port > ibdev->dev->caps.num_ports))
1580 return 0; 1580 return 0;
1581 else 1581 else
1582 return port; 1582 return port;
@@ -1626,7 +1626,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
1626 union ib_gid gid; 1626 union ib_gid gid;
1627 1627
1628 1628
1629 if ((port == 0) || (port > MLX4_MAX_PORTS)) 1629 if ((port == 0) || (port > ibdev->dev->caps.num_ports))
1630 return; 1630 return;
1631 1631
1632 /* IPv4 gids */ 1632 /* IPv4 gids */
@@ -2323,17 +2323,24 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2323 struct mlx4_dev *dev = ibdev->dev; 2323 struct mlx4_dev *dev = ibdev->dev;
2324 int i; 2324 int i;
2325 unsigned long flags; 2325 unsigned long flags;
2326 struct mlx4_active_ports actv_ports;
2327 unsigned int ports;
2328 unsigned int first_port;
2326 2329
2327 if (!mlx4_is_master(dev)) 2330 if (!mlx4_is_master(dev))
2328 return; 2331 return;
2329 2332
2330 dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC); 2333 actv_ports = mlx4_get_active_ports(dev, slave);
2334 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2335 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2336
2337 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
2331 if (!dm) { 2338 if (!dm) {
2332 pr_err("failed to allocate memory for tunneling qp update\n"); 2339 pr_err("failed to allocate memory for tunneling qp update\n");
2333 goto out; 2340 goto out;
2334 } 2341 }
2335 2342
2336 for (i = 0; i < dev->caps.num_ports; i++) { 2343 for (i = 0; i < ports; i++) {
2337 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); 2344 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2338 if (!dm[i]) { 2345 if (!dm[i]) {
2339 pr_err("failed to allocate memory for tunneling qp update work struct\n"); 2346 pr_err("failed to allocate memory for tunneling qp update work struct\n");
@@ -2345,9 +2352,9 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2345 } 2352 }
2346 } 2353 }
2347 /* initialize or tear down tunnel QPs for the slave */ 2354 /* initialize or tear down tunnel QPs for the slave */
2348 for (i = 0; i < dev->caps.num_ports; i++) { 2355 for (i = 0; i < ports; i++) {
2349 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); 2356 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2350 dm[i]->port = i + 1; 2357 dm[i]->port = first_port + i + 1;
2351 dm[i]->slave = slave; 2358 dm[i]->slave = slave;
2352 dm[i]->do_init = do_init; 2359 dm[i]->do_init = do_init;
2353 dm[i]->dev = ibdev; 2360 dm[i]->dev = ibdev;
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index db2ea31df832..5a38e43eca65 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -627,6 +627,7 @@ static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
627 int port; 627 int port;
628 struct kobject *p, *t; 628 struct kobject *p, *t;
629 struct mlx4_port *mport; 629 struct mlx4_port *mport;
630 struct mlx4_active_ports actv_ports;
630 631
631 get_name(dev, name, slave, sizeof name); 632 get_name(dev, name, slave, sizeof name);
632 633
@@ -649,7 +650,11 @@ static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
649 goto err_ports; 650 goto err_ports;
650 } 651 }
651 652
653 actv_ports = mlx4_get_active_ports(dev->dev, slave);
654
652 for (port = 1; port <= dev->dev->caps.num_ports; ++port) { 655 for (port = 1; port <= dev->dev->caps.num_ports; ++port) {
656 if (!test_bit(port - 1, actv_ports.ports))
657 continue;
653 err = add_port(dev, port, slave); 658 err = add_port(dev, port, slave);
654 if (err) 659 if (err)
655 goto err_add; 660 goto err_add;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 2b0b45ece14b..516c1dd4963b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1643,8 +1643,16 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1643 int port, err; 1643 int port, err;
1644 struct mlx4_vport_state *vp_admin; 1644 struct mlx4_vport_state *vp_admin;
1645 struct mlx4_vport_oper_state *vp_oper; 1645 struct mlx4_vport_oper_state *vp_oper;
1646 1646 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1647 for (port = 1; port <= MLX4_MAX_PORTS; port++) { 1647 &priv->dev, slave);
1648 int min_port = find_first_bit(actv_ports.ports,
1649 priv->dev.caps.num_ports) + 1;
1650 int max_port = min_port - 1 +
1651 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1652
1653 for (port = min_port; port <= max_port; port++) {
1654 if (!test_bit(port - 1, actv_ports.ports))
1655 continue;
1648 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 1656 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1649 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; 1657 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1650 vp_oper->state = *vp_admin; 1658 vp_oper->state = *vp_admin;
@@ -1685,8 +1693,17 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
1685{ 1693{
1686 int port; 1694 int port;
1687 struct mlx4_vport_oper_state *vp_oper; 1695 struct mlx4_vport_oper_state *vp_oper;
1696 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1697 &priv->dev, slave);
1698 int min_port = find_first_bit(actv_ports.ports,
1699 priv->dev.caps.num_ports) + 1;
1700 int max_port = min_port - 1 +
1701 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1702
1688 1703
1689 for (port = 1; port <= MLX4_MAX_PORTS; port++) { 1704 for (port = min_port; port <= max_port; port++) {
1705 if (!test_bit(port - 1, actv_ports.ports))
1706 continue;
1690 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 1707 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1691 if (NO_INDX != vp_oper->vlan_idx) { 1708 if (NO_INDX != vp_oper->vlan_idx) {
1692 __mlx4_unregister_vlan(&priv->dev, 1709 __mlx4_unregister_vlan(&priv->dev,
@@ -2234,6 +2251,112 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2234 return vf+1; 2251 return vf+1;
2235} 2252}
2236 2253
2254int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2255{
2256 if (slave < 1 || slave > dev->num_vfs) {
2257 mlx4_err(dev,
2258 "Bad slave number:%d (number of activated slaves: %lu)\n",
2259 slave, dev->num_slaves);
2260 return -EINVAL;
2261 }
2262 return slave - 1;
2263}
2264
2265struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2266{
2267 struct mlx4_active_ports actv_ports;
2268 int vf;
2269
2270 bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2271
2272 if (slave == 0) {
2273 bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2274 return actv_ports;
2275 }
2276
2277 vf = mlx4_get_vf_indx(dev, slave);
2278 if (vf < 0)
2279 return actv_ports;
2280
2281 bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2282 min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
2283 dev->caps.num_ports));
2284
2285 return actv_ports;
2286}
2287EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2288
2289int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2290{
2291 unsigned n;
2292 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2293 unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2294
2295 if (port <= 0 || port > m)
2296 return -EINVAL;
2297
2298 n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2299 if (port <= n)
2300 port = n + 1;
2301
2302 return port;
2303}
2304EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2305
2306int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2307{
2308 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2309 if (test_bit(port - 1, actv_ports.ports))
2310 return port -
2311 find_first_bit(actv_ports.ports, dev->caps.num_ports);
2312
2313 return -1;
2314}
2315EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2316
2317struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2318 int port)
2319{
2320 unsigned i;
2321 struct mlx4_slaves_pport slaves_pport;
2322
2323 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2324
2325 if (port <= 0 || port > dev->caps.num_ports)
2326 return slaves_pport;
2327
2328 for (i = 0; i < dev->num_vfs + 1; i++) {
2329 struct mlx4_active_ports actv_ports =
2330 mlx4_get_active_ports(dev, i);
2331 if (test_bit(port - 1, actv_ports.ports))
2332 set_bit(i, slaves_pport.slaves);
2333 }
2334
2335 return slaves_pport;
2336}
2337EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2338
2339struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2340 struct mlx4_dev *dev,
2341 const struct mlx4_active_ports *crit_ports)
2342{
2343 unsigned i;
2344 struct mlx4_slaves_pport slaves_pport;
2345
2346 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2347
2348 for (i = 0; i < dev->num_vfs + 1; i++) {
2349 struct mlx4_active_ports actv_ports =
2350 mlx4_get_active_ports(dev, i);
2351 if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2352 dev->caps.num_ports))
2353 set_bit(i, slaves_pport.slaves);
2354 }
2355
2356 return slaves_pport;
2357}
2358EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2359
2237int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) 2360int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2238{ 2361{
2239 struct mlx4_priv *priv = mlx4_priv(dev); 2362 struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8992b38578d5..d501a2b0fb79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -271,7 +271,10 @@ enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave,
271{ 271{
272 struct mlx4_priv *priv = mlx4_priv(dev); 272 struct mlx4_priv *priv = mlx4_priv(dev);
273 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; 273 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
274 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) { 274 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
275
276 if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
277 port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
275 pr_err("%s: Error: asking for slave:%d, port:%d\n", 278 pr_err("%s: Error: asking for slave:%d, port:%d\n",
276 __func__, slave, port); 279 __func__, slave, port);
277 return SLAVE_PORT_DOWN; 280 return SLAVE_PORT_DOWN;
@@ -285,8 +288,10 @@ static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
285{ 288{
286 struct mlx4_priv *priv = mlx4_priv(dev); 289 struct mlx4_priv *priv = mlx4_priv(dev);
287 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; 290 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
291 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
288 292
289 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { 293 if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
294 port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
290 pr_err("%s: Error: asking for slave:%d, port:%d\n", 295 pr_err("%s: Error: asking for slave:%d, port:%d\n",
291 __func__, slave, port); 296 __func__, slave, port);
292 return -1; 297 return -1;
@@ -300,9 +305,13 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
300{ 305{
301 int i; 306 int i;
302 enum slave_port_gen_event gen_event; 307 enum slave_port_gen_event gen_event;
308 struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
309 port);
303 310
304 for (i = 0; i < dev->num_slaves; i++) 311 for (i = 0; i < dev->num_vfs + 1; i++)
305 set_and_calc_slave_port_state(dev, i, port, event, &gen_event); 312 if (test_bit(i, slaves_pport.slaves))
313 set_and_calc_slave_port_state(dev, i, port,
314 event, &gen_event);
306} 315}
307/************************************************************************** 316/**************************************************************************
308 The function get as input the new event to that port, 317 The function get as input the new event to that port,
@@ -321,12 +330,14 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
321 struct mlx4_slave_state *ctx = NULL; 330 struct mlx4_slave_state *ctx = NULL;
322 unsigned long flags; 331 unsigned long flags;
323 int ret = -1; 332 int ret = -1;
333 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
324 enum slave_port_state cur_state = 334 enum slave_port_state cur_state =
325 mlx4_get_slave_port_state(dev, slave, port); 335 mlx4_get_slave_port_state(dev, slave, port);
326 336
327 *gen_event = SLAVE_PORT_GEN_EVENT_NONE; 337 *gen_event = SLAVE_PORT_GEN_EVENT_NONE;
328 338
329 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { 339 if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
340 port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
330 pr_err("%s: Error: asking for slave:%d, port:%d\n", 341 pr_err("%s: Error: asking for slave:%d, port:%d\n",
331 __func__, slave, port); 342 __func__, slave, port);
332 return ret; 343 return ret;
@@ -542,15 +553,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
542 be64_to_cpu(eqe->event.cmd.out_param)); 553 be64_to_cpu(eqe->event.cmd.out_param));
543 break; 554 break;
544 555
545 case MLX4_EVENT_TYPE_PORT_CHANGE: 556 case MLX4_EVENT_TYPE_PORT_CHANGE: {
557 struct mlx4_slaves_pport slaves_port;
546 port = be32_to_cpu(eqe->event.port_change.port) >> 28; 558 port = be32_to_cpu(eqe->event.port_change.port) >> 28;
559 slaves_port = mlx4_phys_to_slaves_pport(dev, port);
547 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { 560 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
548 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, 561 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
549 port); 562 port);
550 mlx4_priv(dev)->sense.do_sense_port[port] = 1; 563 mlx4_priv(dev)->sense.do_sense_port[port] = 1;
551 if (!mlx4_is_master(dev)) 564 if (!mlx4_is_master(dev))
552 break; 565 break;
553 for (i = 0; i < dev->num_slaves; i++) { 566 for (i = 0; i < dev->num_vfs + 1; i++) {
567 if (!test_bit(i, slaves_port.slaves))
568 continue;
554 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { 569 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
555 if (i == mlx4_master_func_num(dev)) 570 if (i == mlx4_master_func_num(dev))
556 continue; 571 continue;
@@ -558,8 +573,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
558 " to slave: %d, port:%d\n", 573 " to slave: %d, port:%d\n",
559 __func__, i, port); 574 __func__, i, port);
560 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 575 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
561 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) 576 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
577 eqe->event.port_change.port =
578 cpu_to_be32(
579 (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
580 | (mlx4_phys_to_slave_port(dev, i, port) << 28));
562 mlx4_slave_event(dev, i, eqe); 581 mlx4_slave_event(dev, i, eqe);
582 }
563 } else { /* IB port */ 583 } else { /* IB port */
564 set_and_calc_slave_port_state(dev, i, port, 584 set_and_calc_slave_port_state(dev, i, port,
565 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, 585 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
@@ -580,12 +600,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
580 if (!mlx4_is_master(dev)) 600 if (!mlx4_is_master(dev))
581 break; 601 break;
582 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 602 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
583 for (i = 0; i < dev->num_slaves; i++) { 603 for (i = 0; i < dev->num_vfs + 1; i++) {
604 if (!test_bit(i, slaves_port.slaves))
605 continue;
584 if (i == mlx4_master_func_num(dev)) 606 if (i == mlx4_master_func_num(dev))
585 continue; 607 continue;
586 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 608 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
587 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) 609 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
610 eqe->event.port_change.port =
611 cpu_to_be32(
612 (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
613 | (mlx4_phys_to_slave_port(dev, i, port) << 28));
588 mlx4_slave_event(dev, i, eqe); 614 mlx4_slave_event(dev, i, eqe);
615 }
589 } 616 }
590 else /* IB port */ 617 else /* IB port */
591 /* port-up event will be sent to a slave when the 618 /* port-up event will be sent to a slave when the
@@ -594,6 +621,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
594 set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); 621 set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
595 } 622 }
596 break; 623 break;
624 }
597 625
598 case MLX4_EVENT_TYPE_CQ_ERROR: 626 case MLX4_EVENT_TYPE_CQ_ERROR:
599 mlx4_warn(dev, "CQ %s on CQN %06x\n", 627 mlx4_warn(dev, "CQ %s on CQN %06x\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index d0d8dd832557..6bd33e2fc17c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -225,13 +225,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
225#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 225#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
226 226
227 if (vhcr->op_modifier == 1) { 227 if (vhcr->op_modifier == 1) {
228 struct mlx4_active_ports actv_ports =
229 mlx4_get_active_ports(dev, slave);
230 int converted_port = mlx4_slave_convert_port(
231 dev, slave, vhcr->in_modifier);
232
233 if (converted_port < 0)
234 return -EINVAL;
235
236 vhcr->in_modifier = converted_port;
228 /* Set nic_info bit to mark new fields support */ 237 /* Set nic_info bit to mark new fields support */
229 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; 238 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
230 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); 239 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
231 240
232 field = vhcr->in_modifier; /* phys-port = logical-port */ 241 /* phys-port = logical-port */
242 field = vhcr->in_modifier -
243 find_first_bit(actv_ports.ports, dev->caps.num_ports);
233 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); 244 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
234 245
246 field = vhcr->in_modifier;
235 /* size is now the QP number */ 247 /* size is now the QP number */
236 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1; 248 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
237 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); 249 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
@@ -249,12 +261,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
249 QUERY_FUNC_CAP_PHYS_PORT_ID); 261 QUERY_FUNC_CAP_PHYS_PORT_ID);
250 262
251 } else if (vhcr->op_modifier == 0) { 263 } else if (vhcr->op_modifier == 0) {
264 struct mlx4_active_ports actv_ports =
265 mlx4_get_active_ports(dev, slave);
252 /* enable rdma and ethernet interfaces, and new quota locations */ 266 /* enable rdma and ethernet interfaces, and new quota locations */
253 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 267 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
254 QUERY_FUNC_CAP_FLAG_QUOTAS); 268 QUERY_FUNC_CAP_FLAG_QUOTAS);
255 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 269 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
256 270
257 field = dev->caps.num_ports; 271 field = min(
272 bitmap_weight(actv_ports.ports, dev->caps.num_ports),
273 dev->caps.num_ports);
258 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 274 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
259 275
260 size = dev->caps.function_caps; /* set PF behaviours */ 276 size = dev->caps.function_caps; /* set PF behaviours */
@@ -840,6 +856,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
840 int err = 0; 856 int err = 0;
841 u8 field; 857 u8 field;
842 u32 bmme_flags; 858 u32 bmme_flags;
859 int real_port;
860 int slave_port;
861 int first_port;
862 struct mlx4_active_ports actv_ports;
843 863
844 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 864 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
845 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 865 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -852,8 +872,26 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
852 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 872 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
853 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; 873 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
854 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; 874 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
875 actv_ports = mlx4_get_active_ports(dev, slave);
876 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
877 for (slave_port = 0, real_port = first_port;
878 real_port < first_port +
879 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
880 ++real_port, ++slave_port) {
881 if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
882 flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
883 else
884 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
885 }
886 for (; slave_port < dev->caps.num_ports; ++slave_port)
887 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
855 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 888 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
856 889
890 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
891 field &= ~0x0F;
892 field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
893 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
894
857 /* For guests, disable timestamp */ 895 /* For guests, disable timestamp */
858 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 896 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
859 field &= 0x7f; 897 field &= 0x7f;
@@ -903,12 +941,20 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
903 u16 short_field; 941 u16 short_field;
904 int err; 942 int err;
905 int admin_link_state; 943 int admin_link_state;
944 int port = mlx4_slave_convert_port(dev, slave,
945 vhcr->in_modifier & 0xFF);
906 946
907#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 947#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
908#define MLX4_PORT_LINK_UP_MASK 0x80 948#define MLX4_PORT_LINK_UP_MASK 0x80
909#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c 949#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
910#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e 950#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
911 951
952 if (port < 0)
953 return -EINVAL;
954
955 vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
956 (port & 0xFF);
957
912 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, 958 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
913 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, 959 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
914 MLX4_CMD_NATIVE); 960 MLX4_CMD_NATIVE);
@@ -936,7 +982,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
936 QUERY_PORT_SUPPORTED_TYPE_OFFSET); 982 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
937 983
938 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH) 984 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
939 short_field = mlx4_get_slave_num_gids(dev, slave); 985 short_field = mlx4_get_slave_num_gids(dev, slave, port);
940 else 986 else
941 short_field = 1; /* slave max gids */ 987 short_field = 1; /* slave max gids */
942 MLX4_PUT(outbox->buf, short_field, 988 MLX4_PUT(outbox->buf, short_field,
@@ -1588,9 +1634,12 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
1588 struct mlx4_cmd_info *cmd) 1634 struct mlx4_cmd_info *cmd)
1589{ 1635{
1590 struct mlx4_priv *priv = mlx4_priv(dev); 1636 struct mlx4_priv *priv = mlx4_priv(dev);
1591 int port = vhcr->in_modifier; 1637 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
1592 int err; 1638 int err;
1593 1639
1640 if (port < 0)
1641 return -EINVAL;
1642
1594 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) 1643 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
1595 return 0; 1644 return 0;
1596 1645
@@ -1680,9 +1729,12 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
1680 struct mlx4_cmd_info *cmd) 1729 struct mlx4_cmd_info *cmd)
1681{ 1730{
1682 struct mlx4_priv *priv = mlx4_priv(dev); 1731 struct mlx4_priv *priv = mlx4_priv(dev);
1683 int port = vhcr->in_modifier; 1732 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
1684 int err; 1733 int err;
1685 1734
1735 if (port < 0)
1736 return -EINVAL;
1737
1686 if (!(priv->mfunc.master.slave_state[slave].init_port_mask & 1738 if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
1687 (1 << port))) 1739 (1 << port)))
1688 return 0; 1740 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index abd0b1d277aa..61d7bcff4533 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -77,13 +77,17 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
77 77
78#endif /* CONFIG_PCI_MSI */ 78#endif /* CONFIG_PCI_MSI */
79 79
80static int num_vfs; 80static uint8_t num_vfs[3] = {0, 0, 0};
81module_param(num_vfs, int, 0444); 81static int num_vfs_argc = 3;
82MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0"); 82module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
83 83MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
84static int probe_vf; 84 "num_vfs=port1,port2,port1+2");
85module_param(probe_vf, int, 0644); 85
86MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); 86static uint8_t probe_vf[3] = {0, 0, 0};
87static int probe_vfs_argc = 3;
88module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
89MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
90 "probe_vf=port1,port2,port1+2");
87 91
88int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 92int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
89module_param_named(log_num_mgm_entry_size, 93module_param_named(log_num_mgm_entry_size,
@@ -1471,7 +1475,7 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
1471 for (i = 1; i <= dev->caps.num_ports; i++) { 1475 for (i = 1; i <= dev->caps.num_ports; i++) {
1472 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 1476 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
1473 dev->caps.gid_table_len[i] = 1477 dev->caps.gid_table_len[i] =
1474 mlx4_get_slave_num_gids(dev, 0); 1478 mlx4_get_slave_num_gids(dev, 0, i);
1475 else 1479 else
1476 dev->caps.gid_table_len[i] = 1; 1480 dev->caps.gid_table_len[i] = 1;
1477 dev->caps.pkey_table_len[i] = 1481 dev->caps.pkey_table_len[i] =
@@ -1498,7 +1502,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1498 if (mlx4_log_num_mgm_entry_size == -1 && 1502 if (mlx4_log_num_mgm_entry_size == -1 &&
1499 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 1503 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
1500 (!mlx4_is_mfunc(dev) || 1504 (!mlx4_is_mfunc(dev) ||
1501 (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) && 1505 (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
1502 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 1506 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1503 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 1507 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1504 dev->oper_log_mgm_entry_size = 1508 dev->oper_log_mgm_entry_size =
@@ -2193,6 +2197,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2193 struct mlx4_dev *dev; 2197 struct mlx4_dev *dev;
2194 int err; 2198 int err;
2195 int port; 2199 int port;
2200 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
2201 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
2202 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
2203 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
2204 unsigned total_vfs = 0;
2205 int sriov_initialized = 0;
2206 unsigned int i;
2196 2207
2197 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 2208 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
2198 2209
@@ -2207,17 +2218,40 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2207 * per port, we must limit the number of VFs to 63 (since their are 2218 * per port, we must limit the number of VFs to 63 (since their are
2208 * 128 MACs) 2219 * 128 MACs)
2209 */ 2220 */
2210 if (num_vfs >= MLX4_MAX_NUM_VF) { 2221 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
2222 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
2223 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
2224 if (nvfs[i] < 0) {
2225 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
2226 return -EINVAL;
2227 }
2228 }
2229 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
2230 i++) {
2231 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
2232 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
2233 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
2234 return -EINVAL;
2235 }
2236 }
2237 if (total_vfs >= MLX4_MAX_NUM_VF) {
2211 dev_err(&pdev->dev, 2238 dev_err(&pdev->dev,
2212 "Requested more VF's (%d) than allowed (%d)\n", 2239 "Requested more VF's (%d) than allowed (%d)\n",
2213 num_vfs, MLX4_MAX_NUM_VF - 1); 2240 total_vfs, MLX4_MAX_NUM_VF - 1);
2214 return -EINVAL; 2241 return -EINVAL;
2215 } 2242 }
2216 2243
2217 if (num_vfs < 0) { 2244 for (i = 0; i < MLX4_MAX_PORTS; i++) {
2218 pr_err("num_vfs module parameter cannot be negative\n"); 2245 if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
2219 return -EINVAL; 2246 dev_err(&pdev->dev,
2247 "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
2248 nvfs[i] + nvfs[2], i + 1,
2249 MLX4_MAX_NUM_VF_P_PORT - 1);
2250 return -EINVAL;
2251 }
2220 } 2252 }
2253
2254
2221 /* 2255 /*
2222 * Check for BARs. 2256 * Check for BARs.
2223 */ 2257 */
@@ -2292,11 +2326,23 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2292 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 2326 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
2293 /* When acting as pf, we normally skip vfs unless explicitly 2327 /* When acting as pf, we normally skip vfs unless explicitly
2294 * requested to probe them. */ 2328 * requested to probe them. */
2295 if (num_vfs && extended_func_num(pdev) > probe_vf) { 2329 if (total_vfs) {
2296 mlx4_warn(dev, "Skipping virtual function:%d\n", 2330 unsigned vfs_offset = 0;
2297 extended_func_num(pdev)); 2331 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
2298 err = -ENODEV; 2332 vfs_offset + nvfs[i] < extended_func_num(pdev);
2299 goto err_free_dev; 2333 vfs_offset += nvfs[i], i++)
2334 ;
2335 if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
2336 err = -ENODEV;
2337 goto err_free_dev;
2338 }
2339 if ((extended_func_num(pdev) - vfs_offset)
2340 > prb_vf[i]) {
2341 mlx4_warn(dev, "Skipping virtual function:%d\n",
2342 extended_func_num(pdev));
2343 err = -ENODEV;
2344 goto err_free_dev;
2345 }
2300 } 2346 }
2301 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 2347 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
2302 dev->flags |= MLX4_FLAG_SLAVE; 2348 dev->flags |= MLX4_FLAG_SLAVE;
@@ -2316,22 +2362,30 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2316 } 2362 }
2317 } 2363 }
2318 2364
2319 if (num_vfs) { 2365 if (total_vfs) {
2320 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); 2366 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
2321 2367 total_vfs);
2322 atomic_inc(&pf_loading); 2368 dev->dev_vfs = kzalloc(
2323 err = pci_enable_sriov(pdev, num_vfs); 2369 total_vfs * sizeof(*dev->dev_vfs),
2324 atomic_dec(&pf_loading); 2370 GFP_KERNEL);
2325 2371 if (NULL == dev->dev_vfs) {
2326 if (err) { 2372 mlx4_err(dev, "Failed to allocate memory for VFs\n");
2327 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2328 err);
2329 err = 0; 2373 err = 0;
2330 } else { 2374 } else {
2331 mlx4_warn(dev, "Running in master mode\n"); 2375 atomic_inc(&pf_loading);
2332 dev->flags |= MLX4_FLAG_SRIOV | 2376 err = pci_enable_sriov(pdev, total_vfs);
2333 MLX4_FLAG_MASTER; 2377 atomic_dec(&pf_loading);
2334 dev->num_vfs = num_vfs; 2378 if (err) {
2379 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2380 err);
2381 err = 0;
2382 } else {
2383 mlx4_warn(dev, "Running in master mode\n");
2384 dev->flags |= MLX4_FLAG_SRIOV |
2385 MLX4_FLAG_MASTER;
2386 dev->num_vfs = total_vfs;
2387 sriov_initialized = 1;
2388 }
2335 } 2389 }
2336 } 2390 }
2337 2391
@@ -2396,12 +2450,37 @@ slave_start:
2396 /* In master functions, the communication channel must be initialized 2450 /* In master functions, the communication channel must be initialized
2397 * after obtaining its address from fw */ 2451 * after obtaining its address from fw */
2398 if (mlx4_is_master(dev)) { 2452 if (mlx4_is_master(dev)) {
2453 unsigned sum = 0;
2399 err = mlx4_multi_func_init(dev); 2454 err = mlx4_multi_func_init(dev);
2400 if (err) { 2455 if (err) {
2401 mlx4_err(dev, "Failed to init master mfunc" 2456 mlx4_err(dev, "Failed to init master mfunc"
2402 "interface, aborting.\n"); 2457 "interface, aborting.\n");
2403 goto err_close; 2458 goto err_close;
2404 } 2459 }
2460 if (sriov_initialized) {
2461 int ib_ports = 0;
2462 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2463 ib_ports++;
2464
2465 if (ib_ports &&
2466 (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
2467 mlx4_err(dev,
2468 "Invalid syntax of num_vfs/probe_vfs "
2469 "with IB port. Single port VFs syntax"
2470 " is only supported when all ports "
2471 "are configured as ethernet\n");
2472 goto err_close;
2473 }
2474 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
2475 unsigned j;
2476 for (j = 0; j < nvfs[i]; ++sum, ++j) {
2477 dev->dev_vfs[sum].min_port =
2478 i < 2 ? i + 1 : 1;
2479 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
2480 dev->caps.num_ports;
2481 }
2482 }
2483 }
2405 } 2484 }
2406 2485
2407 err = mlx4_alloc_eq_table(dev); 2486 err = mlx4_alloc_eq_table(dev);
@@ -2509,6 +2588,8 @@ err_rel_own:
2509 if (!mlx4_is_slave(dev)) 2588 if (!mlx4_is_slave(dev))
2510 mlx4_free_ownership(dev); 2589 mlx4_free_ownership(dev);
2511 2590
2591 kfree(priv->dev.dev_vfs);
2592
2512err_free_dev: 2593err_free_dev:
2513 kfree(priv); 2594 kfree(priv);
2514 2595
@@ -2595,6 +2676,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
2595 kfree(dev->caps.qp0_proxy); 2676 kfree(dev->caps.qp0_proxy);
2596 kfree(dev->caps.qp1_tunnel); 2677 kfree(dev->caps.qp1_tunnel);
2597 kfree(dev->caps.qp1_proxy); 2678 kfree(dev->caps.qp1_proxy);
2679 kfree(dev->dev_vfs);
2598 2680
2599 kfree(priv); 2681 kfree(priv);
2600 pci_release_regions(pdev); 2682 pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index db7dc0b6667d..80ccb4edf825 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1387,9 +1387,12 @@ int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
1387 struct mlx4_cmd_info *cmd) 1387 struct mlx4_cmd_info *cmd)
1388{ 1388{
1389 u32 qpn = (u32) vhcr->in_param & 0xffffffff; 1389 u32 qpn = (u32) vhcr->in_param & 0xffffffff;
1390 u8 port = vhcr->in_param >> 62; 1390 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
1391 enum mlx4_steer_type steer = vhcr->in_modifier; 1391 enum mlx4_steer_type steer = vhcr->in_modifier;
1392 1392
1393 if (port < 0)
1394 return -EINVAL;
1395
1393 /* Promiscuous unicast is not allowed in mfunc */ 1396 /* Promiscuous unicast is not allowed in mfunc */
1394 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) 1397 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
1395 return 0; 1398 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 6ba38c98c492..9fca6c150de3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1287,7 +1287,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
1287 1287
1288void mlx4_init_quotas(struct mlx4_dev *dev); 1288void mlx4_init_quotas(struct mlx4_dev *dev);
1289 1289
1290int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave); 1290int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
1291int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave); 1291/* Returns the VF index of slave */
1292int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
1292 1293
1293#endif /* MLX4_H */ 1294#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index ece328166e94..2705b9ab9463 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -507,30 +507,82 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
507} 507}
508static struct mlx4_roce_gid_entry zgid_entry; 508static struct mlx4_roce_gid_entry zgid_entry;
509 509
510int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave) 510int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
511{ 511{
512 int vfs;
513 int slave_gid = slave;
514 unsigned i;
515 struct mlx4_slaves_pport slaves_pport;
516 struct mlx4_active_ports actv_ports;
517 unsigned max_port_p_one;
518
512 if (slave == 0) 519 if (slave == 0)
513 return MLX4_ROCE_PF_GIDS; 520 return MLX4_ROCE_PF_GIDS;
514 if (slave <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % dev->num_vfs)) 521
515 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / dev->num_vfs) + 1; 522 /* Slave is a VF */
516 return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / dev->num_vfs; 523 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
524 actv_ports = mlx4_get_active_ports(dev, slave);
525 max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
526 bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
527
528 for (i = 1; i < max_port_p_one; i++) {
529 struct mlx4_active_ports exclusive_ports;
530 struct mlx4_slaves_pport slaves_pport_actv;
531 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
532 set_bit(i - 1, exclusive_ports.ports);
533 if (i == port)
534 continue;
535 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
536 dev, &exclusive_ports);
537 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
538 dev->num_vfs + 1);
539 }
540 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
541 if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
542 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
543 return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
517} 544}
518 545
519int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave) 546int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
520{ 547{
521 int gids; 548 int gids;
549 unsigned i;
550 int slave_gid = slave;
522 int vfs; 551 int vfs;
523 552
524 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; 553 struct mlx4_slaves_pport slaves_pport;
525 vfs = dev->num_vfs; 554 struct mlx4_active_ports actv_ports;
555 unsigned max_port_p_one;
526 556
527 if (slave == 0) 557 if (slave == 0)
528 return 0; 558 return 0;
529 if (slave <= gids % vfs)
530 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave - 1);
531 559
532 return MLX4_ROCE_PF_GIDS + (gids % vfs) + ((gids / vfs) * (slave - 1)); 560 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
561 actv_ports = mlx4_get_active_ports(dev, slave);
562 max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
563 bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
564
565 for (i = 1; i < max_port_p_one; i++) {
566 struct mlx4_active_ports exclusive_ports;
567 struct mlx4_slaves_pport slaves_pport_actv;
568 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
569 set_bit(i - 1, exclusive_ports.ports);
570 if (i == port)
571 continue;
572 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
573 dev, &exclusive_ports);
574 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
575 dev->num_vfs + 1);
576 }
577 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
578 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
579 if (slave_gid <= gids % vfs)
580 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
581
582 return MLX4_ROCE_PF_GIDS + (gids % vfs) +
583 ((gids / vfs) * (slave_gid - 1));
533} 584}
585EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
534 586
535static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, 587static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
536 u8 op_mod, struct mlx4_cmd_mailbox *inbox) 588 u8 op_mod, struct mlx4_cmd_mailbox *inbox)
@@ -617,8 +669,8 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
617 * need a FOR-loop here over number of gids the guest has. 669 * need a FOR-loop here over number of gids the guest has.
618 * 1. Check no duplicates in gids passed by slave 670 * 1. Check no duplicates in gids passed by slave
619 */ 671 */
620 num_gids = mlx4_get_slave_num_gids(dev, slave); 672 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
621 base = mlx4_get_base_gid_ix(dev, slave); 673 base = mlx4_get_base_gid_ix(dev, slave, port);
622 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 674 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
623 for (i = 0; i < num_gids; gid_entry_mbox++, i++) { 675 for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
624 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, 676 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
@@ -738,6 +790,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
738 struct mlx4_cmd_mailbox *outbox, 790 struct mlx4_cmd_mailbox *outbox,
739 struct mlx4_cmd_info *cmd) 791 struct mlx4_cmd_info *cmd)
740{ 792{
793 int port = mlx4_slave_convert_port(
794 dev, slave, vhcr->in_modifier & 0xFF);
795
796 if (port < 0)
797 return -EINVAL;
798
799 vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
800 (port & 0xFF);
801
741 return mlx4_common_set_port(dev, slave, vhcr->in_modifier, 802 return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
742 vhcr->op_modifier, inbox); 803 vhcr->op_modifier, inbox);
743} 804}
@@ -1026,10 +1087,16 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1026 struct mlx4_priv *priv = mlx4_priv(dev); 1087 struct mlx4_priv *priv = mlx4_priv(dev);
1027 int i, found_ix = -1; 1088 int i, found_ix = -1;
1028 int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; 1089 int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1090 struct mlx4_slaves_pport slaves_pport;
1091 unsigned num_vfs;
1092 int slave_gid;
1029 1093
1030 if (!mlx4_is_mfunc(dev)) 1094 if (!mlx4_is_mfunc(dev))
1031 return -EINVAL; 1095 return -EINVAL;
1032 1096
1097 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1098 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
1099
1033 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 1100 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1034 if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) { 1101 if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) {
1035 found_ix = i; 1102 found_ix = i;
@@ -1039,16 +1106,67 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1039 1106
1040 if (found_ix >= 0) { 1107 if (found_ix >= 0) {
1041 if (found_ix < MLX4_ROCE_PF_GIDS) 1108 if (found_ix < MLX4_ROCE_PF_GIDS)
1042 *slave_id = 0; 1109 slave_gid = 0;
1043 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % dev->num_vfs) * 1110 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
1044 (vf_gids / dev->num_vfs + 1)) 1111 (vf_gids / num_vfs + 1))
1045 *slave_id = ((found_ix - MLX4_ROCE_PF_GIDS) / 1112 slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
1046 (vf_gids / dev->num_vfs + 1)) + 1; 1113 (vf_gids / num_vfs + 1)) + 1;
1047 else 1114 else
1048 *slave_id = 1115 slave_gid =
1049 ((found_ix - MLX4_ROCE_PF_GIDS - 1116 ((found_ix - MLX4_ROCE_PF_GIDS -
1050 ((vf_gids % dev->num_vfs) * ((vf_gids / dev->num_vfs + 1)))) / 1117 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1051 (vf_gids / dev->num_vfs)) + vf_gids % dev->num_vfs + 1; 1118 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
1119
1120 if (slave_gid) {
1121 struct mlx4_active_ports exclusive_ports;
1122 struct mlx4_active_ports actv_ports;
1123 struct mlx4_slaves_pport slaves_pport_actv;
1124 unsigned max_port_p_one;
1125 int num_slaves_before = 1;
1126
1127 for (i = 1; i < port; i++) {
1128 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1129 set_bit(i, exclusive_ports.ports);
1130 slaves_pport_actv =
1131 mlx4_phys_to_slaves_pport_actv(
1132 dev, &exclusive_ports);
1133 num_slaves_before += bitmap_weight(
1134 slaves_pport_actv.slaves,
1135 dev->num_vfs + 1);
1136 }
1137
1138 if (slave_gid < num_slaves_before) {
1139 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1140 set_bit(port - 1, exclusive_ports.ports);
1141 slaves_pport_actv =
1142 mlx4_phys_to_slaves_pport_actv(
1143 dev, &exclusive_ports);
1144 slave_gid += bitmap_weight(
1145 slaves_pport_actv.slaves,
1146 dev->num_vfs + 1) -
1147 num_slaves_before;
1148 }
1149 actv_ports = mlx4_get_active_ports(dev, slave_gid);
1150 max_port_p_one = find_first_bit(
1151 actv_ports.ports, dev->caps.num_ports) +
1152 bitmap_weight(actv_ports.ports,
1153 dev->caps.num_ports) + 1;
1154
1155 for (i = 1; i < max_port_p_one; i++) {
1156 if (i == port)
1157 continue;
1158 bitmap_zero(exclusive_ports.ports,
1159 dev->caps.num_ports);
1160 set_bit(i - 1, exclusive_ports.ports);
1161 slaves_pport_actv =
1162 mlx4_phys_to_slaves_pport_actv(
1163 dev, &exclusive_ports);
1164 slave_gid += bitmap_weight(
1165 slaves_pport_actv.slaves,
1166 dev->num_vfs + 1);
1167 }
1168 }
1169 *slave_id = slave_gid;
1052 } 1170 }
1053 1171
1054 return (found_ix >= 0) ? 0 : -EINVAL; 1172 return (found_ix >= 0) ? 0 : -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 74e490d70184..2a33513a0e31 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -468,6 +468,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
468 468
469 spin_lock_init(&res_alloc->alloc_lock); 469 spin_lock_init(&res_alloc->alloc_lock);
470 for (t = 0; t < dev->num_vfs + 1; t++) { 470 for (t = 0; t < dev->num_vfs + 1; t++) {
471 struct mlx4_active_ports actv_ports =
472 mlx4_get_active_ports(dev, t);
471 switch (i) { 473 switch (i) {
472 case RES_QP: 474 case RES_QP:
473 initialize_res_quotas(dev, res_alloc, RES_QP, 475 initialize_res_quotas(dev, res_alloc, RES_QP,
@@ -497,10 +499,27 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
497 break; 499 break;
498 case RES_MAC: 500 case RES_MAC:
499 if (t == mlx4_master_func_num(dev)) { 501 if (t == mlx4_master_func_num(dev)) {
500 res_alloc->quota[t] = MLX4_MAX_MAC_NUM; 502 int max_vfs_pport = 0;
503 /* Calculate the max vfs per port for */
504 /* both ports. */
505 for (j = 0; j < dev->caps.num_ports;
506 j++) {
507 struct mlx4_slaves_pport slaves_pport =
508 mlx4_phys_to_slaves_pport(dev, j + 1);
509 unsigned current_slaves =
510 bitmap_weight(slaves_pport.slaves,
511 dev->caps.num_ports) - 1;
512 if (max_vfs_pport < current_slaves)
513 max_vfs_pport =
514 current_slaves;
515 }
516 res_alloc->quota[t] =
517 MLX4_MAX_MAC_NUM -
518 2 * max_vfs_pport;
501 res_alloc->guaranteed[t] = 2; 519 res_alloc->guaranteed[t] = 2;
502 for (j = 0; j < MLX4_MAX_PORTS; j++) 520 for (j = 0; j < MLX4_MAX_PORTS; j++)
503 res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM; 521 res_alloc->res_port_free[j] =
522 MLX4_MAX_MAC_NUM;
504 } else { 523 } else {
505 res_alloc->quota[t] = MLX4_MAX_MAC_NUM; 524 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
506 res_alloc->guaranteed[t] = 2; 525 res_alloc->guaranteed[t] = 2;
@@ -528,9 +547,10 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
528 break; 547 break;
529 } 548 }
530 if (i == RES_MAC || i == RES_VLAN) { 549 if (i == RES_MAC || i == RES_VLAN) {
531 for (j = 0; j < MLX4_MAX_PORTS; j++) 550 for (j = 0; j < dev->caps.num_ports; j++)
532 res_alloc->res_port_rsvd[j] += 551 if (test_bit(j, actv_ports.ports))
533 res_alloc->guaranteed[t]; 552 res_alloc->res_port_rsvd[j] +=
553 res_alloc->guaranteed[t];
534 } else { 554 } else {
535 res_alloc->res_reserved += res_alloc->guaranteed[t]; 555 res_alloc->res_reserved += res_alloc->guaranteed[t];
536 } 556 }
@@ -612,7 +632,8 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
612 if (MLX4_QP_ST_UD == ts) { 632 if (MLX4_QP_ST_UD == ts) {
613 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 633 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
614 if (mlx4_is_eth(dev, port)) 634 if (mlx4_is_eth(dev, port))
615 qp_ctx->pri_path.mgid_index = mlx4_get_base_gid_ix(dev, slave) | 0x80; 635 qp_ctx->pri_path.mgid_index =
636 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
616 else 637 else
617 qp_ctx->pri_path.mgid_index = slave | 0x80; 638 qp_ctx->pri_path.mgid_index = slave | 0x80;
618 639
@@ -620,7 +641,8 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
620 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { 641 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
621 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 642 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
622 if (mlx4_is_eth(dev, port)) { 643 if (mlx4_is_eth(dev, port)) {
623 qp_ctx->pri_path.mgid_index += mlx4_get_base_gid_ix(dev, slave); 644 qp_ctx->pri_path.mgid_index +=
645 mlx4_get_base_gid_ix(dev, slave, port);
624 qp_ctx->pri_path.mgid_index &= 0x7f; 646 qp_ctx->pri_path.mgid_index &= 0x7f;
625 } else { 647 } else {
626 qp_ctx->pri_path.mgid_index = slave & 0x7F; 648 qp_ctx->pri_path.mgid_index = slave & 0x7F;
@@ -629,7 +651,8 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
629 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { 651 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
630 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; 652 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
631 if (mlx4_is_eth(dev, port)) { 653 if (mlx4_is_eth(dev, port)) {
632 qp_ctx->alt_path.mgid_index += mlx4_get_base_gid_ix(dev, slave); 654 qp_ctx->alt_path.mgid_index +=
655 mlx4_get_base_gid_ix(dev, slave, port);
633 qp_ctx->alt_path.mgid_index &= 0x7f; 656 qp_ctx->alt_path.mgid_index &= 0x7f;
634 } else { 657 } else {
635 qp_ctx->alt_path.mgid_index = slave & 0x7F; 658 qp_ctx->alt_path.mgid_index = slave & 0x7F;
@@ -1780,6 +1803,11 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1780 return err; 1803 return err;
1781 1804
1782 port = !in_port ? get_param_l(out_param) : in_port; 1805 port = !in_port ? get_param_l(out_param) : in_port;
1806 port = mlx4_slave_convert_port(
1807 dev, slave, port);
1808
1809 if (port < 0)
1810 return -EINVAL;
1783 mac = in_param; 1811 mac = in_param;
1784 1812
1785 err = __mlx4_register_mac(dev, port, mac); 1813 err = __mlx4_register_mac(dev, port, mac);
@@ -1887,6 +1915,11 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1887 if (!port || op != RES_OP_RESERVE_AND_MAP) 1915 if (!port || op != RES_OP_RESERVE_AND_MAP)
1888 return -EINVAL; 1916 return -EINVAL;
1889 1917
1918 port = mlx4_slave_convert_port(
1919 dev, slave, port);
1920
1921 if (port < 0)
1922 return -EINVAL;
1890 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */ 1923 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1891 if (!in_port && port > 0 && port <= dev->caps.num_ports) { 1924 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1892 slave_state[slave].old_vlan_api = true; 1925 slave_state[slave].old_vlan_api = true;
@@ -2184,6 +2217,11 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2184 switch (op) { 2217 switch (op) {
2185 case RES_OP_RESERVE_AND_MAP: 2218 case RES_OP_RESERVE_AND_MAP:
2186 port = !in_port ? get_param_l(out_param) : in_port; 2219 port = !in_port ? get_param_l(out_param) : in_port;
2220 port = mlx4_slave_convert_port(
2221 dev, slave, port);
2222
2223 if (port < 0)
2224 return -EINVAL;
2187 mac_del_from_slave(dev, slave, in_param, port); 2225 mac_del_from_slave(dev, slave, in_param, port);
2188 __mlx4_unregister_mac(dev, port, in_param); 2226 __mlx4_unregister_mac(dev, port, in_param);
2189 break; 2227 break;
@@ -2203,6 +2241,11 @@ static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2203 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 2241 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2204 int err = 0; 2242 int err = 0;
2205 2243
2244 port = mlx4_slave_convert_port(
2245 dev, slave, port);
2246
2247 if (port < 0)
2248 return -EINVAL;
2206 switch (op) { 2249 switch (op) {
2207 case RES_OP_RESERVE_AND_MAP: 2250 case RES_OP_RESERVE_AND_MAP:
2208 if (slave_state[slave].old_vlan_api) 2251 if (slave_state[slave].old_vlan_api)
@@ -2811,7 +2854,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
2811 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { 2854 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2812 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 2855 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2813 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) 2856 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2814 num_gids = mlx4_get_slave_num_gids(dev, slave); 2857 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2815 else 2858 else
2816 num_gids = 1; 2859 num_gids = 1;
2817 if (qp_ctx->pri_path.mgid_index >= num_gids) 2860 if (qp_ctx->pri_path.mgid_index >= num_gids)
@@ -2820,7 +2863,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
2820 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { 2863 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2821 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; 2864 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2822 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) 2865 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2823 num_gids = mlx4_get_slave_num_gids(dev, slave); 2866 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2824 else 2867 else
2825 num_gids = 1; 2868 num_gids = 1;
2826 if (qp_ctx->alt_path.mgid_index >= num_gids) 2869 if (qp_ctx->alt_path.mgid_index >= num_gids)
@@ -3338,6 +3381,39 @@ int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3338 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3381 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3339} 3382}
3340 3383
3384static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3385 struct mlx4_qp_context *qpc,
3386 struct mlx4_cmd_mailbox *inbox)
3387{
3388 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3389 u8 pri_sched_queue;
3390 int port = mlx4_slave_convert_port(
3391 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3392
3393 if (port < 0)
3394 return -EINVAL;
3395
3396 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3397 ((port & 1) << 6);
3398
3399 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3400 mlx4_is_eth(dev, port + 1)) {
3401 qpc->pri_path.sched_queue = pri_sched_queue;
3402 }
3403
3404 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3405 port = mlx4_slave_convert_port(
3406 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3407 + 1) - 1;
3408 if (port < 0)
3409 return -EINVAL;
3410 qpc->alt_path.sched_queue =
3411 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3412 (port & 1) << 6;
3413 }
3414 return 0;
3415}
3416
3341static int roce_verify_mac(struct mlx4_dev *dev, int slave, 3417static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3342 struct mlx4_qp_context *qpc, 3418 struct mlx4_qp_context *qpc,
3343 struct mlx4_cmd_mailbox *inbox) 3419 struct mlx4_cmd_mailbox *inbox)
@@ -3375,6 +3451,9 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3375 u8 orig_vlan_index = qpc->pri_path.vlan_index; 3451 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3376 u8 orig_feup = qpc->pri_path.feup; 3452 u8 orig_feup = qpc->pri_path.feup;
3377 3453
3454 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3455 if (err)
3456 return err;
3378 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); 3457 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3379 if (err) 3458 if (err)
3380 return err; 3459 return err;
@@ -3426,6 +3505,9 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3426 int err; 3505 int err;
3427 struct mlx4_qp_context *context = inbox->buf + 8; 3506 struct mlx4_qp_context *context = inbox->buf + 8;
3428 3507
3508 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3509 if (err)
3510 return err;
3429 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave); 3511 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3430 if (err) 3512 if (err)
3431 return err; 3513 return err;
@@ -3445,6 +3527,9 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3445 int err; 3527 int err;
3446 struct mlx4_qp_context *context = inbox->buf + 8; 3528 struct mlx4_qp_context *context = inbox->buf + 8;
3447 3529
3530 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3531 if (err)
3532 return err;
3448 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave); 3533 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3449 if (err) 3534 if (err)
3450 return err; 3535 return err;
@@ -3463,6 +3548,9 @@ int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3463 struct mlx4_cmd_info *cmd) 3548 struct mlx4_cmd_info *cmd)
3464{ 3549{
3465 struct mlx4_qp_context *context = inbox->buf + 8; 3550 struct mlx4_qp_context *context = inbox->buf + 8;
3551 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3552 if (err)
3553 return err;
3466 adjust_proxy_tun_qkey(dev, vhcr, context); 3554 adjust_proxy_tun_qkey(dev, vhcr, context);
3467 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3555 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3468} 3556}
@@ -3476,6 +3564,9 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3476 int err; 3564 int err;
3477 struct mlx4_qp_context *context = inbox->buf + 8; 3565 struct mlx4_qp_context *context = inbox->buf + 8;
3478 3566
3567 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3568 if (err)
3569 return err;
3479 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave); 3570 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3480 if (err) 3571 if (err)
3481 return err; 3572 return err;
@@ -3495,6 +3586,9 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3495 int err; 3586 int err;
3496 struct mlx4_qp_context *context = inbox->buf + 8; 3587 struct mlx4_qp_context *context = inbox->buf + 8;
3497 3588
3589 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3590 if (err)
3591 return err;
3498 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave); 3592 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3499 if (err) 3593 if (err)
3500 return err; 3594 return err;
@@ -3598,16 +3692,26 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3598 return err; 3692 return err;
3599} 3693}
3600 3694
3601static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 3695static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3602 int block_loopback, enum mlx4_protocol prot, 3696 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3603 enum mlx4_steer_type type, u64 *reg_id) 3697 enum mlx4_steer_type type, u64 *reg_id)
3604{ 3698{
3605 switch (dev->caps.steering_mode) { 3699 switch (dev->caps.steering_mode) {
3606 case MLX4_STEERING_MODE_DEVICE_MANAGED: 3700 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3607 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5], 3701 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3702 if (port < 0)
3703 return port;
3704 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3608 block_loopback, prot, 3705 block_loopback, prot,
3609 reg_id); 3706 reg_id);
3707 }
3610 case MLX4_STEERING_MODE_B0: 3708 case MLX4_STEERING_MODE_B0:
3709 if (prot == MLX4_PROT_ETH) {
3710 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3711 if (port < 0)
3712 return port;
3713 gid[5] = port;
3714 }
3611 return mlx4_qp_attach_common(dev, qp, gid, 3715 return mlx4_qp_attach_common(dev, qp, gid,
3612 block_loopback, prot, type); 3716 block_loopback, prot, type);
3613 default: 3717 default:
@@ -3615,9 +3719,9 @@ static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3615 } 3719 }
3616} 3720}
3617 3721
3618static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 3722static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3619 enum mlx4_protocol prot, enum mlx4_steer_type type, 3723 u8 gid[16], enum mlx4_protocol prot,
3620 u64 reg_id) 3724 enum mlx4_steer_type type, u64 reg_id)
3621{ 3725{
3622 switch (dev->caps.steering_mode) { 3726 switch (dev->caps.steering_mode) {
3623 case MLX4_STEERING_MODE_DEVICE_MANAGED: 3727 case MLX4_STEERING_MODE_DEVICE_MANAGED:
@@ -3654,7 +3758,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3654 3758
3655 qp.qpn = qpn; 3759 qp.qpn = qpn;
3656 if (attach) { 3760 if (attach) {
3657 err = qp_attach(dev, &qp, gid, block_loopback, prot, 3761 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3658 type, &reg_id); 3762 type, &reg_id);
3659 if (err) { 3763 if (err) {
3660 pr_err("Fail to attach rule to qp 0x%x\n", qpn); 3764 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
@@ -3790,6 +3894,9 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3790 return -EOPNOTSUPP; 3894 return -EOPNOTSUPP;
3791 3895
3792 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 3896 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3897 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
3898 if (ctrl->port <= 0)
3899 return -EINVAL;
3793 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 3900 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3794 err = get_res(dev, slave, qpn, RES_QP, &rqp); 3901 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3795 if (err) { 3902 if (err) {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f211b51dc726..6b3998396b99 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -84,6 +84,7 @@ enum {
84enum { 84enum {
85 MLX4_MAX_NUM_PF = 16, 85 MLX4_MAX_NUM_PF = 16,
86 MLX4_MAX_NUM_VF = 64, 86 MLX4_MAX_NUM_VF = 64,
87 MLX4_MAX_NUM_VF_P_PORT = 64,
87 MLX4_MFUNC_MAX = 80, 88 MLX4_MFUNC_MAX = 80,
88 MLX4_MAX_EQ_NUM = 1024, 89 MLX4_MAX_EQ_NUM = 1024,
89 MLX4_MFUNC_EQ_NUM = 4, 90 MLX4_MFUNC_EQ_NUM = 4,
@@ -664,6 +665,11 @@ struct mlx4_quotas {
664 int xrcd; 665 int xrcd;
665}; 666};
666 667
668struct mlx4_vf_dev {
669 u8 min_port;
670 u8 n_ports;
671};
672
667struct mlx4_dev { 673struct mlx4_dev {
668 struct pci_dev *pdev; 674 struct pci_dev *pdev;
669 unsigned long flags; 675 unsigned long flags;
@@ -679,6 +685,7 @@ struct mlx4_dev {
679 int oper_log_mgm_entry_size; 685 int oper_log_mgm_entry_size;
680 u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; 686 u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
681 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; 687 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
688 struct mlx4_vf_dev *dev_vfs;
682}; 689};
683 690
684struct mlx4_eqe { 691struct mlx4_eqe {
@@ -1197,4 +1204,32 @@ int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
1197 1204
1198cycle_t mlx4_read_clock(struct mlx4_dev *dev); 1205cycle_t mlx4_read_clock(struct mlx4_dev *dev);
1199 1206
1207struct mlx4_active_ports {
1208 DECLARE_BITMAP(ports, MLX4_MAX_PORTS);
1209};
1210/* Returns a bitmap of the physical ports which are assigned to slave */
1211struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave);
1212
1213/* Returns the physical port that represents the virtual port of the slave, */
1214/* or a value < 0 in case of an error. If a slave has 2 ports, the identity */
1215/* mapping is returned. */
1216int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port);
1217
1218struct mlx4_slaves_pport {
1219 DECLARE_BITMAP(slaves, MLX4_MFUNC_MAX);
1220};
1221/* Returns a bitmap of all slaves that are assigned to port. */
1222struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
1223 int port);
1224
1225/* Returns a bitmap of all slaves that are assigned exactly to all */
1226/* the ports that are set in crit_ports. */
1227struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
1228 struct mlx4_dev *dev,
1229 const struct mlx4_active_ports *crit_ports);
1230
1231/* Returns the slave's virtual port that represents the physical port. */
1232int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
1233
1234int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
1200#endif /* MLX4_DEVICE_H */ 1235#endif /* MLX4_DEVICE_H */