author    Jack Morgenstein <jackm@dev.mellanox.co.il>  2011-12-12 23:18:30 -0500
committer David S. Miller <davem@davemloft.net>        2011-12-13 13:56:08 -0500
commit    ab9c17a009ee8eb8c667f22dc0be0709effceab9
tree      b85acd5d8cf28a42ef5f7361ef43b4195b89912d /drivers/net/ethernet/mellanox/mlx4/cmd.c
parent    d81c7186aa16a0da9e39961af6bad0c855a5d684
mlx4_core: Modify driver initialization flow to accommodate SRIOV for Ethernet
1. Added module parameters sr_iov and probe_vf for controlling enablement of SRIOV mode (a hedged sketch of such parameter declarations follows this message).
2. Increased the default max num-qps, num-mpts and log_num_macs to accommodate SRIOV mode.
3. Added port_type_array as a module parameter to allow driver startup with the ports configured as desired. In SRIOV mode, only ETH is supported, and this array is ignored; otherwise, for the case where the FW supports both port types (ETH and IB), the port_type_array parameter is used. By default, port_type_array is set to configure both ports as IB.
4. When running in SRIOV mode, the master needs to initialize the ICM eq table to hold the eqs for itself and also for all the slaves.
5. mlx4_set_port_mask() is now invoked from mlx4_init_hca instead of from mlx4_dev_cap.
6. Introduced SRIOV VF (slave) device startup/teardown logic (mainly the procedures mlx4_init_slave, mlx4_slave_exit, mlx4_slave_cap, and flow modifications in __mlx4_init_one, mlx4_init_hca, and mlx4_setup_hca). VFs obtain their startup information from the PF (master) device via the comm channel.
7. In SRIOV mode (both PF and VF), MSI-X must be enabled, or the driver aborts loading the device.
8. Do not allow setting the port type via sysfs when running in SRIOV mode.
9. mlx4_get_ownership: Currently, only one PF is supported by the driver. If the HCA is burned with FW which enables more than one PF, only one of the PFs is allowed to run. The first one up grabs a FW ownership semaphore; all other PFs will find that semaphore taken, and the driver will not allow them to run (an illustrative sketch of this check follows the diffstat).

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: Liran Liss <liranl@mellanox.co.il>
Signed-off-by: Marcel Apfelbaum <marcela@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
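The commit message names the new module parameters, but the parameter declarations themselves live in main.c, outside this file's diff. For orientation, here is a minimal sketch of how such parameters are conventionally declared in a kernel driver; the parameter names match the commit message, but the types, defaults, permissions, and description strings below are assumptions, not text from the patch.

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Sketch only: names from the commit message, details assumed. */
static int sr_iov;
module_param(sr_iov, int, 0444);
MODULE_PARM_DESC(sr_iov, "Enable SRIOV with this number of VFs (0 = off)");

static int probe_vf;
module_param(probe_vf, int, 0444);
MODULE_PARM_DESC(probe_vf, "Number of VFs to probe by the PF driver");

/* By default both ports come up as IB when the FW supports both types. */
static int port_type_array[2] = {1, 1};	/* assumed encoding: 1 = IB, 2 = ETH */
static int num_port_types = 2;
module_param_array(port_type_array, int, &num_port_types, 0444);
MODULE_PARM_DESC(port_type_array, "Per-port link type at driver startup");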
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/cmd.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c  170
1 file changed, 169 insertions(+), 1 deletion(-)
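Item 9 of the commit message describes the FW ownership semaphore that serializes multiple PFs. That check is implemented outside this file (in main.c), so it does not appear in the diff below; the following is only an illustrative sketch of the described pattern, with the register offset macros (MLX4_OWNER_BASE, MLX4_OWNER_SIZE) assumed for the example.

/* Sketch of the ownership check described in item 9 (names assumed). */
static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to map ownership semaphore\n");
		return -ENOMEM;
	}

	/* A nonzero value means another PF already owns the device. */
	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}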
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 8e6e4b20b0e2..c4fef839168c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -257,7 +257,7 @@ out:
 	return err;
 }
 
-static int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
 		  unsigned long timeout)
 {
 	if (mlx4_priv(dev)->cmd.use_events)
@@ -1390,6 +1390,153 @@ void mlx4_master_comm_channel(struct work_struct *work)
 		mlx4_warn(dev, "Failed to arm comm channel events\n");
 }
 
+static int sync_toggles(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int wr_toggle;
+	int rd_toggle;
+	unsigned long end;
+
+	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
+	end = jiffies + msecs_to_jiffies(5000);
+
+	while (time_before(jiffies, end)) {
+		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
+		if (rd_toggle == wr_toggle) {
+			priv->cmd.comm_toggle = rd_toggle;
+			return 0;
+		}
+
+		cond_resched();
+	}
+
+	/*
+	 * we could reach here if for example the previous VM using this
+	 * function misbehaved and left the channel with unsynced state. We
+	 * should fix this here and give this VM a chance to use a properly
+	 * synced channel
+	 */
+	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
+	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
+	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
+	priv->cmd.comm_toggle = 0;
+
+	return 0;
+}
+
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *s_state;
+	int i, err, port;
+
+	priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+					    &priv->mfunc.vhcr_dma,
+					    GFP_KERNEL);
+	if (!priv->mfunc.vhcr) {
+		mlx4_err(dev, "Couldn't allocate vhcr.\n");
+		return -ENOMEM;
+	}
+
+	if (mlx4_is_master(dev))
+		priv->mfunc.comm =
+		ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
+	else
+		priv->mfunc.comm =
+		ioremap(pci_resource_start(dev->pdev, 2) +
+			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
+	if (!priv->mfunc.comm) {
+		mlx4_err(dev, "Couldn't map communication vector.\n");
+		goto err_vhcr;
+	}
+
+	if (mlx4_is_master(dev)) {
+		priv->mfunc.master.slave_state =
+			kzalloc(dev->num_slaves *
+				sizeof(struct mlx4_slave_state), GFP_KERNEL);
+		if (!priv->mfunc.master.slave_state)
+			goto err_comm;
+
+		for (i = 0; i < dev->num_slaves; ++i) {
+			s_state = &priv->mfunc.master.slave_state[i];
+			s_state->last_cmd = MLX4_COMM_CMD_RESET;
+			__raw_writel((__force u32) 0,
+				     &priv->mfunc.comm[i].slave_write);
+			__raw_writel((__force u32) 0,
+				     &priv->mfunc.comm[i].slave_read);
+			mmiowb();
+			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+				s_state->vlan_filter[port] =
+					kzalloc(sizeof(struct mlx4_vlan_fltr),
+						GFP_KERNEL);
+				if (!s_state->vlan_filter[port]) {
+					if (--port)
+						kfree(s_state->vlan_filter[port]);
+					goto err_slaves;
+				}
+				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+			}
+			spin_lock_init(&s_state->lock);
+		}
+
+		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+		INIT_WORK(&priv->mfunc.master.comm_work,
+			  mlx4_master_comm_channel);
+		INIT_WORK(&priv->mfunc.master.slave_event_work,
+			  mlx4_gen_slave_eqe);
+		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
+			  mlx4_master_handle_slave_flr);
+		spin_lock_init(&priv->mfunc.master.slave_state_lock);
+		priv->mfunc.master.comm_wq =
+			create_singlethread_workqueue("mlx4_comm");
+		if (!priv->mfunc.master.comm_wq)
+			goto err_slaves;
+
+		if (mlx4_init_resource_tracker(dev))
+			goto err_thread;
+
+		sema_init(&priv->cmd.slave_sem, 1);
+		err = mlx4_ARM_COMM_CHANNEL(dev);
+		if (err) {
+			mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
+				 err);
+			goto err_resource;
+		}
+
+	} else {
+		err = sync_toggles(dev);
+		if (err) {
+			mlx4_err(dev, "Couldn't sync toggles\n");
+			goto err_comm;
+		}
+
+		sema_init(&priv->cmd.slave_sem, 1);
+	}
+	return 0;
+
+err_resource:
+	mlx4_free_resource_tracker(dev);
+err_thread:
+	flush_workqueue(priv->mfunc.master.comm_wq);
+	destroy_workqueue(priv->mfunc.master.comm_wq);
+err_slaves:
+	while (--i) {
+		for (port = 1; port <= MLX4_MAX_PORTS; port++)
+			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+	}
+	kfree(priv->mfunc.master.slave_state);
+err_comm:
+	iounmap(priv->mfunc.comm);
+err_vhcr:
+	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+			  priv->mfunc.vhcr,
+			  priv->mfunc.vhcr_dma);
+	priv->mfunc.vhcr = NULL;
+	return -ENOMEM;
+}
+
 int mlx4_cmd_init(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1425,6 +1572,27 @@ err_hcr:
 	return -ENOMEM;
 }
 
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i, port;
+
+	if (mlx4_is_master(dev)) {
+		flush_workqueue(priv->mfunc.master.comm_wq);
+		destroy_workqueue(priv->mfunc.master.comm_wq);
+		for (i = 0; i < dev->num_slaves; i++) {
+			for (port = 1; port <= MLX4_MAX_PORTS; port++)
+				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+		}
+		kfree(priv->mfunc.master.slave_state);
+		iounmap(priv->mfunc.comm);
+		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+				  priv->mfunc.vhcr,
+				  priv->mfunc.vhcr_dma);
+		priv->mfunc.vhcr = NULL;
+	}
+}
+
 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
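A note on the toggle arithmetic in sync_toggles() above: the device keeps the comm-channel words big-endian, and the toggle is the most-significant bit of each 32-bit word, hence the swab32() before the >> 31 shift. Below is a self-contained userspace sketch of the same extraction, using a stand-in value in place of a real readl() from the MMIO-mapped channel.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's swab32(): unconditionally byte-swap 32 bits. */
static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	/* Pretend this value came from readl(&comm->slave_write). */
	uint32_t slave_write = 0x00000080;	/* MSB of the big-endian word set */

	/* Same expression sync_toggles() uses to isolate the toggle bit. */
	int wr_toggle = swab32(slave_write) >> 31;

	printf("wr_toggle = %d\n", wr_toggle);	/* prints: wr_toggle = 1 */
	return 0;
}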