about summary refs log tree commit diff stats
path: root/drivers/net/ethernet
diff options
context:
space:
mode:
authorJoe Perches <joe@perches.com>2014-05-07 15:52:57 -0400
committerDavid S. Miller <davem@davemloft.net>2014-05-08 23:42:02 -0400
commit1a91de28831a1bd913e14dacf25763f3672e24a9 (patch)
tree56f55430eea04df61b8fa71d4405908630aa6127 /drivers/net/ethernet
parent2db2a15abfb2c72dc2717236659dced6ba1c6d6b (diff)
mellanox: Logging message cleanups
Use a more current logging style.

o Coalesce formats
o Add missing spaces for coalesced formats
o Align arguments for modified formats
o Add missing newlines for some logging messages
o Use DRV_NAME as part of format instead of %s, DRV_NAME to reduce overall text.
o Use ..., ##__VA_ARGS__ instead of args... in macros
o Correct a few format typos
o Use a single line message where appropriate

Signed-off-by: Joe Perches <joe@perches.com>
Acked-By: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c246
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/reset.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c4
26 files changed, 352 insertions, 437 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..24201033661b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
212 212
213 /* First, verify that the master reports correct status */ 213 /* First, verify that the master reports correct status */
214 if (comm_pending(dev)) { 214 if (comm_pending(dev)) {
215 mlx4_warn(dev, "Communication channel is not idle." 215 mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
216 "my toggle is %d (cmd:0x%x)\n",
217 priv->cmd.comm_toggle, cmd); 216 priv->cmd.comm_toggle, cmd);
218 return -EAGAIN; 217 return -EAGAIN;
219 } 218 }
@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
422 *out_param = 421 *out_param =
423 be64_to_cpu(vhcr->out_param); 422 be64_to_cpu(vhcr->out_param);
424 else { 423 else {
425 mlx4_err(dev, "response expected while" 424 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
426 "output mailbox is NULL for " 425 op);
427 "command 0x%x\n", op);
428 vhcr->status = CMD_STAT_BAD_PARAM; 426 vhcr->status = CMD_STAT_BAD_PARAM;
429 } 427 }
430 } 428 }
@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
439 *out_param = 437 *out_param =
440 be64_to_cpu(vhcr->out_param); 438 be64_to_cpu(vhcr->out_param);
441 else { 439 else {
442 mlx4_err(dev, "response expected while" 440 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
443 "output mailbox is NULL for " 441 op);
444 "command 0x%x\n", op);
445 vhcr->status = CMD_STAT_BAD_PARAM; 442 vhcr->status = CMD_STAT_BAD_PARAM;
446 } 443 }
447 } 444 }
448 ret = mlx4_status_to_errno(vhcr->status); 445 ret = mlx4_status_to_errno(vhcr->status);
449 } else 446 } else
450 mlx4_err(dev, "failed execution of VHCR_POST command" 447 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
451 "opcode 0x%x\n", op); 448 op);
452 } 449 }
453 450
454 mutex_unlock(&priv->cmd.slave_cmd_mutex); 451 mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -625,9 +622,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
625 622
626 if ((slave_addr & 0xfff) | (master_addr & 0xfff) | 623 if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
627 (slave & ~0x7f) | (size & 0xff)) { 624 (slave & ~0x7f) | (size & 0xff)) {
628 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx " 625 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
629 "master_addr:0x%llx slave_id:%d size:%d\n", 626 slave_addr, master_addr, slave, size);
630 slave_addr, master_addr, slave, size);
631 return -EINVAL; 627 return -EINVAL;
632 } 628 }
633 629
@@ -788,8 +784,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
788 ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) || 784 ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
789 (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && 785 (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
790 smp->method == IB_MGMT_METHOD_SET))) { 786 smp->method == IB_MGMT_METHOD_SET))) {
791 mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, " 787 mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x for attr 0x%x - Rejecting\n",
792 "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
793 slave, smp->method, smp->mgmt_class, 788 slave, smp->method, smp->mgmt_class,
794 be16_to_cpu(smp->attr_id)); 789 be16_to_cpu(smp->attr_id));
795 return -EPERM; 790 return -EPERM;
@@ -1409,8 +1404,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1409 ALIGN(sizeof(struct mlx4_vhcr_cmd), 1404 ALIGN(sizeof(struct mlx4_vhcr_cmd),
1410 MLX4_ACCESS_MEM_ALIGN), 1); 1405 MLX4_ACCESS_MEM_ALIGN), 1);
1411 if (ret) { 1406 if (ret) {
1412 mlx4_err(dev, "%s:Failed reading vhcr" 1407 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1413 "ret: 0x%x\n", __func__, ret); 1408 __func__, ret);
1414 kfree(vhcr); 1409 kfree(vhcr);
1415 return ret; 1410 return ret;
1416 } 1411 }
@@ -1461,9 +1456,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1461 1456
1462 /* Apply permission and bound checks if applicable */ 1457 /* Apply permission and bound checks if applicable */
1463 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { 1458 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1464 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection " 1459 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1465 "checks for resource_id:%d\n", vhcr->op, slave, 1460 vhcr->op, slave, vhcr->in_modifier);
1466 vhcr->in_modifier);
1467 vhcr_cmd->status = CMD_STAT_BAD_OP; 1461 vhcr_cmd->status = CMD_STAT_BAD_OP;
1468 goto out_status; 1462 goto out_status;
1469 } 1463 }
@@ -1502,8 +1496,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1502 } 1496 }
1503 1497
1504 if (err) { 1498 if (err) {
1505 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with" 1499 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1506 " error:%d, status %d\n",
1507 vhcr->op, slave, vhcr->errno, err); 1500 vhcr->op, slave, vhcr->errno, err);
1508 vhcr_cmd->status = mlx4_errno_to_status(err); 1501 vhcr_cmd->status = mlx4_errno_to_status(err);
1509 goto out_status; 1502 goto out_status;
@@ -1537,8 +1530,8 @@ out_status:
1537 __func__); 1530 __func__);
1538 else if (vhcr->e_bit && 1531 else if (vhcr->e_bit &&
1539 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) 1532 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1540 mlx4_warn(dev, "Failed to generate command completion " 1533 mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1541 "eqe for slave %d\n", slave); 1534 slave);
1542 } 1535 }
1543 1536
1544out: 1537out:
@@ -1577,8 +1570,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1577 1570
1578 mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", 1571 mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1579 slave, port); 1572 slave, port);
1580 mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan, 1573 mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1581 vp_admin->default_qos, vp_admin->link_state); 1574 vp_admin->default_vlan, vp_admin->default_qos,
1575 vp_admin->link_state);
1582 1576
1583 work = kzalloc(sizeof(*work), GFP_KERNEL); 1577 work = kzalloc(sizeof(*work), GFP_KERNEL);
1584 if (!work) 1578 if (!work)
@@ -1591,7 +1585,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1591 &admin_vlan_ix); 1585 &admin_vlan_ix);
1592 if (err) { 1586 if (err) {
1593 kfree(work); 1587 kfree(work);
1594 mlx4_warn((&priv->dev), 1588 mlx4_warn(&priv->dev,
1595 "No vlan resources slave %d, port %d\n", 1589 "No vlan resources slave %d, port %d\n",
1596 slave, port); 1590 slave, port);
1597 return err; 1591 return err;
@@ -1600,7 +1594,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1600 admin_vlan_ix = NO_INDX; 1594 admin_vlan_ix = NO_INDX;
1601 } 1595 }
1602 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; 1596 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1603 mlx4_dbg((&(priv->dev)), 1597 mlx4_dbg(&priv->dev,
1604 "alloc vlan %d idx %d slave %d port %d\n", 1598 "alloc vlan %d idx %d slave %d port %d\n",
1605 (int)(vp_admin->default_vlan), 1599 (int)(vp_admin->default_vlan),
1606 admin_vlan_ix, slave, port); 1600 admin_vlan_ix, slave, port);
@@ -1661,12 +1655,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1661 vp_admin->default_vlan, &(vp_oper->vlan_idx)); 1655 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1662 if (err) { 1656 if (err) {
1663 vp_oper->vlan_idx = NO_INDX; 1657 vp_oper->vlan_idx = NO_INDX;
1664 mlx4_warn((&priv->dev), 1658 mlx4_warn(&priv->dev,
1665 "No vlan resorces slave %d, port %d\n", 1659 "No vlan resorces slave %d, port %d\n",
1666 slave, port); 1660 slave, port);
1667 return err; 1661 return err;
1668 } 1662 }
1669 mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n", 1663 mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
1670 (int)(vp_oper->state.default_vlan), 1664 (int)(vp_oper->state.default_vlan),
1671 vp_oper->vlan_idx, slave, port); 1665 vp_oper->vlan_idx, slave, port);
1672 } 1666 }
@@ -1677,12 +1671,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1677 if (0 > vp_oper->mac_idx) { 1671 if (0 > vp_oper->mac_idx) {
1678 err = vp_oper->mac_idx; 1672 err = vp_oper->mac_idx;
1679 vp_oper->mac_idx = NO_INDX; 1673 vp_oper->mac_idx = NO_INDX;
1680 mlx4_warn((&priv->dev), 1674 mlx4_warn(&priv->dev,
1681 "No mac resorces slave %d, port %d\n", 1675 "No mac resorces slave %d, port %d\n",
1682 slave, port); 1676 slave, port);
1683 return err; 1677 return err;
1684 } 1678 }
1685 mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n", 1679 mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
1686 vp_oper->state.mac, vp_oper->mac_idx, slave, port); 1680 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1687 } 1681 }
1688 } 1682 }
@@ -1731,8 +1725,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1731 slave_state[slave].comm_toggle ^= 1; 1725 slave_state[slave].comm_toggle ^= 1;
1732 reply = (u32) slave_state[slave].comm_toggle << 31; 1726 reply = (u32) slave_state[slave].comm_toggle << 31;
1733 if (toggle != slave_state[slave].comm_toggle) { 1727 if (toggle != slave_state[slave].comm_toggle) {
1734 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER" 1728 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
1735 "STATE COMPROMISIED ***\n", toggle, slave); 1729 toggle, slave);
1736 goto reset_slave; 1730 goto reset_slave;
1737 } 1731 }
1738 if (cmd == MLX4_COMM_CMD_RESET) { 1732 if (cmd == MLX4_COMM_CMD_RESET) {
@@ -1759,8 +1753,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1759 /*command from slave in the middle of FLR*/ 1753 /*command from slave in the middle of FLR*/
1760 if (cmd != MLX4_COMM_CMD_RESET && 1754 if (cmd != MLX4_COMM_CMD_RESET &&
1761 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { 1755 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
1762 mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) " 1756 mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
1763 "in the middle of FLR\n", slave, cmd); 1757 slave, cmd);
1764 return; 1758 return;
1765 } 1759 }
1766 1760
@@ -1798,8 +1792,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1798 1792
1799 mutex_lock(&priv->cmd.slave_cmd_mutex); 1793 mutex_lock(&priv->cmd.slave_cmd_mutex);
1800 if (mlx4_master_process_vhcr(dev, slave, NULL)) { 1794 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
1801 mlx4_err(dev, "Failed processing vhcr for slave:%d," 1795 mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
1802 " resetting slave.\n", slave); 1796 slave);
1803 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1797 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1804 goto reset_slave; 1798 goto reset_slave;
1805 } 1799 }
@@ -1816,8 +1810,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1816 is_going_down = 1; 1810 is_going_down = 1;
1817 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); 1811 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
1818 if (is_going_down) { 1812 if (is_going_down) {
1819 mlx4_warn(dev, "Slave is going down aborting command(%d)" 1813 mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
1820 " executing from slave:%d\n",
1821 cmd, slave); 1814 cmd, slave);
1822 return; 1815 return;
1823 } 1816 }
@@ -1880,9 +1873,8 @@ void mlx4_master_comm_channel(struct work_struct *work)
1880 if (toggle != slt) { 1873 if (toggle != slt) {
1881 if (master->slave_state[slave].comm_toggle 1874 if (master->slave_state[slave].comm_toggle
1882 != slt) { 1875 != slt) {
1883 printk(KERN_INFO "slave %d out of sync." 1876 printk(KERN_INFO "slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
1884 " read toggle %d, state toggle %d. " 1877 slave, slt,
1885 "Resynching.\n", slave, slt,
1886 master->slave_state[slave].comm_toggle); 1878 master->slave_state[slave].comm_toggle);
1887 master->slave_state[slave].comm_toggle = 1879 master->slave_state[slave].comm_toggle =
1888 slt; 1880 slt;
@@ -1896,8 +1888,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
1896 } 1888 }
1897 1889
1898 if (reported && reported != served) 1890 if (reported && reported != served)
1899 mlx4_warn(dev, "Got command event with bitmask from %d slaves" 1891 mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
1900 " but %d were served\n",
1901 reported, served); 1892 reported, served);
1902 1893
1903 if (mlx4_ARM_COMM_CHANNEL(dev)) 1894 if (mlx4_ARM_COMM_CHANNEL(dev))
@@ -1953,7 +1944,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1953 ioremap(pci_resource_start(dev->pdev, 2) + 1944 ioremap(pci_resource_start(dev->pdev, 2) +
1954 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); 1945 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
1955 if (!priv->mfunc.comm) { 1946 if (!priv->mfunc.comm) {
1956 mlx4_err(dev, "Couldn't map communication vector.\n"); 1947 mlx4_err(dev, "Couldn't map communication vector\n");
1957 goto err_vhcr; 1948 goto err_vhcr;
1958 } 1949 }
1959 1950
@@ -2080,7 +2071,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2080 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + 2071 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
2081 MLX4_HCR_BASE, MLX4_HCR_SIZE); 2072 MLX4_HCR_BASE, MLX4_HCR_SIZE);
2082 if (!priv->cmd.hcr) { 2073 if (!priv->cmd.hcr) {
2083 mlx4_err(dev, "Couldn't map command register.\n"); 2074 mlx4_err(dev, "Couldn't map command register\n");
2084 return -ENOMEM; 2075 return -ENOMEM;
2085 } 2076 }
2086 } 2077 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index c2cd8d31bcad..636963db598a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
125 &cq->vector)) { 125 &cq->vector)) {
126 cq->vector = (cq->ring + 1 + priv->port) 126 cq->vector = (cq->ring + 1 + priv->port)
127 % mdev->dev->caps.num_comp_vectors; 127 % mdev->dev->caps.num_comp_vectors;
128 mlx4_warn(mdev, "Failed Assigning an EQ to " 128 mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
129 "%s ,Falling back to legacy EQ's\n",
130 name); 129 name);
131 } 130 }
132 } 131 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3e8d33605fe7..c3736045e7af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
925 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); 925 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
926 } else { 926 } else {
927 if (cmd->fs.ring_cookie >= priv->rx_ring_num) { 927 if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
928 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n", 928 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
929 cmd->fs.ring_cookie); 929 cmd->fs.ring_cookie);
930 return -EINVAL; 930 return -EINVAL;
931 } 931 }
932 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn; 932 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
933 if (!qpn) { 933 if (!qpn) {
934 en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n", 934 en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
935 cmd->fs.ring_cookie); 935 cmd->fs.ring_cookie);
936 return -EINVAL; 936 return -EINVAL;
937 } 937 }
@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
956 } 956 }
957 err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id); 957 err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
958 if (err) { 958 if (err) {
959 en_err(priv, "Fail to attach network rule at location %d.\n", 959 en_err(priv, "Fail to attach network rule at location %d\n",
960 cmd->fs.location); 960 cmd->fs.location);
961 goto out_free_list; 961 goto out_free_list;
962 } 962 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0c59d4fe7e3a..f953c1d7eae6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
133 MLX4_EN_MAX_TX_RING_P_UP); 133 MLX4_EN_MAX_TX_RING_P_UP);
134 if (params->udp_rss && !(mdev->dev->caps.flags 134 if (params->udp_rss && !(mdev->dev->caps.flags
135 & MLX4_DEV_CAP_FLAG_UDP_RSS)) { 135 & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
136 mlx4_warn(mdev, "UDP RSS is not supported on this device.\n"); 136 mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
137 params->udp_rss = 0; 137 params->udp_rss = 0;
138 } 138 }
139 for (i = 1; i <= MLX4_MAX_PORTS; i++) { 139 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
251 251
252 mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); 252 mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
253 if (!mdev->LSO_support) 253 if (!mdev->LSO_support)
254 mlx4_warn(mdev, "LSO not supported, please upgrade to later " 254 mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
255 "FW version to enable LSO\n");
256 255
257 if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, 256 if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
258 MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, 257 MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
268 /* Build device profile according to supplied module parameters */ 267 /* Build device profile according to supplied module parameters */
269 err = mlx4_en_get_profile(mdev); 268 err = mlx4_en_get_profile(mdev);
270 if (err) { 269 if (err) {
271 mlx4_err(mdev, "Bad module parameters, aborting.\n"); 270 mlx4_err(mdev, "Bad module parameters, aborting\n");
272 goto err_mr; 271 goto err_mr;
273 } 272 }
274 273
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e4b1720c3d1..fba3c8e77626 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1576,7 +1576,7 @@ int mlx4_en_start_port(struct net_device *dev)
1576 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1576 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1577 err = mlx4_en_set_cq_moder(priv, cq); 1577 err = mlx4_en_set_cq_moder(priv, cq);
1578 if (err) { 1578 if (err) {
1579 en_err(priv, "Failed setting cq moderation parameters"); 1579 en_err(priv, "Failed setting cq moderation parameters\n");
1580 mlx4_en_deactivate_cq(priv, cq); 1580 mlx4_en_deactivate_cq(priv, cq);
1581 goto cq_err; 1581 goto cq_err;
1582 } 1582 }
@@ -1615,7 +1615,7 @@ int mlx4_en_start_port(struct net_device *dev)
1615 } 1615 }
1616 err = mlx4_en_set_cq_moder(priv, cq); 1616 err = mlx4_en_set_cq_moder(priv, cq);
1617 if (err) { 1617 if (err) {
1618 en_err(priv, "Failed setting cq moderation parameters"); 1618 en_err(priv, "Failed setting cq moderation parameters\n");
1619 mlx4_en_deactivate_cq(priv, cq); 1619 mlx4_en_deactivate_cq(priv, cq);
1620 goto tx_err; 1620 goto tx_err;
1621 } 1621 }
@@ -2594,8 +2594,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2594 prof->tx_pause, prof->tx_ppp, 2594 prof->tx_pause, prof->tx_ppp,
2595 prof->rx_pause, prof->rx_ppp); 2595 prof->rx_pause, prof->rx_ppp);
2596 if (err) { 2596 if (err) {
2597 en_err(priv, "Failed setting port general configurations " 2597 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
2598 "for port %d, with error %d\n", priv->port, err); 2598 priv->port, err);
2599 goto out; 2599 goto out;
2600 } 2600 }
2601 2601
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index ba049ae88749..a1512450816d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
270 ring->actual_size, 270 ring->actual_size,
271 GFP_KERNEL)) { 271 GFP_KERNEL)) {
272 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { 272 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
273 en_err(priv, "Failed to allocate " 273 en_err(priv, "Failed to allocate enough rx buffers\n");
274 "enough rx buffers\n");
275 return -ENOMEM; 274 return -ENOMEM;
276 } else { 275 } else {
277 new_size = rounddown_pow_of_two(ring->actual_size); 276 new_size = rounddown_pow_of_two(ring->actual_size);
278 en_warn(priv, "Only %d buffers allocated " 277 en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
279 "reducing ring size to %d",
280 ring->actual_size, new_size); 278 ring->actual_size, new_size);
281 goto reduce_rings; 279 goto reduce_rings;
282 } 280 }
@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
685 /* Drop packet on bad receive or bad checksum */ 683 /* Drop packet on bad receive or bad checksum */
686 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == 684 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
687 MLX4_CQE_OPCODE_ERROR)) { 685 MLX4_CQE_OPCODE_ERROR)) {
688 en_err(priv, "CQE completed in error - vendor " 686 en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
689 "syndrom:%d syndrom:%d\n", 687 ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
690 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome, 688 ((struct mlx4_err_cqe *)cqe)->syndrome);
691 ((struct mlx4_err_cqe *) cqe)->syndrome);
692 goto next; 689 goto next;
693 } 690 }
694 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { 691 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
@@ -944,8 +941,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
944 priv->rx_skb_size = eff_mtu; 941 priv->rx_skb_size = eff_mtu;
945 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc)); 942 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
946 943
947 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " 944 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
948 "num_frags:%d):\n", eff_mtu, priv->num_frags); 945 eff_mtu, priv->num_frags);
949 for (i = 0; i < priv->num_frags; i++) { 946 for (i = 0; i < priv->num_frags; i++) {
950 en_err(priv, 947 en_err(priv,
951 " frag:%d - size:%d prefix:%d align:%d stride:%d\n", 948 " frag:%d - size:%d prefix:%d align:%d stride:%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index dd1f6d346459..89585c6311c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
108 108
109 ring->buf = ring->wqres.buf.direct.buf; 109 ring->buf = ring->wqres.buf.direct.buf;
110 110
111 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " 111 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
112 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 112 ring, ring->buf, ring->size, ring->buf_size,
113 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 113 (unsigned long long) ring->wqres.buf.direct.map);
114 114
115 ring->qpn = qpn; 115 ring->qpn = qpn;
116 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); 116 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
122 122
123 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); 123 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
124 if (err) { 124 if (err) {
125 en_dbg(DRV, priv, "working without blueflame (%d)", err); 125 en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
126 ring->bf.uar = &mdev->priv_uar; 126 ring->bf.uar = &mdev->priv_uar;
127 ring->bf.uar->map = mdev->uar_map; 127 ring->bf.uar->map = mdev->uar_map;
128 ring->bf_enabled = false; 128 ring->bf_enabled = false;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d501a2b0fb79..6c088bc1845b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -152,14 +152,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
152 if (i != dev->caps.function && 152 if (i != dev->caps.function &&
153 master->slave_state[i].active) 153 master->slave_state[i].active)
154 if (mlx4_GEN_EQE(dev, i, eqe)) 154 if (mlx4_GEN_EQE(dev, i, eqe))
155 mlx4_warn(dev, "Failed to " 155 mlx4_warn(dev, "Failed to generate event for slave %d\n",
156 " generate event " 156 i);
157 "for slave %d\n", i);
158 } 157 }
159 } else { 158 } else {
160 if (mlx4_GEN_EQE(dev, slave, eqe)) 159 if (mlx4_GEN_EQE(dev, slave, eqe))
161 mlx4_warn(dev, "Failed to generate event " 160 mlx4_warn(dev, "Failed to generate event for slave %d\n",
162 "for slave %d\n", slave); 161 slave);
163 } 162 }
164 ++slave_eq->cons; 163 ++slave_eq->cons;
165 } 164 }
@@ -177,8 +176,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
177 s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; 176 s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
178 if ((!!(s_eqe->owner & 0x80)) ^ 177 if ((!!(s_eqe->owner & 0x80)) ^
179 (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { 178 (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
180 mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. " 179 mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
181 "No free EQE on slave events queue\n", slave); 180 slave);
182 spin_unlock_irqrestore(&slave_eq->event_lock, flags); 181 spin_unlock_irqrestore(&slave_eq->event_lock, flags);
183 return; 182 return;
184 } 183 }
@@ -375,9 +374,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
375 } 374 }
376 break; 375 break;
377 default: 376 default:
378 pr_err("%s: BUG!!! UNKNOWN state: " 377 pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
379 "slave:%d, port:%d\n", __func__, slave, port); 378 __func__, slave, port);
380 goto out; 379 goto out;
381 } 380 }
382 ret = mlx4_get_slave_port_state(dev, slave, port); 381 ret = mlx4_get_slave_port_state(dev, slave, port);
383 382
@@ -425,8 +424,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
425 for (i = 0 ; i < dev->num_slaves; i++) { 424 for (i = 0 ; i < dev->num_slaves; i++) {
426 425
427 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { 426 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
428 mlx4_dbg(dev, "mlx4_handle_slave_flr: " 427 mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
429 "clean slave: %d\n", i); 428 i);
430 429
431 mlx4_delete_all_resources_for_slave(dev, i); 430 mlx4_delete_all_resources_for_slave(dev, i);
432 /*return the slave to running mode*/ 431 /*return the slave to running mode*/
@@ -438,8 +437,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
438 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, 437 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
439 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 438 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
440 if (err) 439 if (err)
441 mlx4_warn(dev, "Failed to notify FW on " 440 mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
442 "FLR done (slave:%d)\n", i); 441 i);
443 } 442 }
444 } 443 }
445} 444}
@@ -490,9 +489,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
490 be32_to_cpu(eqe->event.qp.qpn) 489 be32_to_cpu(eqe->event.qp.qpn)
491 & 0xffffff, &slave); 490 & 0xffffff, &slave);
492 if (ret && ret != -ENOENT) { 491 if (ret && ret != -ENOENT) {
493 mlx4_dbg(dev, "QP event %02x(%02x) on " 492 mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
494 "EQ %d at index %u: could "
495 "not get slave id (%d)\n",
496 eqe->type, eqe->subtype, 493 eqe->type, eqe->subtype,
497 eq->eqn, eq->cons_index, ret); 494 eq->eqn, eq->cons_index, ret);
498 break; 495 break;
@@ -520,23 +517,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
520 & 0xffffff, 517 & 0xffffff,
521 &slave); 518 &slave);
522 if (ret && ret != -ENOENT) { 519 if (ret && ret != -ENOENT) {
523 mlx4_warn(dev, "SRQ event %02x(%02x) " 520 mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
524 "on EQ %d at index %u: could"
525 " not get slave id (%d)\n",
526 eqe->type, eqe->subtype, 521 eqe->type, eqe->subtype,
527 eq->eqn, eq->cons_index, ret); 522 eq->eqn, eq->cons_index, ret);
528 break; 523 break;
529 } 524 }
530 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x," 525 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
531 " event: %02x(%02x)\n", __func__, 526 __func__, slave,
532 slave,
533 be32_to_cpu(eqe->event.srq.srqn), 527 be32_to_cpu(eqe->event.srq.srqn),
534 eqe->type, eqe->subtype); 528 eqe->type, eqe->subtype);
535 529
536 if (!ret && slave != dev->caps.function) { 530 if (!ret && slave != dev->caps.function) {
537 mlx4_warn(dev, "%s: sending event " 531 mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
538 "%02x(%02x) to slave:%d\n", 532 __func__, eqe->type,
539 __func__, eqe->type,
540 eqe->subtype, slave); 533 eqe->subtype, slave);
541 mlx4_slave_event(dev, slave, eqe); 534 mlx4_slave_event(dev, slave, eqe);
542 break; 535 break;
@@ -569,8 +562,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
569 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { 562 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
570 if (i == mlx4_master_func_num(dev)) 563 if (i == mlx4_master_func_num(dev))
571 continue; 564 continue;
572 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN" 565 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
573 " to slave: %d, port:%d\n",
574 __func__, i, port); 566 __func__, i, port);
575 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 567 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
576 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 568 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
@@ -634,11 +626,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
634 be32_to_cpu(eqe->event.cq_err.cqn) 626 be32_to_cpu(eqe->event.cq_err.cqn)
635 & 0xffffff, &slave); 627 & 0xffffff, &slave);
636 if (ret && ret != -ENOENT) { 628 if (ret && ret != -ENOENT) {
637 mlx4_dbg(dev, "CQ event %02x(%02x) on " 629 mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
638 "EQ %d at index %u: could " 630 eqe->type, eqe->subtype,
639 "not get slave id (%d)\n", 631 eq->eqn, eq->cons_index, ret);
640 eqe->type, eqe->subtype,
641 eq->eqn, eq->cons_index, ret);
642 break; 632 break;
643 } 633 }
644 634
@@ -667,8 +657,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
667 657
668 case MLX4_EVENT_TYPE_COMM_CHANNEL: 658 case MLX4_EVENT_TYPE_COMM_CHANNEL:
669 if (!mlx4_is_master(dev)) { 659 if (!mlx4_is_master(dev)) {
670 mlx4_warn(dev, "Received comm channel event " 660 mlx4_warn(dev, "Received comm channel event for non master device\n");
671 "for non master device\n");
672 break; 661 break;
673 } 662 }
674 memcpy(&priv->mfunc.master.comm_arm_bit_vector, 663 memcpy(&priv->mfunc.master.comm_arm_bit_vector,
@@ -681,8 +670,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
681 case MLX4_EVENT_TYPE_FLR_EVENT: 670 case MLX4_EVENT_TYPE_FLR_EVENT:
682 flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); 671 flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
683 if (!mlx4_is_master(dev)) { 672 if (!mlx4_is_master(dev)) {
684 mlx4_warn(dev, "Non-master function received" 673 mlx4_warn(dev, "Non-master function received FLR event\n");
685 "FLR event\n");
686 break; 674 break;
687 } 675 }
688 676
@@ -711,22 +699,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
711 if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { 699 if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
712 if (mlx4_is_master(dev)) 700 if (mlx4_is_master(dev))
713 for (i = 0; i < dev->num_slaves; i++) { 701 for (i = 0; i < dev->num_slaves; i++) {
714 mlx4_dbg(dev, "%s: Sending " 702 mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
715 "MLX4_FATAL_WARNING_SUBTYPE_WARMING" 703 __func__, i);
716 " to slave: %d\n", __func__, i);
717 if (i == dev->caps.function) 704 if (i == dev->caps.function)
718 continue; 705 continue;
719 mlx4_slave_event(dev, i, eqe); 706 mlx4_slave_event(dev, i, eqe);
720 } 707 }
721 mlx4_err(dev, "Temperature Threshold was reached! " 708 mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
722 "Threshold: %d celsius degrees; " 709 be16_to_cpu(eqe->event.warming.warning_threshold),
723 "Current Temperature: %d\n", 710 be16_to_cpu(eqe->event.warming.current_temperature));
724 be16_to_cpu(eqe->event.warming.warning_threshold),
725 be16_to_cpu(eqe->event.warming.current_temperature));
726 } else 711 } else
727 mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), " 712 mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
728 "subtype %02x on EQ %d at index %u. owner=%x, "
729 "nent=0x%x, slave=%x, ownership=%s\n",
730 eqe->type, eqe->subtype, eq->eqn, 713 eqe->type, eqe->subtype, eq->eqn,
731 eq->cons_index, eqe->owner, eq->nent, 714 eq->cons_index, eqe->owner, eq->nent,
732 eqe->slave_id, 715 eqe->slave_id,
@@ -743,9 +726,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
743 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: 726 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
744 case MLX4_EVENT_TYPE_ECC_DETECT: 727 case MLX4_EVENT_TYPE_ECC_DETECT:
745 default: 728 default:
746 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at " 729 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
747 "index %u. owner=%x, nent=0x%x, slave=%x, "
748 "ownership=%s\n",
749 eqe->type, eqe->subtype, eq->eqn, 730 eqe->type, eqe->subtype, eq->eqn,
750 eq->cons_index, eqe->owner, eq->nent, 731 eq->cons_index, eqe->owner, eq->nent,
751 eqe->slave_id, 732 eqe->slave_id,
@@ -1088,7 +1069,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
1088 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + 1069 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
1089 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); 1070 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
1090 if (!priv->clr_base) { 1071 if (!priv->clr_base) {
1091 mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n"); 1072 mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
1092 return -ENOMEM; 1073 return -ENOMEM;
1093 } 1074 }
1094 1075
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index d16a4d118903..c52e04891317 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -428,8 +428,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
428 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { 428 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
429 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); 429 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
430 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) { 430 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
431 mlx4_err(dev, "phy_wqe_gid is " 431 mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
432 "enforced on this ib port\n");
433 err = -EPROTONOSUPPORT; 432 err = -EPROTONOSUPPORT;
434 goto out; 433 goto out;
435 } 434 }
@@ -1054,10 +1053,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1054 */ 1053 */
1055 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; 1054 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
1056 if (lg < MLX4_ICM_PAGE_SHIFT) { 1055 if (lg < MLX4_ICM_PAGE_SHIFT) {
1057 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n", 1056 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
1058 MLX4_ICM_PAGE_SIZE, 1057 MLX4_ICM_PAGE_SIZE,
1059 (unsigned long long) mlx4_icm_addr(&iter), 1058 (unsigned long long) mlx4_icm_addr(&iter),
1060 mlx4_icm_size(&iter)); 1059 mlx4_icm_size(&iter));
1061 err = -EINVAL; 1060 err = -EINVAL;
1062 goto out; 1061 goto out;
1063 } 1062 }
@@ -1093,14 +1092,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1093 1092
1094 switch (op) { 1093 switch (op) {
1095 case MLX4_CMD_MAP_FA: 1094 case MLX4_CMD_MAP_FA:
1096 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); 1095 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
1097 break; 1096 break;
1098 case MLX4_CMD_MAP_ICM_AUX: 1097 case MLX4_CMD_MAP_ICM_AUX:
1099 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); 1098 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
1100 break; 1099 break;
1101 case MLX4_CMD_MAP_ICM: 1100 case MLX4_CMD_MAP_ICM:
1102 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", 1101 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
1103 tc, ts, (unsigned long long) virt - (ts << 10)); 1102 tc, ts, (unsigned long long) virt - (ts << 10));
1104 break; 1103 break;
1105 } 1104 }
1106 1105
@@ -1186,14 +1185,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
1186 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); 1185 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
1187 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || 1186 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
1188 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { 1187 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
1189 mlx4_err(dev, "Installed FW has unsupported " 1188 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
1190 "command interface revision %d.\n",
1191 cmd_if_rev); 1189 cmd_if_rev);
1192 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", 1190 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
1193 (int) (dev->caps.fw_ver >> 32), 1191 (int) (dev->caps.fw_ver >> 32),
1194 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1192 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1195 (int) dev->caps.fw_ver & 0xffff); 1193 (int) dev->caps.fw_ver & 0xffff);
1196 mlx4_err(dev, "This driver version supports only revisions %d to %d.\n", 1194 mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
1197 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); 1195 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
1198 err = -ENODEV; 1196 err = -ENODEV;
1199 goto out; 1197 goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index cef267e24f9c..df2c1fbf75ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -163,8 +163,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
163 for (i = 0; i < dev->caps.num_ports - 1; i++) { 163 for (i = 0; i < dev->caps.num_ports - 1; i++) {
164 if (port_type[i] != port_type[i + 1]) { 164 if (port_type[i] != port_type[i + 1]) {
165 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 165 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
166 mlx4_err(dev, "Only same port types supported " 166 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
167 "on this HCA, aborting.\n");
168 return -EINVAL; 167 return -EINVAL;
169 } 168 }
170 } 169 }
@@ -172,8 +171,8 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
172 171
173 for (i = 0; i < dev->caps.num_ports; i++) { 172 for (i = 0; i < dev->caps.num_ports; i++) {
174 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 173 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
175 mlx4_err(dev, "Requested port type for port %d is not " 174 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
176 "supported on this HCA\n", i + 1); 175 i + 1);
177 return -EINVAL; 176 return -EINVAL;
178 } 177 }
179 } 178 }
@@ -195,26 +194,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
195 194
196 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 195 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
197 if (err) { 196 if (err) {
198 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 197 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
199 return err; 198 return err;
200 } 199 }
201 200
202 if (dev_cap->min_page_sz > PAGE_SIZE) { 201 if (dev_cap->min_page_sz > PAGE_SIZE) {
203 mlx4_err(dev, "HCA minimum page size of %d bigger than " 202 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
204 "kernel PAGE_SIZE of %ld, aborting.\n",
205 dev_cap->min_page_sz, PAGE_SIZE); 203 dev_cap->min_page_sz, PAGE_SIZE);
206 return -ENODEV; 204 return -ENODEV;
207 } 205 }
208 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 206 if (dev_cap->num_ports > MLX4_MAX_PORTS) {
209 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 207 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
210 "aborting.\n",
211 dev_cap->num_ports, MLX4_MAX_PORTS); 208 dev_cap->num_ports, MLX4_MAX_PORTS);
212 return -ENODEV; 209 return -ENODEV;
213 } 210 }
214 211
215 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { 212 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
216 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " 213 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
217 "PCI resource 2 size of 0x%llx, aborting.\n",
218 dev_cap->uar_size, 214 dev_cap->uar_size,
219 (unsigned long long) pci_resource_len(dev->pdev, 2)); 215 (unsigned long long) pci_resource_len(dev->pdev, 2));
220 return -ENODEV; 216 return -ENODEV;
@@ -347,14 +343,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
347 343
348 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { 344 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
349 dev->caps.log_num_macs = dev_cap->log_max_macs[i]; 345 dev->caps.log_num_macs = dev_cap->log_max_macs[i];
350 mlx4_warn(dev, "Requested number of MACs is too much " 346 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
351 "for port %d, reducing to %d.\n",
352 i, 1 << dev->caps.log_num_macs); 347 i, 1 << dev->caps.log_num_macs);
353 } 348 }
354 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { 349 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
355 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; 350 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
356 mlx4_warn(dev, "Requested number of VLANs is too much " 351 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
357 "for port %d, reducing to %d.\n",
358 i, 1 << dev->caps.log_num_vlans); 352 i, 1 << dev->caps.log_num_vlans);
359 } 353 }
360 } 354 }
@@ -584,7 +578,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
584 memset(&hca_param, 0, sizeof(hca_param)); 578 memset(&hca_param, 0, sizeof(hca_param));
585 err = mlx4_QUERY_HCA(dev, &hca_param); 579 err = mlx4_QUERY_HCA(dev, &hca_param);
586 if (err) { 580 if (err) {
587 mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); 581 mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
588 return err; 582 return err;
589 } 583 }
590 584
@@ -603,19 +597,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
603 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 597 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
604 err = mlx4_dev_cap(dev, &dev_cap); 598 err = mlx4_dev_cap(dev, &dev_cap);
605 if (err) { 599 if (err) {
606 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 600 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
607 return err; 601 return err;
608 } 602 }
609 603
610 err = mlx4_QUERY_FW(dev); 604 err = mlx4_QUERY_FW(dev);
611 if (err) 605 if (err)
612 mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); 606 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
613 607
614 page_size = ~dev->caps.page_size_cap + 1; 608 page_size = ~dev->caps.page_size_cap + 1;
615 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 609 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
616 if (page_size > PAGE_SIZE) { 610 if (page_size > PAGE_SIZE) {
617 mlx4_err(dev, "HCA minimum page size of %d bigger than " 611 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
618 "kernel PAGE_SIZE of %ld, aborting.\n",
619 page_size, PAGE_SIZE); 612 page_size, PAGE_SIZE);
620 return -ENODEV; 613 return -ENODEV;
621 } 614 }
@@ -633,8 +626,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
633 memset(&func_cap, 0, sizeof(func_cap)); 626 memset(&func_cap, 0, sizeof(func_cap));
634 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 627 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
635 if (err) { 628 if (err) {
636 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n", 629 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
637 err); 630 err);
638 return err; 631 return err;
639 } 632 }
640 633
@@ -661,8 +654,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
661 dev->caps.num_amgms = 0; 654 dev->caps.num_amgms = 0;
662 655
663 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 656 if (dev->caps.num_ports > MLX4_MAX_PORTS) {
664 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 657 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
665 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); 658 dev->caps.num_ports, MLX4_MAX_PORTS);
666 return -ENODEV; 659 return -ENODEV;
667 } 660 }
668 661
@@ -680,8 +673,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
680 for (i = 1; i <= dev->caps.num_ports; ++i) { 673 for (i = 1; i <= dev->caps.num_ports; ++i) {
681 err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); 674 err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
682 if (err) { 675 if (err) {
683 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for" 676 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
684 " port %d, aborting (%d).\n", i, err); 677 i, err);
685 goto err_mem; 678 goto err_mem;
686 } 679 }
687 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; 680 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
@@ -699,8 +692,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
699 if (dev->caps.uar_page_size * (dev->caps.num_uars - 692 if (dev->caps.uar_page_size * (dev->caps.num_uars -
700 dev->caps.reserved_uars) > 693 dev->caps.reserved_uars) >
701 pci_resource_len(dev->pdev, 2)) { 694 pci_resource_len(dev->pdev, 2)) {
702 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " 695 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
703 "PCI resource 2 size of 0x%llx, aborting.\n",
704 dev->caps.uar_page_size * dev->caps.num_uars, 696 dev->caps.uar_page_size * dev->caps.num_uars,
705 (unsigned long long) pci_resource_len(dev->pdev, 2)); 697 (unsigned long long) pci_resource_len(dev->pdev, 2));
706 goto err_mem; 698 goto err_mem;
@@ -722,7 +714,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
722 } 714 }
723 715
724 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 716 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
725 mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); 717 mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
726 718
727 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 719 slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
728 720
@@ -784,8 +776,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
784 dev->caps.port_type[port] = port_types[port - 1]; 776 dev->caps.port_type[port] = port_types[port - 1];
785 err = mlx4_SET_PORT(dev, port, -1); 777 err = mlx4_SET_PORT(dev, port, -1);
786 if (err) { 778 if (err) {
787 mlx4_err(dev, "Failed to set port %d, " 779 mlx4_err(dev, "Failed to set port %d, aborting\n",
788 "aborting\n", port); 780 port);
789 goto out; 781 goto out;
790 } 782 }
791 } 783 }
@@ -868,9 +860,7 @@ static ssize_t set_port_type(struct device *dev,
868 } 860 }
869 } 861 }
870 if (err) { 862 if (err) {
871 mlx4_err(mdev, "Auto sensing is not supported on this HCA. " 863 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
872 "Set only 'eth' or 'ib' for both ports "
873 "(should be the same)\n");
874 goto out; 864 goto out;
875 } 865 }
876 866
@@ -975,8 +965,8 @@ static ssize_t set_port_ib_mtu(struct device *dev,
975 mlx4_CLOSE_PORT(mdev, port); 965 mlx4_CLOSE_PORT(mdev, port);
976 err = mlx4_SET_PORT(mdev, port, -1); 966 err = mlx4_SET_PORT(mdev, port, -1);
977 if (err) { 967 if (err) {
978 mlx4_err(mdev, "Failed to set port %d, " 968 mlx4_err(mdev, "Failed to set port %d, aborting\n",
979 "aborting\n", port); 969 port);
980 goto err_set_port; 970 goto err_set_port;
981 } 971 }
982 } 972 }
@@ -995,19 +985,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev)
995 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 985 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
996 GFP_HIGHUSER | __GFP_NOWARN, 0); 986 GFP_HIGHUSER | __GFP_NOWARN, 0);
997 if (!priv->fw.fw_icm) { 987 if (!priv->fw.fw_icm) {
998 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); 988 mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
999 return -ENOMEM; 989 return -ENOMEM;
1000 } 990 }
1001 991
1002 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 992 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1003 if (err) { 993 if (err) {
1004 mlx4_err(dev, "MAP_FA command failed, aborting.\n"); 994 mlx4_err(dev, "MAP_FA command failed, aborting\n");
1005 goto err_free; 995 goto err_free;
1006 } 996 }
1007 997
1008 err = mlx4_RUN_FW(dev); 998 err = mlx4_RUN_FW(dev);
1009 if (err) { 999 if (err) {
1010 mlx4_err(dev, "RUN_FW command failed, aborting.\n"); 1000 mlx4_err(dev, "RUN_FW command failed, aborting\n");
1011 goto err_unmap_fa; 1001 goto err_unmap_fa;
1012 } 1002 }
1013 1003
@@ -1091,30 +1081,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1091 1081
1092 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 1082 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1093 if (err) { 1083 if (err) {
1094 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); 1084 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
1095 return err; 1085 return err;
1096 } 1086 }
1097 1087
1098 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", 1088 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
1099 (unsigned long long) icm_size >> 10, 1089 (unsigned long long) icm_size >> 10,
1100 (unsigned long long) aux_pages << 2); 1090 (unsigned long long) aux_pages << 2);
1101 1091
1102 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 1092 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1103 GFP_HIGHUSER | __GFP_NOWARN, 0); 1093 GFP_HIGHUSER | __GFP_NOWARN, 0);
1104 if (!priv->fw.aux_icm) { 1094 if (!priv->fw.aux_icm) {
1105 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); 1095 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
1106 return -ENOMEM; 1096 return -ENOMEM;
1107 } 1097 }
1108 1098
1109 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 1099 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1110 if (err) { 1100 if (err) {
1111 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); 1101 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
1112 goto err_free_aux; 1102 goto err_free_aux;
1113 } 1103 }
1114 1104
1115 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 1105 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1116 if (err) { 1106 if (err) {
1117 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); 1107 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
1118 goto err_unmap_aux; 1108 goto err_unmap_aux;
1119 } 1109 }
1120 1110
@@ -1125,7 +1115,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1125 init_hca->eqc_base, dev_cap->eqc_entry_sz, 1115 init_hca->eqc_base, dev_cap->eqc_entry_sz,
1126 num_eqs, num_eqs, 0, 0); 1116 num_eqs, num_eqs, 0, 0);
1127 if (err) { 1117 if (err) {
1128 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 1118 mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
1129 goto err_unmap_cmpt; 1119 goto err_unmap_cmpt;
1130 } 1120 }
1131 1121
@@ -1146,7 +1136,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1146 dev->caps.num_mtts, 1136 dev->caps.num_mtts,
1147 dev->caps.reserved_mtts, 1, 0); 1137 dev->caps.reserved_mtts, 1, 0);
1148 if (err) { 1138 if (err) {
1149 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); 1139 mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1150 goto err_unmap_eq; 1140 goto err_unmap_eq;
1151 } 1141 }
1152 1142
@@ -1156,7 +1146,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1156 dev->caps.num_mpts, 1146 dev->caps.num_mpts,
1157 dev->caps.reserved_mrws, 1, 1); 1147 dev->caps.reserved_mrws, 1, 1);
1158 if (err) { 1148 if (err) {
1159 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); 1149 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1160 goto err_unmap_mtt; 1150 goto err_unmap_mtt;
1161 } 1151 }
1162 1152
@@ -1167,7 +1157,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1167 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1157 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1168 0, 0); 1158 0, 0);
1169 if (err) { 1159 if (err) {
1170 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 1160 mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1171 goto err_unmap_dmpt; 1161 goto err_unmap_dmpt;
1172 } 1162 }
1173 1163
@@ -1178,7 +1168,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1178 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1168 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1179 0, 0); 1169 0, 0);
1180 if (err) { 1170 if (err) {
1181 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 1171 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1182 goto err_unmap_qp; 1172 goto err_unmap_qp;
1183 } 1173 }
1184 1174
@@ -1189,7 +1179,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1189 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1179 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1190 0, 0); 1180 0, 0);
1191 if (err) { 1181 if (err) {
1192 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 1182 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1193 goto err_unmap_auxc; 1183 goto err_unmap_auxc;
1194 } 1184 }
1195 1185
@@ -1210,7 +1200,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1210 dev->caps.num_cqs, 1200 dev->caps.num_cqs,
1211 dev->caps.reserved_cqs, 0, 0); 1201 dev->caps.reserved_cqs, 0, 0);
1212 if (err) { 1202 if (err) {
1213 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); 1203 mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1214 goto err_unmap_rdmarc; 1204 goto err_unmap_rdmarc;
1215 } 1205 }
1216 1206
@@ -1220,7 +1210,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1220 dev->caps.num_srqs, 1210 dev->caps.num_srqs,
1221 dev->caps.reserved_srqs, 0, 0); 1211 dev->caps.reserved_srqs, 0, 0);
1222 if (err) { 1212 if (err) {
1223 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); 1213 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1224 goto err_unmap_cq; 1214 goto err_unmap_cq;
1225 } 1215 }
1226 1216
@@ -1238,7 +1228,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1238 dev->caps.num_mgms + dev->caps.num_amgms, 1228 dev->caps.num_mgms + dev->caps.num_amgms,
1239 0, 0); 1229 0, 0);
1240 if (err) { 1230 if (err) {
1241 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); 1231 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1242 goto err_unmap_srq; 1232 goto err_unmap_srq;
1243 } 1233 }
1244 1234
@@ -1315,7 +1305,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
1315 1305
1316 mutex_lock(&priv->cmd.slave_cmd_mutex); 1306 mutex_lock(&priv->cmd.slave_cmd_mutex);
1317 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) 1307 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
1318 mlx4_warn(dev, "Failed to close slave function.\n"); 1308 mlx4_warn(dev, "Failed to close slave function\n");
1319 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1309 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1320} 1310}
1321 1311
@@ -1413,7 +1403,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1413 u32 cmd_channel_ver; 1403 u32 cmd_channel_ver;
1414 1404
1415 if (atomic_read(&pf_loading)) { 1405 if (atomic_read(&pf_loading)) {
1416 mlx4_warn(dev, "PF is not ready. Deferring probe\n"); 1406 mlx4_warn(dev, "PF is not ready - Deferring probe\n");
1417 return -EPROBE_DEFER; 1407 return -EPROBE_DEFER;
1418 } 1408 }
1419 1409
@@ -1426,8 +1416,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1426 * NUM_OF_RESET_RETRIES times before leaving.*/ 1416 * NUM_OF_RESET_RETRIES times before leaving.*/
1427 if (ret_from_reset) { 1417 if (ret_from_reset) {
1428 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1418 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
1429 mlx4_warn(dev, "slave is currently in the " 1419 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
1430 "middle of FLR. Deferring probe.\n");
1431 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1420 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1432 return -EPROBE_DEFER; 1421 return -EPROBE_DEFER;
1433 } else 1422 } else
@@ -1441,8 +1430,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1441 1430
1442 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1431 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
1443 MLX4_COMM_GET_IF_REV(slave_read)) { 1432 MLX4_COMM_GET_IF_REV(slave_read)) {
1444 mlx4_err(dev, "slave driver version is not supported" 1433 mlx4_err(dev, "slave driver version is not supported by the master\n");
1445 " by the master\n");
1446 goto err; 1434 goto err;
1447 } 1435 }
1448 1436
@@ -1520,8 +1508,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1520 1508
1521 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 1509 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
1522 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1510 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1523 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags " 1511 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
1524 "set to use B0 steering. Falling back to A0 steering mode.\n");
1525 } 1512 }
1526 dev->oper_log_mgm_entry_size = 1513 dev->oper_log_mgm_entry_size =
1527 mlx4_log_num_mgm_entry_size > 0 ? 1514 mlx4_log_num_mgm_entry_size > 0 ?
@@ -1529,8 +1516,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1529 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 1516 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
1530 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 1517 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
1531 } 1518 }
1532 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, " 1519 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
1533 "modparam log_num_mgm_entry_size = %d\n",
1534 mlx4_steering_mode_str(dev->caps.steering_mode), 1520 mlx4_steering_mode_str(dev->caps.steering_mode),
1535 dev->oper_log_mgm_entry_size, 1521 dev->oper_log_mgm_entry_size,
1536 mlx4_log_num_mgm_entry_size); 1522 mlx4_log_num_mgm_entry_size);
@@ -1564,15 +1550,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1564 err = mlx4_QUERY_FW(dev); 1550 err = mlx4_QUERY_FW(dev);
1565 if (err) { 1551 if (err) {
1566 if (err == -EACCES) 1552 if (err == -EACCES)
1567 mlx4_info(dev, "non-primary physical function, skipping.\n"); 1553 mlx4_info(dev, "non-primary physical function, skipping\n");
1568 else 1554 else
1569 mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); 1555 mlx4_err(dev, "QUERY_FW command failed, aborting\n");
1570 return err; 1556 return err;
1571 } 1557 }
1572 1558
1573 err = mlx4_load_fw(dev); 1559 err = mlx4_load_fw(dev);
1574 if (err) { 1560 if (err) {
1575 mlx4_err(dev, "Failed to start FW, aborting.\n"); 1561 mlx4_err(dev, "Failed to start FW, aborting\n");
1576 return err; 1562 return err;
1577 } 1563 }
1578 1564
@@ -1584,7 +1570,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1584 1570
1585 err = mlx4_dev_cap(dev, &dev_cap); 1571 err = mlx4_dev_cap(dev, &dev_cap);
1586 if (err) { 1572 if (err) {
1587 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 1573 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
1588 goto err_stop_fw; 1574 goto err_stop_fw;
1589 } 1575 }
1590 1576
@@ -1625,7 +1611,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1625 1611
1626 err = mlx4_INIT_HCA(dev, &init_hca); 1612 err = mlx4_INIT_HCA(dev, &init_hca);
1627 if (err) { 1613 if (err) {
1628 mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); 1614 mlx4_err(dev, "INIT_HCA command failed, aborting\n");
1629 goto err_free_icm; 1615 goto err_free_icm;
1630 } 1616 }
1631 /* 1617 /*
@@ -1636,7 +1622,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1636 memset(&init_hca, 0, sizeof(init_hca)); 1622 memset(&init_hca, 0, sizeof(init_hca));
1637 err = mlx4_QUERY_HCA(dev, &init_hca); 1623 err = mlx4_QUERY_HCA(dev, &init_hca);
1638 if (err) { 1624 if (err) {
1639 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n"); 1625 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
1640 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1626 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1641 } else { 1627 } else {
1642 dev->caps.hca_core_clock = 1628 dev->caps.hca_core_clock =
@@ -1649,14 +1635,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1649 if (!dev->caps.hca_core_clock) { 1635 if (!dev->caps.hca_core_clock) {
1650 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1636 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1651 mlx4_err(dev, 1637 mlx4_err(dev,
1652 "HCA frequency is 0. Timestamping is not supported."); 1638 "HCA frequency is 0 - timestamping is not supported\n");
1653 } else if (map_internal_clock(dev)) { 1639 } else if (map_internal_clock(dev)) {
1654 /* 1640 /*
1655 * Map internal clock, 1641 * Map internal clock,
1656 * in case of failure disable timestamping 1642 * in case of failure disable timestamping
1657 */ 1643 */
1658 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 1644 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1659 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n"); 1645 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
1660 } 1646 }
1661 } 1647 }
1662 } else { 1648 } else {
@@ -1683,7 +1669,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1683 1669
1684 err = mlx4_QUERY_ADAPTER(dev, &adapter); 1670 err = mlx4_QUERY_ADAPTER(dev, &adapter);
1685 if (err) { 1671 if (err) {
1686 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); 1672 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
1687 goto unmap_bf; 1673 goto unmap_bf;
1688 } 1674 }
1689 1675
@@ -1793,79 +1779,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1793 1779
1794 err = mlx4_init_uar_table(dev); 1780 err = mlx4_init_uar_table(dev);
1795 if (err) { 1781 if (err) {
1796 mlx4_err(dev, "Failed to initialize " 1782 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
1797 "user access region table, aborting.\n"); 1783 return err;
1798 return err;
1799 } 1784 }
1800 1785
1801 err = mlx4_uar_alloc(dev, &priv->driver_uar); 1786 err = mlx4_uar_alloc(dev, &priv->driver_uar);
1802 if (err) { 1787 if (err) {
1803 mlx4_err(dev, "Failed to allocate driver access region, " 1788 mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
1804 "aborting.\n");
1805 goto err_uar_table_free; 1789 goto err_uar_table_free;
1806 } 1790 }
1807 1791
1808 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 1792 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
1809 if (!priv->kar) { 1793 if (!priv->kar) {
1810 mlx4_err(dev, "Couldn't map kernel access region, " 1794 mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
1811 "aborting.\n");
1812 err = -ENOMEM; 1795 err = -ENOMEM;
1813 goto err_uar_free; 1796 goto err_uar_free;
1814 } 1797 }
1815 1798
1816 err = mlx4_init_pd_table(dev); 1799 err = mlx4_init_pd_table(dev);
1817 if (err) { 1800 if (err) {
1818 mlx4_err(dev, "Failed to initialize " 1801 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
1819 "protection domain table, aborting.\n");
1820 goto err_kar_unmap; 1802 goto err_kar_unmap;
1821 } 1803 }
1822 1804
1823 err = mlx4_init_xrcd_table(dev); 1805 err = mlx4_init_xrcd_table(dev);
1824 if (err) { 1806 if (err) {
1825 mlx4_err(dev, "Failed to initialize " 1807 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
1826 "reliable connection domain table, aborting.\n");
1827 goto err_pd_table_free; 1808 goto err_pd_table_free;
1828 } 1809 }
1829 1810
1830 err = mlx4_init_mr_table(dev); 1811 err = mlx4_init_mr_table(dev);
1831 if (err) { 1812 if (err) {
1832 mlx4_err(dev, "Failed to initialize " 1813 mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
1833 "memory region table, aborting.\n");
1834 goto err_xrcd_table_free; 1814 goto err_xrcd_table_free;
1835 } 1815 }
1836 1816
1837 if (!mlx4_is_slave(dev)) { 1817 if (!mlx4_is_slave(dev)) {
1838 err = mlx4_init_mcg_table(dev); 1818 err = mlx4_init_mcg_table(dev);
1839 if (err) { 1819 if (err) {
1840 mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n"); 1820 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
1841 goto err_mr_table_free; 1821 goto err_mr_table_free;
1842 } 1822 }
1843 } 1823 }
1844 1824
1845 err = mlx4_init_eq_table(dev); 1825 err = mlx4_init_eq_table(dev);
1846 if (err) { 1826 if (err) {
1847 mlx4_err(dev, "Failed to initialize " 1827 mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
1848 "event queue table, aborting.\n");
1849 goto err_mcg_table_free; 1828 goto err_mcg_table_free;
1850 } 1829 }
1851 1830
1852 err = mlx4_cmd_use_events(dev); 1831 err = mlx4_cmd_use_events(dev);
1853 if (err) { 1832 if (err) {
1854 mlx4_err(dev, "Failed to switch to event-driven " 1833 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
1855 "firmware commands, aborting.\n");
1856 goto err_eq_table_free; 1834 goto err_eq_table_free;
1857 } 1835 }
1858 1836
1859 err = mlx4_NOP(dev); 1837 err = mlx4_NOP(dev);
1860 if (err) { 1838 if (err) {
1861 if (dev->flags & MLX4_FLAG_MSI_X) { 1839 if (dev->flags & MLX4_FLAG_MSI_X) {
1862 mlx4_warn(dev, "NOP command failed to generate MSI-X " 1840 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
1863 "interrupt IRQ %d).\n",
1864 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1841 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1865 mlx4_warn(dev, "Trying again without MSI-X.\n"); 1842 mlx4_warn(dev, "Trying again without MSI-X\n");
1866 } else { 1843 } else {
1867 mlx4_err(dev, "NOP command failed to generate interrupt " 1844 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
1868 "(IRQ %d), aborting.\n",
1869 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1845 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1870 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 1846 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
1871 } 1847 }
@@ -1877,28 +1853,25 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1877 1853
1878 err = mlx4_init_cq_table(dev); 1854 err = mlx4_init_cq_table(dev);
1879 if (err) { 1855 if (err) {
1880 mlx4_err(dev, "Failed to initialize " 1856 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
1881 "completion queue table, aborting.\n");
1882 goto err_cmd_poll; 1857 goto err_cmd_poll;
1883 } 1858 }
1884 1859
1885 err = mlx4_init_srq_table(dev); 1860 err = mlx4_init_srq_table(dev);
1886 if (err) { 1861 if (err) {
1887 mlx4_err(dev, "Failed to initialize " 1862 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
1888 "shared receive queue table, aborting.\n");
1889 goto err_cq_table_free; 1863 goto err_cq_table_free;
1890 } 1864 }
1891 1865
1892 err = mlx4_init_qp_table(dev); 1866 err = mlx4_init_qp_table(dev);
1893 if (err) { 1867 if (err) {
1894 mlx4_err(dev, "Failed to initialize " 1868 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
1895 "queue pair table, aborting.\n");
1896 goto err_srq_table_free; 1869 goto err_srq_table_free;
1897 } 1870 }
1898 1871
1899 err = mlx4_init_counters_table(dev); 1872 err = mlx4_init_counters_table(dev);
1900 if (err && err != -ENOENT) { 1873 if (err && err != -ENOENT) {
1901 mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); 1874 mlx4_err(dev, "Failed to initialize counters table, aborting\n");
1902 goto err_qp_table_free; 1875 goto err_qp_table_free;
1903 } 1876 }
1904 1877
@@ -1908,9 +1881,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1908 err = mlx4_get_port_ib_caps(dev, port, 1881 err = mlx4_get_port_ib_caps(dev, port,
1909 &ib_port_default_caps); 1882 &ib_port_default_caps);
1910 if (err) 1883 if (err)
1911 mlx4_warn(dev, "failed to get port %d default " 1884 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
1912 "ib capabilities (%d). Continuing " 1885 port, err);
1913 "with caps = 0\n", port, err);
1914 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 1886 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
1915 1887
1916 /* initialize per-slave default ib port capabilities */ 1888 /* initialize per-slave default ib port capabilities */
@@ -1920,7 +1892,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1920 if (i == mlx4_master_func_num(dev)) 1892 if (i == mlx4_master_func_num(dev))
1921 continue; 1893 continue;
1922 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 1894 priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
1923 ib_port_default_caps; 1895 ib_port_default_caps;
1924 } 1896 }
1925 } 1897 }
1926 1898
@@ -1933,7 +1905,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
1933 dev->caps.pkey_table_len[port] : -1); 1905 dev->caps.pkey_table_len[port] : -1);
1934 if (err) { 1906 if (err) {
1935 mlx4_err(dev, "Failed to set port %d, aborting\n", 1907 mlx4_err(dev, "Failed to set port %d, aborting\n",
1936 port); 1908 port);
1937 goto err_counters_table_free; 1909 goto err_counters_table_free;
1938 } 1910 }
1939 } 1911 }
@@ -2009,7 +1981,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2009 kfree(entries); 1981 kfree(entries);
2010 goto no_msi; 1982 goto no_msi;
2011 } else if (nreq < MSIX_LEGACY_SZ + 1983 } else if (nreq < MSIX_LEGACY_SZ +
2012 dev->caps.num_ports * MIN_MSIX_P_PORT) { 1984 dev->caps.num_ports * MIN_MSIX_P_PORT) {
2013 /*Working in legacy mode , all EQ's shared*/ 1985 /*Working in legacy mode , all EQ's shared*/
2014 dev->caps.comp_pool = 0; 1986 dev->caps.comp_pool = 0;
2015 dev->caps.num_comp_vectors = nreq - 1; 1987 dev->caps.num_comp_vectors = nreq - 1;
@@ -2209,8 +2181,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2209 2181
2210 err = pci_enable_device(pdev); 2182 err = pci_enable_device(pdev);
2211 if (err) { 2183 if (err) {
2212 dev_err(&pdev->dev, "Cannot enable PCI device, " 2184 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
2213 "aborting.\n");
2214 return err; 2185 return err;
2215 } 2186 }
2216 2187
@@ -2257,14 +2228,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2257 */ 2228 */
2258 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 2229 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
2259 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 2230 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2260 dev_err(&pdev->dev, "Missing DCS, aborting." 2231 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
2261 "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
2262 pci_dev_data, pci_resource_flags(pdev, 0)); 2232 pci_dev_data, pci_resource_flags(pdev, 0));
2263 err = -ENODEV; 2233 err = -ENODEV;
2264 goto err_disable_pdev; 2234 goto err_disable_pdev;
2265 } 2235 }
2266 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 2236 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
2267 dev_err(&pdev->dev, "Missing UAR, aborting.\n"); 2237 dev_err(&pdev->dev, "Missing UAR, aborting\n");
2268 err = -ENODEV; 2238 err = -ENODEV;
2269 goto err_disable_pdev; 2239 goto err_disable_pdev;
2270 } 2240 }
@@ -2279,21 +2249,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2279 2249
2280 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2250 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2281 if (err) { 2251 if (err) {
2282 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 2252 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
2283 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2253 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2284 if (err) { 2254 if (err) {
2285 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 2255 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
2286 goto err_release_regions; 2256 goto err_release_regions;
2287 } 2257 }
2288 } 2258 }
2289 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2259 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2290 if (err) { 2260 if (err) {
2291 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 2261 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
2292 "consistent PCI DMA mask.\n");
2293 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2262 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2294 if (err) { 2263 if (err) {
2295 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 2264 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
2296 "aborting.\n");
2297 goto err_release_regions; 2265 goto err_release_regions;
2298 } 2266 }
2299 } 2267 }
@@ -2324,7 +2292,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2324 if (total_vfs) { 2292 if (total_vfs) {
2325 unsigned vfs_offset = 0; 2293 unsigned vfs_offset = 0;
2326 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 2294 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
2327 vfs_offset + nvfs[i] < extended_func_num(pdev); 2295 vfs_offset + nvfs[i] < extended_func_num(pdev);
2328 vfs_offset += nvfs[i], i++) 2296 vfs_offset += nvfs[i], i++)
2329 ; 2297 ;
2330 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 2298 if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
@@ -2350,8 +2318,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2350 if (err < 0) 2318 if (err < 0)
2351 goto err_free_dev; 2319 goto err_free_dev;
2352 else { 2320 else {
2353 mlx4_warn(dev, "Multiple PFs not yet supported." 2321 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
2354 " Skipping PF.\n");
2355 err = -EINVAL; 2322 err = -EINVAL;
2356 goto err_free_dev; 2323 goto err_free_dev;
2357 } 2324 }
@@ -2361,8 +2328,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2361 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", 2328 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
2362 total_vfs); 2329 total_vfs);
2363 dev->dev_vfs = kzalloc( 2330 dev->dev_vfs = kzalloc(
2364 total_vfs * sizeof(*dev->dev_vfs), 2331 total_vfs * sizeof(*dev->dev_vfs),
2365 GFP_KERNEL); 2332 GFP_KERNEL);
2366 if (NULL == dev->dev_vfs) { 2333 if (NULL == dev->dev_vfs) {
2367 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2334 mlx4_err(dev, "Failed to allocate memory for VFs\n");
2368 err = 0; 2335 err = 0;
@@ -2370,14 +2337,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2370 atomic_inc(&pf_loading); 2337 atomic_inc(&pf_loading);
2371 err = pci_enable_sriov(pdev, total_vfs); 2338 err = pci_enable_sriov(pdev, total_vfs);
2372 if (err) { 2339 if (err) {
2373 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", 2340 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
2374 err); 2341 err);
2375 atomic_dec(&pf_loading); 2342 atomic_dec(&pf_loading);
2376 err = 0; 2343 err = 0;
2377 } else { 2344 } else {
2378 mlx4_warn(dev, "Running in master mode\n"); 2345 mlx4_warn(dev, "Running in master mode\n");
2379 dev->flags |= MLX4_FLAG_SRIOV | 2346 dev->flags |= MLX4_FLAG_SRIOV |
2380 MLX4_FLAG_MASTER; 2347 MLX4_FLAG_MASTER;
2381 dev->num_vfs = total_vfs; 2348 dev->num_vfs = total_vfs;
2382 sriov_initialized = 1; 2349 sriov_initialized = 1;
2383 } 2350 }
@@ -2394,7 +2361,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2394 */ 2361 */
2395 err = mlx4_reset(dev); 2362 err = mlx4_reset(dev);
2396 if (err) { 2363 if (err) {
2397 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 2364 mlx4_err(dev, "Failed to reset HCA, aborting\n");
2398 goto err_rel_own; 2365 goto err_rel_own;
2399 } 2366 }
2400 } 2367 }
@@ -2402,7 +2369,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2402slave_start: 2369slave_start:
2403 err = mlx4_cmd_init(dev); 2370 err = mlx4_cmd_init(dev);
2404 if (err) { 2371 if (err) {
2405 mlx4_err(dev, "Failed to init command interface, aborting.\n"); 2372 mlx4_err(dev, "Failed to init command interface, aborting\n");
2406 goto err_sriov; 2373 goto err_sriov;
2407 } 2374 }
2408 2375
@@ -2416,8 +2383,7 @@ slave_start:
2416 dev->num_slaves = 0; 2383 dev->num_slaves = 0;
2417 err = mlx4_multi_func_init(dev); 2384 err = mlx4_multi_func_init(dev);
2418 if (err) { 2385 if (err) {
2419 mlx4_err(dev, "Failed to init slave mfunc" 2386 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
2420 " interface, aborting.\n");
2421 goto err_cmd; 2387 goto err_cmd;
2422 } 2388 }
2423 } 2389 }
@@ -2448,8 +2414,7 @@ slave_start:
2448 unsigned sum = 0; 2414 unsigned sum = 0;
2449 err = mlx4_multi_func_init(dev); 2415 err = mlx4_multi_func_init(dev);
2450 if (err) { 2416 if (err) {
2451 mlx4_err(dev, "Failed to init master mfunc" 2417 mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
2452 "interface, aborting.\n");
2453 goto err_close; 2418 goto err_close;
2454 } 2419 }
2455 if (sriov_initialized) { 2420 if (sriov_initialized) {
@@ -2460,10 +2425,7 @@ slave_start:
2460 if (ib_ports && 2425 if (ib_ports &&
2461 (num_vfs_argc > 1 || probe_vfs_argc > 1)) { 2426 (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
2462 mlx4_err(dev, 2427 mlx4_err(dev,
2463 "Invalid syntax of num_vfs/probe_vfs " 2428 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
2464 "with IB port. Single port VFs syntax"
2465 " is only supported when all ports "
2466 "are configured as ethernet\n");
2467 goto err_close; 2429 goto err_close;
2468 } 2430 }
2469 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { 2431 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
@@ -2489,8 +2451,7 @@ slave_start:
2489 if ((mlx4_is_mfunc(dev)) && 2451 if ((mlx4_is_mfunc(dev)) &&
2490 !(dev->flags & MLX4_FLAG_MSI_X)) { 2452 !(dev->flags & MLX4_FLAG_MSI_X)) {
2491 err = -ENOSYS; 2453 err = -ENOSYS;
2492 mlx4_err(dev, "INTx is not supported in multi-function mode." 2454 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
2493 " aborting.\n");
2494 goto err_free_eq; 2455 goto err_free_eq;
2495 } 2456 }
2496 2457
@@ -2828,11 +2789,10 @@ static int __init mlx4_verify_params(void)
2828 if (mlx4_log_num_mgm_entry_size != -1 && 2789 if (mlx4_log_num_mgm_entry_size != -1 &&
2829 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 2790 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
2830 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { 2791 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
2831 pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not " 2792 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
2832 "in legal range (-1 or %d..%d)\n", 2793 mlx4_log_num_mgm_entry_size,
2833 mlx4_log_num_mgm_entry_size, 2794 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
2834 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 2795 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2835 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2836 return -1; 2796 return -1;
2837 } 2797 }
2838 2798
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 80ccb4edf825..7c6eba622186 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
638 638
639 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 639 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
640 if (*index != hash) { 640 if (*index != hash) {
641 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 641 mlx4_err(dev, "Found zero MGID in AMGM\n");
642 err = -EINVAL; 642 err = -EINVAL;
643 } 643 }
644 return err; 644 return err;
@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
874 mlx4_err(dev, "%s", buf); 874 mlx4_err(dev, "%s", buf);
875 875
876 if (len >= BUF_SIZE) 876 if (len >= BUF_SIZE)
877 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); 877 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
878} 878}
879 879
880int mlx4_flow_attach(struct mlx4_dev *dev, 880int mlx4_flow_attach(struct mlx4_dev *dev,
@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
905 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); 905 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
906 if (ret == -ENOMEM) 906 if (ret == -ENOMEM)
907 mlx4_err_rule(dev, 907 mlx4_err_rule(dev,
908 "mcg table is full. Fail to register network rule.\n", 908 "mcg table is full. Fail to register network rule\n",
909 rule); 909 rule);
910 else if (ret) 910 else if (ret)
911 mlx4_err_rule(dev, "Fail to register network rule.\n", rule); 911 mlx4_err_rule(dev, "Fail to register network rule\n", rule);
912 912
913 mlx4_free_cmd_mailbox(dev, mailbox); 913 mlx4_free_cmd_mailbox(dev, mailbox);
914 914
@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
994 994
995 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 995 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
996 if (members_count == dev->caps.num_qp_per_mgm) { 996 if (members_count == dev->caps.num_qp_per_mgm) {
997 mlx4_err(dev, "MGM at index %x is full.\n", index); 997 mlx4_err(dev, "MGM at index %x is full\n", index);
998 err = -ENOMEM; 998 err = -ENOMEM;
999 goto out; 999 goto out;
1000 } 1000 }
@@ -1042,7 +1042,7 @@ out:
1042 } 1042 }
1043 if (err && link && index != -1) { 1043 if (err && link && index != -1) {
1044 if (index < dev->caps.num_mgms) 1044 if (index < dev->caps.num_mgms)
1045 mlx4_warn(dev, "Got AMGM index %d < %d", 1045 mlx4_warn(dev, "Got AMGM index %d < %d\n",
1046 index, dev->caps.num_mgms); 1046 index, dev->caps.num_mgms);
1047 else 1047 else
1048 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1048 mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1133 1133
1134 if (amgm_index) { 1134 if (amgm_index) {
1135 if (amgm_index < dev->caps.num_mgms) 1135 if (amgm_index < dev->caps.num_mgms)
1136 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", 1136 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
1137 index, amgm_index, dev->caps.num_mgms); 1137 index, amgm_index, dev->caps.num_mgms);
1138 else 1138 else
1139 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1139 mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1153 goto out; 1153 goto out;
1154 1154
1155 if (index < dev->caps.num_mgms) 1155 if (index < dev->caps.num_mgms)
1156 mlx4_warn(dev, "entry %d had next AMGM index %d < %d", 1156 mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
1157 prev, index, dev->caps.num_mgms); 1157 prev, index, dev->caps.num_mgms);
1158 else 1158 else
1159 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1159 mlx4_bitmap_free(&priv->mcg_table.bitmap,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f9c465101963..52c1e7da74c4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -216,18 +216,19 @@ extern int mlx4_debug_level;
216#define mlx4_debug_level (0) 216#define mlx4_debug_level (0)
217#endif /* CONFIG_MLX4_DEBUG */ 217#endif /* CONFIG_MLX4_DEBUG */
218 218
219#define mlx4_dbg(mdev, format, arg...) \ 219#define mlx4_dbg(mdev, format, ...) \
220do { \ 220do { \
221 if (mlx4_debug_level) \ 221 if (mlx4_debug_level) \
222 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \ 222 dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \
223 ##__VA_ARGS__); \
223} while (0) 224} while (0)
224 225
225#define mlx4_err(mdev, format, arg...) \ 226#define mlx4_err(mdev, format, ...) \
226 dev_err(&mdev->pdev->dev, format, ##arg) 227 dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
227#define mlx4_info(mdev, format, arg...) \ 228#define mlx4_info(mdev, format, ...) \
228 dev_info(&mdev->pdev->dev, format, ##arg) 229 dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
229#define mlx4_warn(mdev, format, arg...) \ 230#define mlx4_warn(mdev, format, ...) \
230 dev_warn(&mdev->pdev->dev, format, ##arg) 231 dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
231 232
232extern int mlx4_log_num_mgm_entry_size; 233extern int mlx4_log_num_mgm_entry_size;
233extern int log_mtts_per_seg; 234extern int log_mtts_per_seg;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 04d9b6fe3e80..b5db1bf361dc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -830,26 +830,26 @@ __printf(3, 4)
830int en_print(const char *level, const struct mlx4_en_priv *priv, 830int en_print(const char *level, const struct mlx4_en_priv *priv,
831 const char *format, ...); 831 const char *format, ...);
832 832
833#define en_dbg(mlevel, priv, format, arg...) \ 833#define en_dbg(mlevel, priv, format, ...) \
834do { \ 834do { \
835 if (NETIF_MSG_##mlevel & priv->msg_enable) \ 835 if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
836 en_print(KERN_DEBUG, priv, format, ##arg); \ 836 en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
837} while (0) 837} while (0)
838#define en_warn(priv, format, arg...) \ 838#define en_warn(priv, format, ...) \
839 en_print(KERN_WARNING, priv, format, ##arg) 839 en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
840#define en_err(priv, format, arg...) \ 840#define en_err(priv, format, ...) \
841 en_print(KERN_ERR, priv, format, ##arg) 841 en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
842#define en_info(priv, format, arg...) \ 842#define en_info(priv, format, ...) \
843 en_print(KERN_INFO, priv, format, ## arg) 843 en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
844 844
845#define mlx4_err(mdev, format, arg...) \ 845#define mlx4_err(mdev, format, ...) \
846 pr_err("%s %s: " format, DRV_NAME, \ 846 pr_err(DRV_NAME " %s: " format, \
847 dev_name(&mdev->pdev->dev), ##arg) 847 dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
848#define mlx4_info(mdev, format, arg...) \ 848#define mlx4_info(mdev, format, ...) \
849 pr_info("%s %s: " format, DRV_NAME, \ 849 pr_info(DRV_NAME " %s: " format, \
850 dev_name(&mdev->pdev->dev), ##arg) 850 dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
851#define mlx4_warn(mdev, format, arg...) \ 851#define mlx4_warn(mdev, format, ...) \
852 pr_warning("%s %s: " format, DRV_NAME, \ 852 pr_warn(DRV_NAME " %s: " format, \
853 dev_name(&mdev->pdev->dev), ##arg) 853 dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
854 854
855#endif 855#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 24835853b753..64fb3e6431a0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
250 MLX4_CMD_TIME_CLASS_A, 250 MLX4_CMD_TIME_CLASS_A,
251 MLX4_CMD_WRAPPED); 251 MLX4_CMD_WRAPPED);
252 if (err) 252 if (err)
253 mlx4_warn(dev, "Failed to free mtt range at:" 253 mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
254 "%d order:%d\n", offset, order); 254 offset, order);
255 return; 255 return;
256 } 256 }
257 __mlx4_free_mtt_range(dev, offset, order); 257 __mlx4_free_mtt_range(dev, offset, order);
@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
436 key_to_hw_index(mr->key) & 436 key_to_hw_index(mr->key) &
437 (dev->caps.num_mpts - 1)); 437 (dev->caps.num_mpts - 1));
438 if (err) { 438 if (err) {
439 mlx4_warn(dev, "HW2SW_MPT failed (%d),", err); 439 mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
440 mlx4_warn(dev, "MR has MWs bound to it.\n"); 440 err);
441 return err; 441 return err;
442 } 442 }
443 443
@@ -773,7 +773,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
773 mlx4_alloc_mtt_range(dev, 773 mlx4_alloc_mtt_range(dev,
774 fls(dev->caps.reserved_mtts - 1)); 774 fls(dev->caps.reserved_mtts - 1));
775 if (priv->reserved_mtts < 0) { 775 if (priv->reserved_mtts < 0) {
776 mlx4_warn(dev, "MTT table of order %u is too small.\n", 776 mlx4_warn(dev, "MTT table of order %u is too small\n",
777 mr_table->mtt_buddy.max_order); 777 mr_table->mtt_buddy.max_order);
778 err = -ENOMEM; 778 err = -ENOMEM;
779 goto err_reserve_mtts; 779 goto err_reserve_mtts;
@@ -954,8 +954,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
954 mailbox = mlx4_alloc_cmd_mailbox(dev); 954 mailbox = mlx4_alloc_cmd_mailbox(dev);
955 if (IS_ERR(mailbox)) { 955 if (IS_ERR(mailbox)) {
956 err = PTR_ERR(mailbox); 956 err = PTR_ERR(mailbox);
957 printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox" 957 printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
958 " failed (%d)\n", err); 958 err);
959 return; 959 return;
960 } 960 }
961 961
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index cfcad26ed40f..1f6d29183f1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -244,8 +244,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
244 if (validate_index(dev, table, index)) 244 if (validate_index(dev, table, index))
245 goto out; 245 goto out;
246 if (--table->refs[index]) { 246 if (--table->refs[index]) {
247 mlx4_dbg(dev, "Have more references for index %d," 247 mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
248 "no need to modify mac table\n", index); 248 index);
249 goto out; 249 goto out;
250 } 250 }
251 251
@@ -443,9 +443,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
443 } 443 }
444 444
445 if (--table->refs[index]) { 445 if (--table->refs[index]) {
446 mlx4_dbg(dev, "Have %d more references for index %d," 446 mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
447 "no need to modify vlan table\n", table->refs[index], 447 table->refs[index], index);
448 index);
449 goto out; 448 goto out;
450 } 449 }
451 table->entries[index] = 0; 450 table->entries[index] = 0;
@@ -706,8 +705,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
706 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, 705 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
707 sizeof(gid_entry_tbl->raw))) { 706 sizeof(gid_entry_tbl->raw))) {
708 /* found duplicate */ 707 /* found duplicate */
709 mlx4_warn(dev, "requested gid entry for slave:%d " 708 mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
710 "is a duplicate of gid at index %d\n",
711 slave, i); 709 slave, i);
712 return -EINVAL; 710 return -EINVAL;
713 } 711 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 8e0c3cc2a1ec..14089d9e1667 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
164 } 164 }
165 165
166 if (total_size > dev_cap->max_icm_sz) { 166 if (total_size > dev_cap->max_icm_sz) {
167 mlx4_err(dev, "Profile requires 0x%llx bytes; " 167 mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
168 "won't fit in 0x%llx bytes of context memory.\n", 168 (unsigned long long) total_size,
169 (unsigned long long) total_size, 169 (unsigned long long) dev_cap->max_icm_sz);
170 (unsigned long long) dev_cap->max_icm_sz);
171 kfree(profile); 170 kfree(profile);
172 return -ENOMEM; 171 return -ENOMEM;
173 } 172 }
174 173
175 if (profile[i].size) 174 if (profile[i].size)
176 mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, " 175 mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
177 "size 0x%10llx\n", 176 i, res_name[profile[i].type],
178 i, res_name[profile[i].type], profile[i].log_num, 177 profile[i].log_num,
179 (unsigned long long) profile[i].start, 178 (unsigned long long) profile[i].start,
180 (unsigned long long) profile[i].size); 179 (unsigned long long) profile[i].size);
181 } 180 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 61d64ebffd56..9bdb6aeb3721 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
264 MLX4_CMD_FREE_RES, 264 MLX4_CMD_FREE_RES,
265 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 265 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
266 if (err) { 266 if (err) {
267 mlx4_warn(dev, "Failed to release qp range" 267 mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
268 " base:%d cnt:%d\n", base_qpn, cnt); 268 base_qpn, cnt);
269 } 269 }
270 } else 270 } else
271 __mlx4_qp_release_range(dev, base_qpn, cnt); 271 __mlx4_qp_release_range(dev, base_qpn, cnt);
@@ -577,8 +577,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
577 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], 577 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
578 context, 0, 0, qp); 578 context, 0, 0, qp);
579 if (err) { 579 if (err) {
580 mlx4_err(dev, "Failed to bring QP to state: " 580 mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
581 "%d with error: %d\n",
582 states[i + 1], err); 581 states[i + 1], err);
583 return err; 582 return err;
584 } 583 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index dd1b5093d8b1..ea1c6d092145 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
72 hca_header = kmalloc(256, GFP_KERNEL); 72 hca_header = kmalloc(256, GFP_KERNEL);
73 if (!hca_header) { 73 if (!hca_header) {
74 err = -ENOMEM; 74 err = -ENOMEM;
75 mlx4_err(dev, "Couldn't allocate memory to save HCA " 75 mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
76 "PCI header, aborting.\n");
77 goto out; 76 goto out;
78 } 77 }
79 78
@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
84 continue; 83 continue;
85 if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { 84 if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
86 err = -ENODEV; 85 err = -ENODEV;
87 mlx4_err(dev, "Couldn't save HCA " 86 mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
88 "PCI header, aborting.\n");
89 goto out; 87 goto out;
90 } 88 }
91 } 89 }
@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
94 MLX4_RESET_SIZE); 92 MLX4_RESET_SIZE);
95 if (!reset) { 93 if (!reset) {
96 err = -ENOMEM; 94 err = -ENOMEM;
97 mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n"); 95 mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
98 goto out; 96 goto out;
99 } 97 }
100 98
@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
133 131
134 if (vendor == 0xffff) { 132 if (vendor == 0xffff) {
135 err = -ENODEV; 133 err = -ENODEV;
136 mlx4_err(dev, "PCI device did not come back after reset, " 134 mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
137 "aborting.\n");
138 goto out; 135 goto out;
139 } 136 }
140 137
@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
144 if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL, 141 if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
145 devctl)) { 142 devctl)) {
146 err = -ENODEV; 143 err = -ENODEV;
147 mlx4_err(dev, "Couldn't restore HCA PCI Express " 144 mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
148 "Device Control register, aborting.\n");
149 goto out; 145 goto out;
150 } 146 }
151 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; 147 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
152 if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL, 148 if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
153 linkctl)) { 149 linkctl)) {
154 err = -ENODEV; 150 err = -ENODEV;
155 mlx4_err(dev, "Couldn't restore HCA PCI Express " 151 mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
156 "Link control register, aborting.\n");
157 goto out; 152 goto out;
158 } 153 }
159 } 154 }
@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
164 159
165 if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { 160 if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
166 err = -ENODEV; 161 err = -ENODEV;
167 mlx4_err(dev, "Couldn't restore HCA reg %x, " 162 mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
168 "aborting.\n", i); 163 i);
169 goto out; 164 goto out;
170 } 165 }
171 } 166 }
@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
173 if (pci_write_config_dword(dev->pdev, PCI_COMMAND, 168 if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
174 hca_header[PCI_COMMAND / 4])) { 169 hca_header[PCI_COMMAND / 4])) {
175 err = -ENODEV; 170 err = -ENODEV;
176 mlx4_err(dev, "Couldn't restore HCA COMMAND, " 171 mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
177 "aborting.\n");
178 goto out; 172 goto out;
179 } 173 }
180 174
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 3b5f53ef29b2..12fa515a7dd8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3857,7 +3857,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
3857 } 3857 }
3858 } 3858 }
3859 if (!be_mac) { 3859 if (!be_mac) {
3860 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n", 3860 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
3861 port); 3861 port);
3862 return -EINVAL; 3862 return -EINVAL;
3863 } 3863 }
@@ -3900,7 +3900,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3900 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 3900 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3901 err = get_res(dev, slave, qpn, RES_QP, &rqp); 3901 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3902 if (err) { 3902 if (err) {
3903 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); 3903 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
3904 return err; 3904 return err;
3905 } 3905 }
3906 rule_header = (struct _rule_hw *)(ctrl + 1); 3906 rule_header = (struct _rule_hw *)(ctrl + 1);
@@ -3918,7 +3918,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3918 case MLX4_NET_TRANS_RULE_ID_IPV4: 3918 case MLX4_NET_TRANS_RULE_ID_IPV4:
3919 case MLX4_NET_TRANS_RULE_ID_TCP: 3919 case MLX4_NET_TRANS_RULE_ID_TCP:
3920 case MLX4_NET_TRANS_RULE_ID_UDP: 3920 case MLX4_NET_TRANS_RULE_ID_UDP:
3921 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); 3921 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
3922 if (add_eth_header(dev, slave, inbox, rlist, header_id)) { 3922 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3923 err = -EINVAL; 3923 err = -EINVAL;
3924 goto err_put; 3924 goto err_put;
@@ -3927,7 +3927,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3927 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; 3927 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3928 break; 3928 break;
3929 default: 3929 default:
3930 pr_err("Corrupted mailbox.\n"); 3930 pr_err("Corrupted mailbox\n");
3931 err = -EINVAL; 3931 err = -EINVAL;
3932 goto err_put; 3932 goto err_put;
3933 } 3933 }
@@ -3941,7 +3941,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3941 3941
3942 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); 3942 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3943 if (err) { 3943 if (err) {
3944 mlx4_err(dev, "Fail to add flow steering resources.\n "); 3944 mlx4_err(dev, "Fail to add flow steering resources\n");
3945 /* detach rule*/ 3945 /* detach rule*/
3946 mlx4_cmd(dev, vhcr->out_param, 0, 0, 3946 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3947 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 3947 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@ -3979,7 +3979,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3979 3979
3980 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); 3980 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3981 if (err) { 3981 if (err) {
3982 mlx4_err(dev, "Fail to remove flow steering resources.\n "); 3982 mlx4_err(dev, "Fail to remove flow steering resources\n");
3983 goto out; 3983 goto out;
3984 } 3984 }
3985 3985
@@ -4108,8 +4108,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4108 4108
4109 err = move_all_busy(dev, slave, RES_QP); 4109 err = move_all_busy(dev, slave, RES_QP);
4110 if (err) 4110 if (err)
4111 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" 4111 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4112 "for slave %d\n", slave); 4112 slave);
4113 4113
4114 spin_lock_irq(mlx4_tlock(dev)); 4114 spin_lock_irq(mlx4_tlock(dev));
4115 list_for_each_entry_safe(qp, tmp, qp_list, com.list) { 4115 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4147,10 +4147,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4147 MLX4_CMD_TIME_CLASS_A, 4147 MLX4_CMD_TIME_CLASS_A,
4148 MLX4_CMD_NATIVE); 4148 MLX4_CMD_NATIVE);
4149 if (err) 4149 if (err)
4150 mlx4_dbg(dev, "rem_slave_qps: failed" 4150 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4151 " to move slave %d qpn %d to" 4151 slave, qp->local_qpn);
4152 " reset\n", slave,
4153 qp->local_qpn);
4154 atomic_dec(&qp->rcq->ref_count); 4152 atomic_dec(&qp->rcq->ref_count);
4155 atomic_dec(&qp->scq->ref_count); 4153 atomic_dec(&qp->scq->ref_count);
4156 atomic_dec(&qp->mtt->ref_count); 4154 atomic_dec(&qp->mtt->ref_count);
@@ -4184,8 +4182,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4184 4182
4185 err = move_all_busy(dev, slave, RES_SRQ); 4183 err = move_all_busy(dev, slave, RES_SRQ);
4186 if (err) 4184 if (err)
4187 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " 4185 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4188 "busy for slave %d\n", slave); 4186 slave);
4189 4187
4190 spin_lock_irq(mlx4_tlock(dev)); 4188 spin_lock_irq(mlx4_tlock(dev));
4191 list_for_each_entry_safe(srq, tmp, srq_list, com.list) { 4189 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -4215,9 +4213,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4215 MLX4_CMD_TIME_CLASS_A, 4213 MLX4_CMD_TIME_CLASS_A,
4216 MLX4_CMD_NATIVE); 4214 MLX4_CMD_NATIVE);
4217 if (err) 4215 if (err)
4218 mlx4_dbg(dev, "rem_slave_srqs: failed" 4216 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4219 " to move slave %d srq %d to"
4220 " SW ownership\n",
4221 slave, srqn); 4217 slave, srqn);
4222 4218
4223 atomic_dec(&srq->mtt->ref_count); 4219 atomic_dec(&srq->mtt->ref_count);
@@ -4252,8 +4248,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4252 4248
4253 err = move_all_busy(dev, slave, RES_CQ); 4249 err = move_all_busy(dev, slave, RES_CQ);
4254 if (err) 4250 if (err)
4255 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " 4251 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4256 "busy for slave %d\n", slave); 4252 slave);
4257 4253
4258 spin_lock_irq(mlx4_tlock(dev)); 4254 spin_lock_irq(mlx4_tlock(dev));
4259 list_for_each_entry_safe(cq, tmp, cq_list, com.list) { 4255 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -4283,9 +4279,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4283 MLX4_CMD_TIME_CLASS_A, 4279 MLX4_CMD_TIME_CLASS_A,
4284 MLX4_CMD_NATIVE); 4280 MLX4_CMD_NATIVE);
4285 if (err) 4281 if (err)
4286 mlx4_dbg(dev, "rem_slave_cqs: failed" 4282 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4287 " to move slave %d cq %d to"
4288 " SW ownership\n",
4289 slave, cqn); 4283 slave, cqn);
4290 atomic_dec(&cq->mtt->ref_count); 4284 atomic_dec(&cq->mtt->ref_count);
4291 state = RES_CQ_ALLOCATED; 4285 state = RES_CQ_ALLOCATED;
@@ -4317,8 +4311,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4317 4311
4318 err = move_all_busy(dev, slave, RES_MPT); 4312 err = move_all_busy(dev, slave, RES_MPT);
4319 if (err) 4313 if (err)
4320 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to " 4314 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4321 "busy for slave %d\n", slave); 4315 slave);
4322 4316
4323 spin_lock_irq(mlx4_tlock(dev)); 4317 spin_lock_irq(mlx4_tlock(dev));
4324 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { 4318 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -4353,9 +4347,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4353 MLX4_CMD_TIME_CLASS_A, 4347 MLX4_CMD_TIME_CLASS_A,
4354 MLX4_CMD_NATIVE); 4348 MLX4_CMD_NATIVE);
4355 if (err) 4349 if (err)
4356 mlx4_dbg(dev, "rem_slave_mrs: failed" 4350 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4357 " to move slave %d mpt %d to"
4358 " SW ownership\n",
4359 slave, mptn); 4351 slave, mptn);
4360 if (mpt->mtt) 4352 if (mpt->mtt)
4361 atomic_dec(&mpt->mtt->ref_count); 4353 atomic_dec(&mpt->mtt->ref_count);
@@ -4387,8 +4379,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4387 4379
4388 err = move_all_busy(dev, slave, RES_MTT); 4380 err = move_all_busy(dev, slave, RES_MTT);
4389 if (err) 4381 if (err)
4390 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " 4382 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4391 "busy for slave %d\n", slave); 4383 slave);
4392 4384
4393 spin_lock_irq(mlx4_tlock(dev)); 4385 spin_lock_irq(mlx4_tlock(dev));
4394 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { 4386 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -4490,8 +4482,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4490 4482
4491 err = move_all_busy(dev, slave, RES_EQ); 4483 err = move_all_busy(dev, slave, RES_EQ);
4492 if (err) 4484 if (err)
4493 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " 4485 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4494 "busy for slave %d\n", slave); 4486 slave);
4495 4487
4496 spin_lock_irq(mlx4_tlock(dev)); 4488 spin_lock_irq(mlx4_tlock(dev));
4497 list_for_each_entry_safe(eq, tmp, eq_list, com.list) { 4489 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -4523,9 +4515,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4523 MLX4_CMD_TIME_CLASS_A, 4515 MLX4_CMD_TIME_CLASS_A,
4524 MLX4_CMD_NATIVE); 4516 MLX4_CMD_NATIVE);
4525 if (err) 4517 if (err)
4526 mlx4_dbg(dev, "rem_slave_eqs: failed" 4518 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4527 " to move slave %d eqs %d to" 4519 slave, eqn);
4528 " SW ownership\n", slave, eqn);
4529 mlx4_free_cmd_mailbox(dev, mailbox); 4520 mlx4_free_cmd_mailbox(dev, mailbox);
4530 atomic_dec(&eq->mtt->ref_count); 4521 atomic_dec(&eq->mtt->ref_count);
4531 state = RES_EQ_RESERVED; 4522 state = RES_EQ_RESERVED;
@@ -4554,8 +4545,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4554 4545
4555 err = move_all_busy(dev, slave, RES_COUNTER); 4546 err = move_all_busy(dev, slave, RES_COUNTER);
4556 if (err) 4547 if (err)
4557 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to " 4548 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4558 "busy for slave %d\n", slave); 4549 slave);
4559 4550
4560 spin_lock_irq(mlx4_tlock(dev)); 4551 spin_lock_irq(mlx4_tlock(dev));
4561 list_for_each_entry_safe(counter, tmp, counter_list, com.list) { 4552 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@ -4585,8 +4576,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4585 4576
4586 err = move_all_busy(dev, slave, RES_XRCD); 4577 err = move_all_busy(dev, slave, RES_XRCD);
4587 if (err) 4578 if (err)
4588 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to " 4579 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4589 "busy for slave %d\n", slave); 4580 slave);
4590 4581
4591 spin_lock_irq(mlx4_tlock(dev)); 4582 spin_lock_irq(mlx4_tlock(dev));
4592 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { 4583 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -4731,10 +4722,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4731 0, MLX4_CMD_UPDATE_QP, 4722 0, MLX4_CMD_UPDATE_QP,
4732 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 4723 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4733 if (err) { 4724 if (err) {
4734 mlx4_info(dev, "UPDATE_QP failed for slave %d, " 4725 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4735 "port %d, qpn %d (%d)\n", 4726 work->slave, port, qp->local_qpn, err);
4736 work->slave, port, qp->local_qpn,
4737 err);
4738 errors++; 4727 errors++;
4739 } 4728 }
4740 } 4729 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 405c4fbcd0ad..87d1b018a9c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
620 mlx5_command_str(msg_to_opcode(ent->in)), 620 mlx5_command_str(msg_to_opcode(ent->in)),
621 msg_to_opcode(ent->in)); 621 msg_to_opcode(ent->in));
622 } 622 }
623 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, 623 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
624 deliv_status_to_str(ent->status), ent->status); 624 err, deliv_status_to_str(ent->status), ent->status);
625 625
626 return err; 626 return err;
627} 627}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 64a61b286b2c..7f39ebcd6ad0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
208 */ 208 */
209 rmb(); 209 rmb();
210 210
211 mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type)); 211 mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
212 eq->eqn, eqe_type_str(eqe->type));
212 switch (eqe->type) { 213 switch (eqe->type) {
213 case MLX5_EVENT_TYPE_COMP: 214 case MLX5_EVENT_TYPE_COMP:
214 cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; 215 cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
270 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); 271 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
271 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); 272 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
272 273
273 mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); 274 mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
275 func_id, npages);
274 mlx5_core_req_pages_handler(dev, func_id, npages); 276 mlx5_core_req_pages_handler(dev, func_id, npages);
275 } 277 }
276 break; 278 break;
277 279
278 280
279 default: 281 default:
280 mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); 282 mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
283 eqe->type, eq->eqn);
281 break; 284 break;
282 } 285 }
283 286
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c3eee5f70051..ee24f132e319 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
66 66
67 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 67 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
68 if (err) { 68 if (err) {
69 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 69 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
70 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 70 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
71 if (err) { 71 if (err) {
72 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 72 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
73 return err; 73 return err;
74 } 74 }
75 } 75 }
@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
77 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 77 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
78 if (err) { 78 if (err) {
79 dev_warn(&pdev->dev, 79 dev_warn(&pdev->dev,
80 "Warning: couldn't set 64-bit consistent PCI DMA mask.\n"); 80 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
81 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 81 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
82 if (err) { 82 if (err) {
83 dev_err(&pdev->dev, 83 dev_err(&pdev->dev,
84 "Can't set consistent PCI DMA mask, aborting.\n"); 84 "Can't set consistent PCI DMA mask, aborting\n");
85 return err; 85 return err;
86 } 86 }
87 } 87 }
@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
95 int err = 0; 95 int err = 0;
96 96
97 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 97 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
98 dev_err(&pdev->dev, "Missing registers BAR, aborting.\n"); 98 dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
99 return -ENODEV; 99 return -ENODEV;
100 } 100 }
101 101
@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
319 319
320 err = pci_enable_device(pdev); 320 err = pci_enable_device(pdev);
321 if (err) { 321 if (err) {
322 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n"); 322 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
323 goto err_dbg; 323 goto err_dbg;
324 } 324 }
325 325
326 err = request_bar(pdev); 326 err = request_bar(pdev);
327 if (err) { 327 if (err) {
328 dev_err(&pdev->dev, "error requesting BARs, aborting.\n"); 328 dev_err(&pdev->dev, "error requesting BARs, aborting\n");
329 goto err_disable; 329 goto err_disable;
330 } 330 }
331 331
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 68b74e1ae1b0..f0c9f9a7a361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,24 +39,26 @@
39 39
40extern int mlx5_core_debug_mask; 40extern int mlx5_core_debug_mask;
41 41
42#define mlx5_core_dbg(dev, format, arg...) \ 42#define mlx5_core_dbg(dev, format, ...) \
43pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ 43 pr_debug("%s:%s:%d:(pid %d): " format, \
44 current->pid, ##arg) 44 (dev)->priv.name, __func__, __LINE__, current->pid, \
45 ##__VA_ARGS__)
45 46
46#define mlx5_core_dbg_mask(dev, mask, format, arg...) \ 47#define mlx5_core_dbg_mask(dev, mask, format, ...) \
47do { \ 48do { \
48 if ((mask) & mlx5_core_debug_mask) \ 49 if ((mask) & mlx5_core_debug_mask) \
49 pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \ 50 mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
50 __func__, __LINE__, current->pid, ##arg); \
51} while (0) 51} while (0)
52 52
53#define mlx5_core_err(dev, format, arg...) \ 53#define mlx5_core_err(dev, format, ...) \
54pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ 54 pr_err("%s:%s:%d:(pid %d): " format, \
55 current->pid, ##arg) 55 (dev)->priv.name, __func__, __LINE__, current->pid, \
56 ##__VA_ARGS__)
56 57
57#define mlx5_core_warn(dev, format, arg...) \ 58#define mlx5_core_warn(dev, format, ...) \
58pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ 59 pr_warn("%s:%s:%d:(pid %d): " format, \
59 current->pid, ##arg) 60 (dev)->priv.name, __func__, __LINE__, current->pid, \
61 ##__VA_ARGS__)
60 62
61enum { 63enum {
62 MLX5_CMD_DATA, /* print command payload only */ 64 MLX5_CMD_DATA, /* print command payload only */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 4cc927649404..0a11b3fe9c19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
73 } 73 }
74 74
75 if (err) { 75 if (err) {
76 mlx5_core_dbg(dev, "cmd exec faile %d\n", err); 76 mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
77 return err; 77 return err;
78 } 78 }
79 79
@@ -191,7 +191,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
191 } 191 }
192 192
193 if (out.hdr.status) { 193 if (out.hdr.status) {
194 mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status); 194 mlx5_core_err(dev, "create_psv bad status %d\n",
195 out.hdr.status);
195 return mlx5_cmd_status_to_err(&out.hdr); 196 return mlx5_cmd_status_to_err(&out.hdr);
196 } 197 }
197 198
@@ -220,7 +221,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
220 } 221 }
221 222
222 if (out.hdr.status) { 223 if (out.hdr.status) {
223 mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status); 224 mlx5_core_err(dev, "destroy_psv bad status %d\n",
225 out.hdr.status);
224 err = mlx5_cmd_status_to_err(&out.hdr); 226 err = mlx5_cmd_status_to_err(&out.hdr);
225 goto out; 227 goto out;
226 } 228 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d59790a82bc3..c2a953ef0e67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -311,7 +311,8 @@ retry:
311 in->num_entries = cpu_to_be32(npages); 311 in->num_entries = cpu_to_be32(npages);
312 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 312 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
313 if (err) { 313 if (err) {
314 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); 314 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
315 func_id, npages, err);
315 goto out_alloc; 316 goto out_alloc;
316 } 317 }
317 dev->priv.fw_pages += npages; 318 dev->priv.fw_pages += npages;
@@ -319,7 +320,8 @@ retry:
319 if (out.hdr.status) { 320 if (out.hdr.status) {
320 err = mlx5_cmd_status_to_err(&out.hdr); 321 err = mlx5_cmd_status_to_err(&out.hdr);
321 if (err) { 322 if (err) {
322 mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status); 323 mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
324 func_id, npages, out.hdr.status);
323 goto out_alloc; 325 goto out_alloc;
324 } 326 }
325 } 327 }
@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
378 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); 380 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
379 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); 381 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
380 if (err) { 382 if (err) {
381 mlx5_core_err(dev, "failed recliaming pages\n"); 383 mlx5_core_err(dev, "failed reclaiming pages\n");
382 goto out_free; 384 goto out_free;
383 } 385 }
384 dev->priv.fw_pages -= npages; 386 dev->priv.fw_pages -= npages;
@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
414 err = give_pages(dev, req->func_id, req->npages, 1); 416 err = give_pages(dev, req->func_id, req->npages, 1);
415 417
416 if (err) 418 if (err)
417 mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ? 419 mlx5_core_warn(dev, "%s fail %d\n",
418 "reclaim" : "give", err); 420 req->npages < 0 ? "reclaim" : "give", err);
419 421
420 kfree(req); 422 kfree(req);
421} 423}
@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
487 optimal_reclaimed_pages(), 489 optimal_reclaimed_pages(),
488 &nclaimed); 490 &nclaimed);
489 if (err) { 491 if (err) {
490 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); 492 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
493 err);
491 return err; 494 return err;
492 } 495 }
493 if (nclaimed) 496 if (nclaimed)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 510576213dd0..8145b4668229 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
79 79
80 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 80 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
81 if (err) { 81 if (err) {
82 mlx5_core_warn(dev, "ret %d", err); 82 mlx5_core_warn(dev, "ret %d\n", err);
83 return err; 83 return err;
84 } 84 }
85 85
@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
96 err = radix_tree_insert(&table->tree, qp->qpn, qp); 96 err = radix_tree_insert(&table->tree, qp->qpn, qp);
97 spin_unlock_irq(&table->lock); 97 spin_unlock_irq(&table->lock);
98 if (err) { 98 if (err) {
99 mlx5_core_warn(dev, "err %d", err); 99 mlx5_core_warn(dev, "err %d\n", err);
100 goto err_cmd; 100 goto err_cmd;
101 } 101 }
102 102