about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2015-01-27 20:13:01 -0500
committerDavid S. Miller <davem@davemloft.net>2015-01-27 20:13:01 -0500
commit5cce1cf718b02c8ea89576230db2bc2fc2e2a35a (patch)
tree2e6934bb871f78f669e996c91bfd35f766bfabc2
parent4967082b469320eeba54ffbca632af1962858fb7 (diff)
parent6d6e996c20f27091c09c813e2e73c507602839e3 (diff)
Merge branch 'mlx4-next'
Amir Vadai says: ==================== Mellanox ethernet driver updates Jan-27-2015 This patchset introduces some bug fixes, code cleanups and support for a new firmware event called recoverable error events. Patches were applied and tested against commit b8665c6 ("net: dsa/mv88e6352: make mv88e6352_wait generic") Changes from V0: - Patch 6/11 ("net/mlx4_core: Fix struct mlx4_vhcr_cmd to make implicit padding explicit"): - Removed __packed - Rephrased commit message - Added a new patch by Majd ("net/mlx4_core: Update the HCA core clock frequency after INIT_PORT") ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c16
-rw-r--r--include/linux/mlx4/cmd.h6
-rw-r--r--include/linux/mlx4/device.h14
12 files changed, 136 insertions, 72 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 2b48932855e7..154effbfd8be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -901,7 +901,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
901 index = be32_to_cpu(smp->attr_mod); 901 index = be32_to_cpu(smp->attr_mod);
902 if (port < 1 || port > dev->caps.num_ports) 902 if (port < 1 || port > dev->caps.num_ports)
903 return -EINVAL; 903 return -EINVAL;
904 table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL); 904 table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
905 sizeof(*table) * 32, GFP_KERNEL);
906
905 if (!table) 907 if (!table)
906 return -ENOMEM; 908 return -ENOMEM;
907 /* need to get the full pkey table because the paravirtualized 909 /* need to get the full pkey table because the paravirtualized
@@ -1221,7 +1223,7 @@ static struct mlx4_cmd_info cmd_info[] = {
1221 { 1223 {
1222 .opcode = MLX4_CMD_HW2SW_EQ, 1224 .opcode = MLX4_CMD_HW2SW_EQ,
1223 .has_inbox = false, 1225 .has_inbox = false,
1224 .has_outbox = true, 1226 .has_outbox = false,
1225 .out_is_imm = false, 1227 .out_is_imm = false,
1226 .encode_slave_id = true, 1228 .encode_slave_id = true,
1227 .verify = NULL, 1229 .verify = NULL,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 569eda9e83d6..a7b58ba8492b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -770,22 +770,20 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
770 return 0; 770 return 0;
771 } 771 }
772 772
773 proto_admin = cpu_to_be32(ptys_adv); 773 proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
774 if (speed >= 0 && speed != priv->port_state.link_speed) 774 cpu_to_be32(ptys_adv) :
775 /* If speed was set then speed decides :-) */ 775 speed_set_ptys_admin(priv, speed,
776 proto_admin = speed_set_ptys_admin(priv, speed, 776 ptys_reg.eth_proto_cap);
777 ptys_reg.eth_proto_cap);
778 777
779 proto_admin &= ptys_reg.eth_proto_cap; 778 proto_admin &= ptys_reg.eth_proto_cap;
780
781 if (proto_admin == ptys_reg.eth_proto_admin)
782 return 0; /* Nothing to change */
783
784 if (!proto_admin) { 779 if (!proto_admin) {
785 en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n"); 780 en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
786 return -EINVAL; /* nothing to change due to bad input */ 781 return -EINVAL; /* nothing to change due to bad input */
787 } 782 }
788 783
784 if (proto_admin == ptys_reg.eth_proto_admin)
785 return 0; /* Nothing to change */
786
789 en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", 787 en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
790 be32_to_cpu(proto_admin)); 788 be32_to_cpu(proto_admin));
791 789
@@ -798,9 +796,9 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
798 return ret; 796 return ret;
799 } 797 }
800 798
801 en_warn(priv, "Port link mode changed, restarting port...\n");
802 mutex_lock(&priv->mdev->state_lock); 799 mutex_lock(&priv->mdev->state_lock);
803 if (priv->port_up) { 800 if (priv->port_up) {
801 en_warn(priv, "Port link mode changed, restarting port...\n");
804 mlx4_en_stop_port(dev, 1); 802 mlx4_en_stop_port(dev, 1);
805 if (mlx4_en_start_port(dev)) 803 if (mlx4_en_start_port(dev))
806 en_err(priv, "Failed restarting port %d\n", priv->port); 804 en_err(priv, "Failed restarting port %d\n", priv->port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 2f2e6067426d..264bc15c1ff2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -88,6 +88,8 @@ static u64 get_async_ev_mask(struct mlx4_dev *dev)
88 u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; 88 u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
89 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) 89 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
90 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); 90 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
91 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
92 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);
91 93
92 return async_ev_mask; 94 return async_ev_mask;
93} 95}
@@ -736,6 +738,26 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
736 (unsigned long) eqe); 738 (unsigned long) eqe);
737 break; 739 break;
738 740
741 case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
742 switch (eqe->subtype) {
743 case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
744 mlx4_warn(dev, "Bad cable detected on port %u\n",
745 eqe->event.bad_cable.port);
746 break;
747 case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
748 mlx4_warn(dev, "Unsupported cable detected\n");
749 break;
750 default:
751 mlx4_dbg(dev,
752 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
753 eqe->type, eqe->subtype, eq->eqn,
754 eq->cons_index, eqe->owner, eq->nent,
755 !!(eqe->owner & 0x80) ^
756 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
757 break;
758 }
759 break;
760
739 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: 761 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
740 case MLX4_EVENT_TYPE_ECC_DETECT: 762 case MLX4_EVENT_TYPE_ECC_DETECT:
741 default: 763 default:
@@ -846,12 +868,10 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
846 MLX4_CMD_WRAPPED); 868 MLX4_CMD_WRAPPED);
847} 869}
848 870
849static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 871static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
850 int eq_num)
851{ 872{
852 return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 873 return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
853 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, 874 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
854 MLX4_CMD_WRAPPED);
855} 875}
856 876
857static int mlx4_num_eq_uar(struct mlx4_dev *dev) 877static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -1024,7 +1044,6 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
1024 struct mlx4_eq *eq) 1044 struct mlx4_eq *eq)
1025{ 1045{
1026 struct mlx4_priv *priv = mlx4_priv(dev); 1046 struct mlx4_priv *priv = mlx4_priv(dev);
1027 struct mlx4_cmd_mailbox *mailbox;
1028 int err; 1047 int err;
1029 int i; 1048 int i;
1030 /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with 1049 /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
@@ -1032,24 +1051,10 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
1032 */ 1051 */
1033 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; 1052 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;
1034 1053
1035 mailbox = mlx4_alloc_cmd_mailbox(dev); 1054 err = mlx4_HW2SW_EQ(dev, eq->eqn);
1036 if (IS_ERR(mailbox))
1037 return;
1038
1039 err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
1040 if (err) 1055 if (err)
1041 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); 1056 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
1042 1057
1043 if (0) {
1044 mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
1045 for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
1046 if (i % 4 == 0)
1047 pr_cont("[%02x] ", i * 4);
1048 pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
1049 if ((i + 1) % 4 == 0)
1050 pr_cont("\n");
1051 }
1052 }
1053 synchronize_irq(eq->irq); 1058 synchronize_irq(eq->irq);
1054 tasklet_disable(&eq->tasklet_ctx.task); 1059 tasklet_disable(&eq->tasklet_ctx.task);
1055 1060
@@ -1061,7 +1066,6 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
1061 1066
1062 kfree(eq->page_list); 1067 kfree(eq->page_list);
1063 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); 1068 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
1064 mlx4_free_cmd_mailbox(dev, mailbox);
1065} 1069}
1066 1070
1067static void mlx4_free_irqs(struct mlx4_dev *dev) 1071static void mlx4_free_irqs(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 982861d1df44..dbabfae3a3de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -84,13 +84,10 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
84 [ 1] = "UC transport", 84 [ 1] = "UC transport",
85 [ 2] = "UD transport", 85 [ 2] = "UD transport",
86 [ 3] = "XRC transport", 86 [ 3] = "XRC transport",
87 [ 4] = "reliable multicast",
88 [ 5] = "FCoIB support",
89 [ 6] = "SRQ support", 87 [ 6] = "SRQ support",
90 [ 7] = "IPoIB checksum offload", 88 [ 7] = "IPoIB checksum offload",
91 [ 8] = "P_Key violation counter", 89 [ 8] = "P_Key violation counter",
92 [ 9] = "Q_Key violation counter", 90 [ 9] = "Q_Key violation counter",
93 [10] = "VMM",
94 [12] = "Dual Port Different Protocol (DPDP) support", 91 [12] = "Dual Port Different Protocol (DPDP) support",
95 [15] = "Big LSO headers", 92 [15] = "Big LSO headers",
96 [16] = "MW support", 93 [16] = "MW support",
@@ -99,12 +96,11 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
99 [19] = "Raw multicast support", 96 [19] = "Raw multicast support",
100 [20] = "Address vector port checking support", 97 [20] = "Address vector port checking support",
101 [21] = "UD multicast support", 98 [21] = "UD multicast support",
102 [24] = "Demand paging support",
103 [25] = "Router support",
104 [30] = "IBoE support", 99 [30] = "IBoE support",
105 [32] = "Unicast loopback support", 100 [32] = "Unicast loopback support",
106 [34] = "FCS header control", 101 [34] = "FCS header control",
107 [38] = "Wake On LAN support", 102 [37] = "Wake On LAN (port1) support",
103 [38] = "Wake On LAN (port2) support",
108 [40] = "UDP RSS support", 104 [40] = "UDP RSS support",
109 [41] = "Unicast VEP steering support", 105 [41] = "Unicast VEP steering support",
110 [42] = "Multicast VEP steering support", 106 [42] = "Multicast VEP steering support",
@@ -145,7 +141,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
145 [16] = "CONFIG DEV support", 141 [16] = "CONFIG DEV support",
146 [17] = "Asymmetric EQs support", 142 [17] = "Asymmetric EQs support",
147 [18] = "More than 80 VFs support", 143 [18] = "More than 80 VFs support",
148 [19] = "Performance optimized for limited rule configuration flow steering support" 144 [19] = "Performance optimized for limited rule configuration flow steering support",
145 [20] = "Recoverable error events support"
149 }; 146 };
150 int i; 147 int i;
151 148
@@ -259,6 +256,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
259#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 256#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
260#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c 257#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
261#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 258#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
259#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48
262 260
263#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 261#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
264#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 262#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
@@ -273,6 +271,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
273#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 271#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
274#define QUERY_FUNC_CAP_FLAG_ETH 0x80 272#define QUERY_FUNC_CAP_FLAG_ETH 0x80
275#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 273#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
274#define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08
276#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04 275#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04
277 276
278#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31) 277#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31)
@@ -344,9 +343,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
344 } else if (vhcr->op_modifier == 0) { 343 } else if (vhcr->op_modifier == 0) {
345 struct mlx4_active_ports actv_ports = 344 struct mlx4_active_ports actv_ports =
346 mlx4_get_active_ports(dev, slave); 345 mlx4_get_active_ports(dev, slave);
347 /* enable rdma and ethernet interfaces, and new quota locations */ 346 /* enable rdma and ethernet interfaces, new quota locations,
347 * and reserved lkey
348 */
348 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 349 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
349 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX); 350 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
351 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
350 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 352 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
351 353
352 field = min( 354 field = min(
@@ -411,6 +413,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
411 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG | 413 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
412 QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG; 414 QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
413 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); 415 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
416
417 size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
418 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
414 } else 419 } else
415 err = -EINVAL; 420 err = -EINVAL;
416 421
@@ -503,6 +508,13 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
503 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 508 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
504 func_cap->reserved_eq = size & 0xFFFFFF; 509 func_cap->reserved_eq = size & 0xFFFFFF;
505 510
511 if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
512 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
513 func_cap->reserved_lkey = size;
514 } else {
515 func_cap->reserved_lkey = 0;
516 }
517
506 func_cap->extra_flags = 0; 518 func_cap->extra_flags = 0;
507 519
508 /* Mailbox data from 0x6c and onward should only be treated if 520 /* Mailbox data from 0x6c and onward should only be treated if
@@ -859,6 +871,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
859 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); 871 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
860 if (field32 & (1 << 0)) 872 if (field32 & (1 << 0))
861 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; 873 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
874 if (field32 & (1 << 7))
875 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
862 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); 876 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
863 if (field & 1<<6) 877 if (field & 1<<6)
864 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; 878 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -1562,6 +1576,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1562#define INIT_HCA_VXLAN_OFFSET 0x0c 1576#define INIT_HCA_VXLAN_OFFSET 0x0c
1563#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e 1577#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
1564#define INIT_HCA_FLAGS_OFFSET 0x014 1578#define INIT_HCA_FLAGS_OFFSET 0x014
1579#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
1565#define INIT_HCA_QPC_OFFSET 0x020 1580#define INIT_HCA_QPC_OFFSET 0x020
1566#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 1581#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
1567#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) 1582#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
@@ -1668,6 +1683,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1668 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1683 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1669 } 1684 }
1670 1685
1686 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
1687 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
1688
1671 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 1689 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1672 1690
1673 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); 1691 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -1752,8 +1770,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1752 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); 1770 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
1753 } 1771 }
1754 1772
1755 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000, 1773 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
1756 MLX4_CMD_NATIVE); 1774 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1757 1775
1758 if (err) 1776 if (err)
1759 mlx4_err(dev, "INIT_HCA returns %d\n", err); 1777 mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -1879,6 +1897,36 @@ out:
1879 return err; 1897 return err;
1880} 1898}
1881 1899
1900static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
1901{
1902 struct mlx4_cmd_mailbox *mailbox;
1903 __be32 *outbox;
1904 int err;
1905
1906 mailbox = mlx4_alloc_cmd_mailbox(dev);
1907 if (IS_ERR(mailbox)) {
1908 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
1909 return PTR_ERR(mailbox);
1910 }
1911 outbox = mailbox->buf;
1912
1913 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1914 MLX4_CMD_QUERY_HCA,
1915 MLX4_CMD_TIME_CLASS_B,
1916 !mlx4_is_slave(dev));
1917 if (err) {
1918 mlx4_warn(dev, "hca_core_clock update failed\n");
1919 goto out;
1920 }
1921
1922 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1923
1924out:
1925 mlx4_free_cmd_mailbox(dev, mailbox);
1926
1927 return err;
1928}
1929
1882/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0 1930/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
1883 * and real QP0 are active, so that the paravirtualized QP0 is ready 1931 * and real QP0 are active, so that the paravirtualized QP0 is ready
1884 * to operate */ 1932 * to operate */
@@ -1983,6 +2031,9 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
1983 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2031 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1984 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2032 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1985 2033
2034 if (!err)
2035 mlx4_hca_core_clock_update(dev);
2036
1986 return err; 2037 return err;
1987} 2038}
1988EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); 2039EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
@@ -2007,7 +2058,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2007 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2058 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2008 if (priv->mfunc.master.init_port_ref[port] == 1) { 2059 if (priv->mfunc.master.init_port_ref[port] == 1) {
2009 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2060 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2010 1000, MLX4_CMD_NATIVE); 2061 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2011 if (err) 2062 if (err)
2012 return err; 2063 return err;
2013 } 2064 }
@@ -2018,7 +2069,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2018 if (!priv->mfunc.master.qp0_state[port].qp0_active && 2069 if (!priv->mfunc.master.qp0_state[port].qp0_active &&
2019 priv->mfunc.master.qp0_state[port].port_active) { 2070 priv->mfunc.master.qp0_state[port].port_active) {
2020 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2071 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2021 1000, MLX4_CMD_NATIVE); 2072 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2022 if (err) 2073 if (err)
2023 return err; 2074 return err;
2024 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2075 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
@@ -2033,15 +2084,15 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2033 2084
2034int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) 2085int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
2035{ 2086{
2036 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, 2087 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2037 MLX4_CMD_WRAPPED); 2088 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2038} 2089}
2039EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); 2090EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
2040 2091
2041int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) 2092int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
2042{ 2093{
2043 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000, 2094 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
2044 MLX4_CMD_NATIVE); 2095 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
2045} 2096}
2046 2097
2047struct mlx4_config_dev { 2098struct mlx4_config_dev {
@@ -2180,7 +2231,8 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2180int mlx4_NOP(struct mlx4_dev *dev) 2231int mlx4_NOP(struct mlx4_dev *dev)
2181{ 2232{
2182 /* Input modifier of 0x1f means "finish as soon as possible." */ 2233 /* Input modifier of 0x1f means "finish as soon as possible." */
2183 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE); 2234 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
2235 MLX4_CMD_NATIVE);
2184} 2236}
2185 2237
2186int mlx4_get_phys_port_id(struct mlx4_dev *dev) 2238int mlx4_get_phys_port_id(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 62562b60fa87..f44f7f6017ed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -147,6 +147,7 @@ struct mlx4_func_cap {
147 u32 qp0_proxy_qpn; 147 u32 qp0_proxy_qpn;
148 u32 qp1_tunnel_qpn; 148 u32 qp1_tunnel_qpn;
149 u32 qp1_proxy_qpn; 149 u32 qp1_proxy_qpn;
150 u32 reserved_lkey;
150 u8 physical_port; 151 u8 physical_port;
151 u8 port_flags; 152 u8 port_flags;
152 u8 flags1; 153 u8 flags1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ff2fffeab4c7..cc9f48439244 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -797,6 +797,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
797 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 797 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
798 dev->caps.num_eqs = func_cap.max_eq; 798 dev->caps.num_eqs = func_cap.max_eq;
799 dev->caps.reserved_eqs = func_cap.reserved_eq; 799 dev->caps.reserved_eqs = func_cap.reserved_eq;
800 dev->caps.reserved_lkey = func_cap.reserved_lkey;
800 dev->caps.num_pds = MLX4_NUM_PDS; 801 dev->caps.num_pds = MLX4_NUM_PDS;
801 dev->caps.num_mgms = 0; 802 dev->caps.num_mgms = 0;
802 dev->caps.num_amgms = 0; 803 dev->caps.num_amgms = 0;
@@ -2978,8 +2979,10 @@ err_free_eq:
2978 mlx4_free_eq_table(dev); 2979 mlx4_free_eq_table(dev);
2979 2980
2980err_master_mfunc: 2981err_master_mfunc:
2981 if (mlx4_is_master(dev)) 2982 if (mlx4_is_master(dev)) {
2983 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
2982 mlx4_multi_func_cleanup(dev); 2984 mlx4_multi_func_cleanup(dev);
2985 }
2983 2986
2984 if (mlx4_is_slave(dev)) { 2987 if (mlx4_is_slave(dev)) {
2985 kfree(dev->caps.qp0_qkey); 2988 kfree(dev->caps.qp0_qkey);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 096a81c16a9b..148dc0945aab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -196,6 +196,7 @@ struct mlx4_vhcr {
196struct mlx4_vhcr_cmd { 196struct mlx4_vhcr_cmd {
197 __be64 in_param; 197 __be64 in_param;
198 __be32 in_modifier; 198 __be32 in_modifier;
199 u32 reserved1;
199 __be64 out_param; 200 __be64 out_param;
200 __be16 token; 201 __be16 token;
201 u16 reserved; 202 u16 reserved;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 8dbdf1d29357..d21e884a0838 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -1155,7 +1155,7 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free);
1155 1155
1156int mlx4_SYNC_TPT(struct mlx4_dev *dev) 1156int mlx4_SYNC_TPT(struct mlx4_dev *dev)
1157{ 1157{
1158 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, 1158 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
1159 MLX4_CMD_NATIVE); 1159 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1160} 1160}
1161EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); 1161EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index a42b4c0a9ed9..609c59dc854e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -214,7 +214,6 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
214 list_add(&uar->bf_list, &priv->bf_list); 214 list_add(&uar->bf_list, &priv->bf_list);
215 } 215 }
216 216
217 bf->uar = uar;
218 idx = ffz(uar->free_bf_bmap); 217 idx = ffz(uar->free_bf_bmap);
219 uar->free_bf_bmap |= 1 << idx; 218 uar->free_bf_bmap |= 1 << idx;
220 bf->uar = uar; 219 bf->uar = uar;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 3e93879bccce..79feeb6b0d87 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4677,7 +4677,6 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4677 int state; 4677 int state;
4678 LIST_HEAD(tlist); 4678 LIST_HEAD(tlist);
4679 int eqn; 4679 int eqn;
4680 struct mlx4_cmd_mailbox *mailbox;
4681 4680
4682 err = move_all_busy(dev, slave, RES_EQ); 4681 err = move_all_busy(dev, slave, RES_EQ);
4683 if (err) 4682 if (err)
@@ -4703,20 +4702,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4703 break; 4702 break;
4704 4703
4705 case RES_EQ_HW: 4704 case RES_EQ_HW:
4706 mailbox = mlx4_alloc_cmd_mailbox(dev); 4705 err = mlx4_cmd(dev, slave, eqn & 0xff,
4707 if (IS_ERR(mailbox)) { 4706 1, MLX4_CMD_HW2SW_EQ,
4708 cond_resched(); 4707 MLX4_CMD_TIME_CLASS_A,
4709 continue; 4708 MLX4_CMD_NATIVE);
4710 }
4711 err = mlx4_cmd_box(dev, slave, 0,
4712 eqn & 0xff, 0,
4713 MLX4_CMD_HW2SW_EQ,
4714 MLX4_CMD_TIME_CLASS_A,
4715 MLX4_CMD_NATIVE);
4716 if (err) 4709 if (err)
4717 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", 4710 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4718 slave, eqn); 4711 slave, eqn);
4719 mlx4_free_cmd_mailbox(dev, mailbox);
4720 atomic_dec(&eq->mtt->ref_count); 4712 atomic_dec(&eq->mtt->ref_count);
4721 state = RES_EQ_RESERVED; 4713 state = RES_EQ_RESERVED;
4722 break; 4714 break;
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index c989442ffc6a..ae95adc78509 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -165,9 +165,9 @@ enum {
165}; 165};
166 166
167enum { 167enum {
168 MLX4_CMD_TIME_CLASS_A = 10000, 168 MLX4_CMD_TIME_CLASS_A = 60000,
169 MLX4_CMD_TIME_CLASS_B = 10000, 169 MLX4_CMD_TIME_CLASS_B = 60000,
170 MLX4_CMD_TIME_CLASS_C = 10000, 170 MLX4_CMD_TIME_CLASS_C = 60000,
171}; 171};
172 172
173enum { 173enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 5ef54e145e4d..c95d659a39f2 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -200,7 +200,8 @@ enum {
200 MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, 200 MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
201 MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, 201 MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
202 MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, 202 MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
203 MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 203 MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19,
204 MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20
204}; 205};
205 206
206enum { 207enum {
@@ -280,6 +281,7 @@ enum mlx4_event {
280 MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, 281 MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
281 MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, 282 MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
282 MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, 283 MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
284 MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e,
283 MLX4_EVENT_TYPE_NONE = 0xff, 285 MLX4_EVENT_TYPE_NONE = 0xff,
284}; 286};
285 287
@@ -289,6 +291,11 @@ enum {
289}; 291};
290 292
291enum { 293enum {
294 MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1,
295 MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2,
296};
297
298enum {
292 MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0, 299 MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
293}; 300};
294 301
@@ -860,6 +867,11 @@ struct mlx4_eqe {
860 } __packed tbl_change_info; 867 } __packed tbl_change_info;
861 } params; 868 } params;
862 } __packed port_mgmt_change; 869 } __packed port_mgmt_change;
870 struct {
871 u8 reserved[3];
872 u8 port;
873 u32 reserved1[5];
874 } __packed bad_cable;
863 } event; 875 } event;
864 u8 slave_id; 876 u8 slave_id;
865 u8 reserved3[2]; 877 u8 reserved3[2];