author		Matan Barak <matanb@mellanox.com>	2014-12-11 03:58:00 -0500
committer	David S. Miller <davem@davemloft.net>	2014-12-11 14:47:36 -0500
commit		7d077cd34eabb2ffd05abe0f2cad01da1ef11712 (patch)
tree		91215913abb8d16177517d254f677c7119461875
parent		431df8c7e9708433459fd806a08308997de43121 (diff)
net/mlx4: Add support for A0 steering
Add the required firmware commands for A0 steering and a way to enable it. The firmware support focuses on the INIT_HCA, QUERY_HCA, QUERY_PORT, QUERY_DEV_CAP and QUERY_FUNC_CAP commands, which are used to configure and query the device.

The A0 DMFS (steering) modes are:

Static - optimized performance, but flow steering rules are limited. This mode must be chosen explicitly by the user in order to be used.

Dynamic - must be chosen explicitly by the user. In this mode, the FW works in optimized steering mode as long as it can and afterwards automatically drops to classic (full) DMFS.

Disable - must be chosen explicitly by the user. The user instructs the system not to use optimized steering, even if the FW supports Dynamic A0 DMFS (and would thus be able to use optimized steering in Default A0 DMFS mode).

Default - chosen implicitly. In this mode, if the FW supports Dynamic A0 DMFS, the device works in that mode; otherwise, it works in Disable A0 DMFS mode.

Under an SRIOV configuration, when A0 steering is enabled, older guest VF drivers that don't use the RX QP allocation flag (MLX4_RESERVE_A0_QP) will get a QP from the general range and fail when attempting to register a steering rule. To avoid that, the PF context behaviour is changed once in A0 static mode to require support for the allocation flag in VF drivers too.

A0 steering is enabled via the log_num_mgm_entry_size module parameter: if its value is not positive, the absolute value of log_num_mgm_entry_size is treated as a bit field, and setting bit 2 of that bit field enables static A0 steering.

Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
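For illustration, the short sketch below mirrors the parameter decoding the patch adds in choose_steering_mode(): a non-positive log_num_mgm_entry_size is negated and tested against bit 2 (MLX4_DMFS_A0_STEERING, defined by the patch as 1UL << 2). This is a minimal user-space sketch, not driver code; the helper decode_a0_steering_request() and the standalone main() are hypothetical and exist only to show the bit-field interpretation.

#include <stdbool.h>
#include <stdio.h>

#define MLX4_DMFS_A0_STEERING (1UL << 2)	/* bit 2 of |log_num_mgm_entry_size| */

/* Hypothetical helper mirroring the check in choose_steering_mode():
 * non-positive parameter values are treated as a bit field after negation.
 */
static bool decode_a0_steering_request(int log_num_mgm_entry_size)
{
	if (log_num_mgm_entry_size > 0)
		return false;	/* positive values keep their classic meaning */
	return ((-log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) != 0;
}

int main(void)
{
	/* e.g. "modprobe mlx4_core log_num_mgm_entry_size=-4" sets bit 2 */
	printf("%d\n", decode_a0_steering_request(-4));	/* 1: static A0 requested */
	printf("%d\n", decode_a0_steering_request(-1));	/* 0: default behaviour */
	return 0;
}

With the patch applied, mlx4_verify_params() accepts values in -7..0 (treated as the bit field above) or in MLX4_MIN_MGM_LOG_ENTRY_SIZE..MLX4_MAX_MGM_LOG_ENTRY_SIZE (the classic log entry size).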
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_netdev.c	  3
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/fw.c	 48
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/fw.h	  4
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/main.c	132
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4.h	  2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/qp.c	  4
-rw-r--r--	include/linux/mlx4/device.h	 17
7 files changed, 191 insertions, 19 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 568e1f41fdd4..6ff214de1111 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2594,7 +2594,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
 	if (mdev->dev->caps.steering_mode ==
-	    MLX4_STEERING_MODE_DEVICE_MANAGED)
+	    MLX4_STEERING_MODE_DEVICE_MANAGED &&
+	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
 		dev->hw_features |= NETIF_F_NTUPLE;
 
 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 073b3d1c8b91..ef3b95bac2ad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -144,7 +144,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[15] = "Ethernet Backplane autoneg support",
 		[16] = "CONFIG DEV support",
 		[17] = "Asymmetric EQs support",
-		[18] = "More than 80 VFs support"
+		[18] = "More than 80 VFs support",
+		[19] = "Performance optimized for limited rule configuration flow steering support"
 	};
 	int i;
 
@@ -680,6 +681,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
 #define QUERY_DEV_CAP_VXLAN			0x9e
 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
+#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
+#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
 
 	dev_cap->flags2 = 0;
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -876,6 +879,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	if (field32 & (1 << 0))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;
 
+	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
+		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
+	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
+	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
+		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
+	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;
+
 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
 	if (field32 & (1 << 16))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
@@ -935,6 +945,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
 	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
 	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
+	mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
+		 dev_cap->dmfs_high_rate_qpn_base);
+	mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
+		 dev_cap->dmfs_high_rate_qpn_range);
 
 	dump_dev_cap_flags(dev, dev_cap->flags);
 	dump_dev_cap_flags2(dev, dev_cap->flags2);
@@ -996,6 +1010,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c
 	port_cap->supported_port_types = field & 3;
 	port_cap->suggested_type = (field >> 3) & 1;
 	port_cap->default_sense = (field >> 4) & 1;
+	port_cap->dmfs_optimized_state = (field >> 5) & 1;
 	MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
 	port_cap->ib_mtu = field & 0xf;
 	MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
@@ -1530,6 +1545,12 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	struct mlx4_cmd_mailbox *mailbox;
 	__be32 *inbox;
 	int err;
+	static const u8 a0_dmfs_hw_steering[] =  {
+		[MLX4_STEERING_DMFS_A0_DEFAULT]		= 0,
+		[MLX4_STEERING_DMFS_A0_DYNAMIC]		= 1,
+		[MLX4_STEERING_DMFS_A0_STATIC]		= 2,
+		[MLX4_STEERING_DMFS_A0_DISABLE]		= 3
+	};
 
 #define INIT_HCA_IN_SIZE		 0x200
 #define INIT_HCA_VERSION_OFFSET		 0x000
@@ -1563,6 +1584,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define INIT_HCA_FS_PARAM_OFFSET	 0x1d0
 #define INIT_HCA_FS_BASE_OFFSET		 (INIT_HCA_FS_PARAM_OFFSET + 0x00)
 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_A0_OFFSET		 (INIT_HCA_FS_PARAM_OFFSET + 0x18)
 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
 #define INIT_HCA_FS_ETH_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x21)
 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
@@ -1673,8 +1695,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 		/* Enable Ethernet flow steering
 		 * with udp unicast and tcp unicast
 		 */
-		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
-			 INIT_HCA_FS_ETH_BITS_OFFSET);
+		if (dev->caps.dmfs_high_steer_mode !=
+		    MLX4_STEERING_DMFS_A0_STATIC)
+			MLX4_PUT(inbox,
+				 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
+				 INIT_HCA_FS_ETH_BITS_OFFSET);
 		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
 			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
 		/* Enable IPoIB flow steering
@@ -1684,6 +1709,13 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 			 INIT_HCA_FS_IB_BITS_OFFSET);
 		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
 			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
+
+		if (dev->caps.dmfs_high_steer_mode !=
+		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
+			MLX4_PUT(inbox,
+				 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
+				       << 6)),
+				 INIT_HCA_FS_A0_OFFSET);
 	} else {
 		MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
 		MLX4_PUT(inbox, param->log_mc_entry_sz,
@@ -1734,6 +1766,12 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 	u32 dword_field;
 	int err;
 	u8 byte_field;
+	static const u8 a0_dmfs_query_hw_steering[] =  {
+		[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
+		[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
+		[2] = MLX4_STEERING_DMFS_A0_STATIC,
+		[3] = MLX4_STEERING_DMFS_A0_DISABLE
+	};
 
 #define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
 #define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c
@@ -1786,6 +1824,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
 		MLX4_GET(param->log_mc_table_sz, outbox,
 			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+		MLX4_GET(byte_field, outbox,
+			 INIT_HCA_FS_A0_OFFSET);
+		param->dmfs_high_steer_mode =
+			a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
 	} else {
 		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
 		MLX4_GET(param->log_mc_entry_sz, outbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 744398b7ab5e..794e2826609a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -60,6 +60,7 @@ struct mlx4_port_cap {
 	int vendor_oui;
 	u16 wavelength;
 	u64 trans_code;
+	u8 dmfs_optimized_state;
 };
 
 struct mlx4_dev_cap {
@@ -124,6 +125,8 @@ struct mlx4_dev_cap {
 	int max_gso_sz;
 	int max_rss_tbl_sz;
 	u32 max_counters;
+	u32 dmfs_high_rate_qpn_base;
+	u32 dmfs_high_rate_qpn_range;
 	struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
 };
 
@@ -194,6 +197,7 @@ struct mlx4_init_hca_param {
 	u8 mw_enabled;  /* Enable memory windows */
 	u8 uar_page_sz; /* log pg sz in 4k chunks */
 	u8 steering_mode; /* for QUERY_HCA */
+	u8 dmfs_high_steer_mode; /* for QUERY_HCA */
 	u64 dev_cap_enabled;
 	u16 cqe_size; /* For use only when CQE stride feature enabled */
 	u16 eqe_size; /* For use only when EQE stride feature enabled */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 6173b8072988..e25436b24ce7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -105,7 +105,8 @@ MODULE_PARM_DESC(enable_64b_cqe_eqe,
105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
106 106
107#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ 107#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \
108 MLX4_FUNC_CAP_EQE_CQE_STRIDE) 108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
109 MLX4_FUNC_CAP_DMFS_A0_STATIC)
109 110
110static char mlx4_version[] = 111static char mlx4_version[] =
111 DRV_NAME ": Mellanox ConnectX core driver v" 112 DRV_NAME ": Mellanox ConnectX core driver v"
@@ -463,8 +464,28 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 					(1 << dev->caps.log_num_vlans) *
 					dev->caps.num_ports;
 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+
+	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
+	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
+		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
+	else
+		dev->caps.dmfs_high_rate_qpn_base =
+			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
+
+	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
+	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
+		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
+		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
+		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
+	} else {
+		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
+		dev->caps.dmfs_high_rate_qpn_base =
+			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
+		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
+	}
+
 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
-		MLX4_A0_STEERING_TABLE_SIZE;
+		dev->caps.dmfs_high_rate_qpn_range;
 
 	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
 				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
@@ -753,7 +774,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 
 	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
 	    PF_CONTEXT_BEHAVIOUR_MASK) {
-		mlx4_err(dev, "Unknown pf context behaviour\n");
+		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
+			 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
 		return -ENOSYS;
 	}
 
@@ -1640,10 +1662,46 @@ static int choose_log_fs_mgm_entry_size(int qp_per_entry)
 	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
 }
 
+static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
+{
+	switch (dmfs_high_steer_mode) {
+	case MLX4_STEERING_DMFS_A0_DEFAULT:
+		return "default performance";
+
+	case MLX4_STEERING_DMFS_A0_DYNAMIC:
+		return "dynamic hybrid mode";
+
+	case MLX4_STEERING_DMFS_A0_STATIC:
+		return "performance optimized for limited rule configuration (static)";
+
+	case MLX4_STEERING_DMFS_A0_DISABLE:
+		return "disabled performance optimized steering";
+
+	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
+		return "performance optimized steering not supported";
+
+	default:
+		return "Unrecognized mode";
+	}
+}
+
+#define MLX4_DMFS_A0_STEERING (1UL << 2)
+
 static void choose_steering_mode(struct mlx4_dev *dev,
 				 struct mlx4_dev_cap *dev_cap)
 {
-	if (mlx4_log_num_mgm_entry_size == -1 &&
+	if (mlx4_log_num_mgm_entry_size <= 0) {
+		if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
+			if (dev->caps.dmfs_high_steer_mode ==
+			    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
+				mlx4_err(dev, "DMFS high rate mode not supported\n");
+			else
+				dev->caps.dmfs_high_steer_mode =
+					MLX4_STEERING_DMFS_A0_STATIC;
+		}
+	}
+
+	if (mlx4_log_num_mgm_entry_size <= 0 &&
 	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
 	    (!mlx4_is_mfunc(dev) ||
 	     (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
@@ -1656,6 +1714,9 @@ static void choose_steering_mode(struct mlx4_dev *dev,
 		dev->caps.fs_log_max_ucast_qp_range_size =
 			dev_cap->fs_log_max_ucast_qp_range_size;
 	} else {
+		if (dev->caps.dmfs_high_steer_mode !=
+		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
+			dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
 		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
 		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
 			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
@@ -1682,7 +1743,8 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
 					   struct mlx4_dev_cap *dev_cap)
 {
 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
-	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS &&
+	    dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
 		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
 	else
 		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
@@ -1691,6 +1753,35 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
 		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
 }
 
+static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
+{
+	int i;
+	struct mlx4_port_cap port_cap;
+
+	if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
+		return -EINVAL;
+
+	for (i = 1; i <= dev->caps.num_ports; i++) {
+		if (mlx4_dev_port(dev, i, &port_cap)) {
+			mlx4_err(dev,
+				 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
+		} else if ((dev->caps.dmfs_high_steer_mode !=
+			    MLX4_STEERING_DMFS_A0_DEFAULT) &&
+			   (port_cap.dmfs_optimized_state ==
+			    !!(dev->caps.dmfs_high_steer_mode ==
+			    MLX4_STEERING_DMFS_A0_DISABLE))) {
+			mlx4_err(dev,
+				 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
+				 dmfs_high_rate_steering_mode_str(
+					dev->caps.dmfs_high_steer_mode),
+				 (port_cap.dmfs_optimized_state ?
+					"enabled" : "disabled"));
+		}
+	}
+
+	return 0;
+}
+
 static int mlx4_init_fw(struct mlx4_dev *dev)
 {
 	struct mlx4_mod_stat_cfg mlx4_cfg;
@@ -1743,6 +1834,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 	choose_steering_mode(dev, &dev_cap);
 	choose_tunnel_offload_mode(dev, &dev_cap);
 
+	if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
+	    mlx4_is_master(dev))
+		dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
+
 	err = mlx4_get_phys_port_id(dev);
 	if (err)
 		mlx4_err(dev, "Fail to get physical port id\n");
@@ -1829,6 +1924,24 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
 			}
 		}
+
+		if (dev->caps.dmfs_high_steer_mode !=
+		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
+			if (mlx4_validate_optimized_steering(dev))
+				mlx4_warn(dev, "Optimized steering validation failed\n");
+
+			if (dev->caps.dmfs_high_steer_mode ==
+			    MLX4_STEERING_DMFS_A0_DISABLE) {
+				dev->caps.dmfs_high_rate_qpn_base =
+					dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
+				dev->caps.dmfs_high_rate_qpn_range =
+					MLX4_A0_STEERING_TABLE_SIZE;
+			}
+
+			mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
+				 dmfs_high_rate_steering_mode_str(
+					dev->caps.dmfs_high_steer_mode));
+		}
 	} else {
 		err = mlx4_init_slave(dev);
 		if (err) {
@@ -3201,10 +3314,11 @@ static int __init mlx4_verify_params(void)
 		port_type_array[0] = true;
 	}
 
-	if (mlx4_log_num_mgm_entry_size != -1 &&
-	    (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
-	     mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
-		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
+	if (mlx4_log_num_mgm_entry_size < -7 ||
+	    (mlx4_log_num_mgm_entry_size > 0 &&
+	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
+	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
+		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
 			mlx4_log_num_mgm_entry_size,
 			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
 			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cebd1180702b..bdd4eea2247c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -689,8 +689,6 @@ enum mlx4_qp_table_zones {
 	MLX4_QP_TABLE_ZONE_NUM
 };
 
-#define MLX4_A0_STEERING_TABLE_SIZE 256
-
 struct mlx4_qp_table {
 	struct mlx4_bitmap *bitmap_gen;
 	struct mlx4_zone_allocator *zones;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index d8d040c366f4..1586ecce13c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -712,8 +712,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 	int k;
 	int fixed_reserved_from_bot_rv = 0;
 	int bottom_reserved_for_rss_bitmap;
-	u32 max_table_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
-			MLX4_A0_STEERING_TABLE_SIZE;
+	u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
+			dev->caps.dmfs_high_rate_qpn_range;
 
 	spin_lock_init(&qp_table->lock);
 	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 39890cddc5fa..25c791e295fd 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -117,6 +117,14 @@ enum {
 	MLX4_STEERING_MODE_DEVICE_MANAGED
 };
 
+enum {
+	MLX4_STEERING_DMFS_A0_DEFAULT,
+	MLX4_STEERING_DMFS_A0_DYNAMIC,
+	MLX4_STEERING_DMFS_A0_STATIC,
+	MLX4_STEERING_DMFS_A0_DISABLE,
+	MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
+};
+
 static inline const char *mlx4_steering_mode_str(int steering_mode)
 {
 	switch (steering_mode) {
@@ -191,7 +199,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP	= 1LL << 15,
 	MLX4_DEV_CAP_FLAG2_CONFIG_DEV		= 1LL << 16,
 	MLX4_DEV_CAP_FLAG2_SYS_EQS		= 1LL << 17,
-	MLX4_DEV_CAP_FLAG2_80_VFS		= 1LL << 18
+	MLX4_DEV_CAP_FLAG2_80_VFS		= 1LL << 18,
+	MLX4_DEV_CAP_FLAG2_FS_A0		= 1LL << 19
 };
 
 enum {
@@ -225,7 +234,8 @@ enum {
 
 enum {
 	MLX4_FUNC_CAP_64B_EQE_CQE	= 1L << 0,
-	MLX4_FUNC_CAP_EQE_CQE_STRIDE	= 1L << 1
+	MLX4_FUNC_CAP_EQE_CQE_STRIDE	= 1L << 1,
+	MLX4_FUNC_CAP_DMFS_A0_STATIC	= 1L << 2
 };
 
 
@@ -482,6 +492,7 @@ struct mlx4_caps {
 	int reserved_mcgs;
 	int num_qp_per_mgm;
 	int steering_mode;
+	int dmfs_high_steer_mode;
 	int fs_log_max_ucast_qp_range_size;
 	int num_pds;
 	int reserved_pds;
@@ -522,6 +533,8 @@ struct mlx4_caps {
 	int tunnel_offload_mode;
 	u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
 	u8 alloc_res_qp_mask;
+	u32 dmfs_high_rate_qpn_base;
+	u32 dmfs_high_rate_qpn_range;
 };
 
 struct mlx4_buf_list {