aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2016-07-25 20:53:41 -0400
committerDavid S. Miller <davem@davemloft.net>2016-07-25 20:53:41 -0400
commit779d1436fa9c5c0cbfb05cb949e362982588beaf (patch)
treee1c5dede24617ba0df1ec6531f5f9124f1b4c688
parent5fc382d87517707ad77ea4c9c12e2a3fde2c838a (diff)
parentcff92d7c7ebd7ceddd4def6b39e0302585b1eb14 (diff)
Merge branch 'mlx5-minimum-inline-header-mode'
Saeed Mahameed says: ==================== Mellanox 100G mlx5 minimum inline header mode This small series from Hadar adds the support for minimum inline header mode query in mlx5e NIC driver. Today on TX the driver copies to the HW descriptor only up to L2 header which is the default required mode and sufficient for today's needs. The header in the HW descriptor is used for HW loopback steering decision, without it packets will go directly to the wire with no questions asked. For TX loopback steering according to L2/L3/L4 headers, ConnectX-4 requires to copy the corresponding headers into the send queue (SQ) WQE HW descriptor so it can decide whether to loop it back or to forward to wire. For legacy E-Switch mode only L2 headers copy is required. For advanced steering (E-Switch offloads) more header layers may be required to be copied, the required mode will be advertised by FW to each VF and PF according to the corresponding E-Switch configuration. Changes V2: - Allocate query_nic_vport_context_out on the stack ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c12
-rw-r--r--include/linux/mlx5/device.h7
-rw-r--r--include/linux/mlx5/mlx5_ifc.h10
-rw-r--r--include/linux/mlx5/vport.h2
7 files changed, 105 insertions, 7 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 4cbd452fec25..1b495efa7490 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -129,6 +129,12 @@ static inline int mlx5_max_log_rq_size(int wq_type)
129 } 129 }
130} 130}
131 131
132enum {
133 MLX5E_INLINE_MODE_L2,
134 MLX5E_INLINE_MODE_VPORT_CONTEXT,
135 MLX5_INLINE_MODE_NOT_REQUIRED,
136};
137
132struct mlx5e_tx_wqe { 138struct mlx5e_tx_wqe {
133 struct mlx5_wqe_ctrl_seg ctrl; 139 struct mlx5_wqe_ctrl_seg ctrl;
134 struct mlx5_wqe_eth_seg eth; 140 struct mlx5_wqe_eth_seg eth;
@@ -188,6 +194,7 @@ struct mlx5e_params {
188 bool lro_en; 194 bool lro_en;
189 u32 lro_wqe_sz; 195 u32 lro_wqe_sz;
190 u16 tx_max_inline; 196 u16 tx_max_inline;
197 u8 tx_min_inline_mode;
191 u8 rss_hfunc; 198 u8 rss_hfunc;
192 u8 toeplitz_hash_key[40]; 199 u8 toeplitz_hash_key[40];
193 u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; 200 u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
@@ -398,6 +405,7 @@ struct mlx5e_sq {
398 u32 sqn; 405 u32 sqn;
399 u16 bf_buf_size; 406 u16 bf_buf_size;
400 u16 max_inline; 407 u16 max_inline;
408 u8 min_inline_mode;
401 u16 edge; 409 u16 edge;
402 struct device *pdev; 410 struct device *pdev;
403 struct mlx5e_tstamp *tstamp; 411 struct mlx5e_tstamp *tstamp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bdcb699fe63e..870bea37c57c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -56,6 +56,7 @@ struct mlx5e_sq_param {
56 u32 sqc[MLX5_ST_SZ_DW(sqc)]; 56 u32 sqc[MLX5_ST_SZ_DW(sqc)];
57 struct mlx5_wq_param wq; 57 struct mlx5_wq_param wq;
58 u16 max_inline; 58 u16 max_inline;
59 u8 min_inline_mode;
59 bool icosq; 60 bool icosq;
60}; 61};
61 62
@@ -649,6 +650,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
649 } 650 }
650 sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; 651 sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
651 sq->max_inline = param->max_inline; 652 sq->max_inline = param->max_inline;
653 sq->min_inline_mode =
654 MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
655 param->min_inline_mode : 0;
652 656
653 err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); 657 err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
654 if (err) 658 if (err)
@@ -731,6 +735,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
731 735
732 MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]); 736 MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
733 MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); 737 MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
738 MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
734 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); 739 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
735 MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1); 740 MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
736 MLX5_SET(sqc, sqc, flush_in_error_en, 1); 741 MLX5_SET(sqc, sqc, flush_in_error_en, 1);
@@ -1343,6 +1348,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
1343 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); 1348 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
1344 1349
1345 param->max_inline = priv->params.tx_max_inline; 1350 param->max_inline = priv->params.tx_max_inline;
1351 param->min_inline_mode = priv->params.tx_min_inline_mode;
1346} 1352}
1347 1353
1348static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, 1354static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -2978,6 +2984,23 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
2978 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; 2984 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
2979} 2985}
2980 2986
2987static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
2988 u8 *min_inline_mode)
2989{
2990 switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
2991 case MLX5E_INLINE_MODE_L2:
2992 *min_inline_mode = MLX5_INLINE_MODE_L2;
2993 break;
2994 case MLX5E_INLINE_MODE_VPORT_CONTEXT:
2995 mlx5_query_nic_vport_min_inline(mdev,
2996 min_inline_mode);
2997 break;
2998 case MLX5_INLINE_MODE_NOT_REQUIRED:
2999 *min_inline_mode = MLX5_INLINE_MODE_NONE;
3000 break;
3001 }
3002}
3003
2981static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, 3004static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
2982 struct net_device *netdev, 3005 struct net_device *netdev,
2983 const struct mlx5e_profile *profile, 3006 const struct mlx5e_profile *profile,
@@ -3043,6 +3066,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
3043 priv->params.tx_cq_moderation.pkts = 3066 priv->params.tx_cq_moderation.pkts =
3044 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; 3067 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
3045 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); 3068 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
3069 mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
3046 priv->params.num_tc = 1; 3070 priv->params.num_tc = 1;
3047 priv->params.rss_hfunc = ETH_RSS_HASH_XOR; 3071 priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
3048 3072
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 5740b465ef84..e073bf59890d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -128,6 +128,50 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
128 return priv->channeltc_to_txq_map[channel_ix][up]; 128 return priv->channeltc_to_txq_map[channel_ix][up];
129} 129}
130 130
131static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
132{
133#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
134
135 return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
136}
137
138static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
139{
140 struct flow_keys keys;
141
142 if (skb_transport_header_was_set(skb))
143 return skb_transport_offset(skb);
144 else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
145 return keys.control.thoff;
146 else
147 return mlx5e_skb_l2_header_offset(skb);
148}
149
150static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
151 struct sk_buff *skb)
152{
153 int hlen;
154
155 switch (mode) {
156 case MLX5_INLINE_MODE_TCP_UDP:
157 hlen = eth_get_headlen(skb->data, skb_headlen(skb));
158 if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
159 hlen += VLAN_HLEN;
160 return hlen;
161 case MLX5_INLINE_MODE_IP:
162 /* When transport header is set to zero, it means no transport
163 * header. When transport header is set to 0xff's, it means
164 * transport header wasn't set.
165 */
166 if (skb_transport_offset(skb))
167 return mlx5e_skb_l3_header_offset(skb);
168 /* fall through */
169 case MLX5_INLINE_MODE_L2:
170 default:
171 return mlx5e_skb_l2_header_offset(skb);
172 }
173}
174
131static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, 175static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
132 struct sk_buff *skb, bool bf) 176 struct sk_buff *skb, bool bf)
133{ 177{
@@ -135,8 +179,6 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
135 * headers and occur before the data gather. 179 * headers and occur before the data gather.
136 * Therefore these headers must be copied into the WQE 180 * Therefore these headers must be copied into the WQE
137 */ 181 */
138#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
139
140 if (bf) { 182 if (bf) {
141 u16 ihs = skb_headlen(skb); 183 u16 ihs = skb_headlen(skb);
142 184
@@ -146,8 +188,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
146 if (ihs <= sq->max_inline) 188 if (ihs <= sq->max_inline)
147 return skb_headlen(skb); 189 return skb_headlen(skb);
148 } 190 }
149 191 return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
150 return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
151} 192}
152 193
153static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, 194static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 91846dfcbe9c..21365d06982b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -135,6 +135,18 @@ static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
135 return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); 135 return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
136} 136}
137 137
138void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
139 u8 *min_inline_mode)
140{
141 u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
142
143 mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out));
144
145 *min_inline_mode = MLX5_GET(query_nic_vport_context_out, out,
146 nic_vport_context.min_wqe_inline_mode);
147}
148EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
149
138int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, 150int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
139 u16 vport, u8 *addr) 151 u16 vport, u8 *addr)
140{ 152{
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index e0a3ed758287..0b6d15cddb2f 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -129,6 +129,13 @@ __mlx5_mask(typ, fld))
129 tmp; \ 129 tmp; \
130 }) 130 })
131 131
132enum mlx5_inline_modes {
133 MLX5_INLINE_MODE_NONE,
134 MLX5_INLINE_MODE_L2,
135 MLX5_INLINE_MODE_IP,
136 MLX5_INLINE_MODE_TCP_UDP,
137};
138
132enum { 139enum {
133 MLX5_MAX_COMMANDS = 32, 140 MLX5_MAX_COMMANDS = 32,
134 MLX5_CMD_DATA_BLOCK_SIZE = 512, 141 MLX5_CMD_DATA_BLOCK_SIZE = 512,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d671e4e8e7db..21bc4557b67a 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -536,7 +536,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
536 u8 self_lb_en_modifiable[0x1]; 536 u8 self_lb_en_modifiable[0x1];
537 u8 reserved_at_9[0x2]; 537 u8 reserved_at_9[0x2];
538 u8 max_lso_cap[0x5]; 538 u8 max_lso_cap[0x5];
539 u8 reserved_at_10[0x4]; 539 u8 reserved_at_10[0x2];
540 u8 wqe_inline_mode[0x2];
540 u8 rss_ind_tbl_cap[0x4]; 541 u8 rss_ind_tbl_cap[0x4];
541 u8 reg_umr_sq[0x1]; 542 u8 reg_umr_sq[0x1];
542 u8 scatter_fcs[0x1]; 543 u8 scatter_fcs[0x1];
@@ -2270,7 +2271,8 @@ struct mlx5_ifc_sqc_bits {
2270 u8 cd_master[0x1]; 2271 u8 cd_master[0x1];
2271 u8 fre[0x1]; 2272 u8 fre[0x1];
2272 u8 flush_in_error_en[0x1]; 2273 u8 flush_in_error_en[0x1];
2273 u8 reserved_at_4[0x4]; 2274 u8 reserved_at_4[0x1];
2275 u8 min_wqe_inline_mode[0x3];
2274 u8 state[0x4]; 2276 u8 state[0x4];
2275 u8 reg_umr[0x1]; 2277 u8 reg_umr[0x1];
2276 u8 reserved_at_d[0x13]; 2278 u8 reserved_at_d[0x13];
@@ -2367,7 +2369,9 @@ struct mlx5_ifc_rmpc_bits {
2367}; 2369};
2368 2370
2369struct mlx5_ifc_nic_vport_context_bits { 2371struct mlx5_ifc_nic_vport_context_bits {
2370 u8 reserved_at_0[0x1f]; 2372 u8 reserved_at_0[0x5];
2373 u8 min_wqe_inline_mode[0x3];
2374 u8 reserved_at_8[0x17];
2371 u8 roce_en[0x1]; 2375 u8 roce_en[0x1];
2372 2376
2373 u8 arm_change_event[0x1]; 2377 u8 arm_change_event[0x1];
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 6c16c198f680..e087b7d047ac 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -43,6 +43,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
43 u16 vport, u8 state); 43 u16 vport, u8 state);
44int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, 44int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
45 u16 vport, u8 *addr); 45 u16 vport, u8 *addr);
46void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
47 u8 *min_inline);
46int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, 48int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
47 u16 vport, u8 *addr); 49 u16 vport, u8 *addr);
48int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); 50int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);