author     David S. Miller <davem@davemloft.net>	2017-03-22 15:11:14 -0400
committer  David S. Miller <davem@davemloft.net>	2017-03-22 15:11:14 -0400
commit     efad54a115c22c5eee5ac89a027d07e06fafb706 (patch)
tree       71cf501e46766fe082840d76c1e21993f0accb41
parent     bf601fe52e8392082e1c0587ab5e34df97675a97 (diff)
parent     8ab7e2ae15d84ba758b2c8c6f4075722e9bd2a08 (diff)
Merge branch 'mlx5-fixes'
Saeed Mahameed says:
====================
Mellanox mlx5 fixes 2017-03-21
This series contains some mlx5 core and ethernet driver fixes.
For -stable:
net/mlx5e: Count LRO packets correctly (for kernel >= 4.2)
net/mlx5e: Count GSO packets correctly (for kernel >= 4.2)
net/mlx5: Increase number of max QPs in default profile (for kernel >= 4.0)
net/mlx5e: Avoid supporting udp tunnel port ndo for VF reps (for kernel >= 4.10)
net/mlx5e: Use the proper UAPI values when offloading TC vlan actions (for kernel >= v4.9)
net/mlx5: E-Switch, Don't allow changing inline mode when flows are configured (for kernel >= 4.10)
net/mlx5e: Change the TC offload rule add/del code path to be per NIC or E-Switch (for kernel >= 4.10)
net/mlx5: Add missing entries for set/query rate limit commands (for kernel >= 4.8)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/ethernet/mellanox/mlx5/core/cmd.c               |  4
 drivers/net/ethernet/mellanox/mlx5/core/en.h                |  4
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c           |  8
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c            |  2
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c             |  4
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c             | 74
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c             |  5
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h           |  6
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  | 22
 drivers/net/ethernet/mellanox/mlx5/core/main.c              |  2
 10 files changed, 94 insertions(+), 37 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index caa837e5e2b9..a380353a78c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -361,6 +361,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
+	case MLX5_CMD_OP_SET_RATE_LIMIT:
+	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
 	case MLX5_CMD_OP_ALLOC_PD:
 	case MLX5_CMD_OP_ALLOC_UAR:
 	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -497,6 +499,8 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(ALLOC_PD);
 	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
 	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f6a6ded204f6..dc52053128bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -928,10 +928,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
 int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti);
 
 int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
 			    void *sp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8ef64c4db2c2..66c133757a5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3100,8 +3100,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
 			    vf_stats);
 }
 
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti)
+static void mlx5e_add_vxlan_port(struct net_device *netdev,
+				 struct udp_tunnel_info *ti)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
@@ -3114,8 +3114,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev,
 	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
 }
 
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti)
+static void mlx5e_del_vxlan_port(struct net_device *netdev,
+				 struct udp_tunnel_info *ti)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c864574a9d5..f621373bd7a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -393,8 +393,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
 	.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
 	.ndo_setup_tc = mlx5e_rep_ndo_setup_tc,
 	.ndo_get_stats64 = mlx5e_rep_get_stats,
-	.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
-	.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
 	.ndo_has_offload_stats = mlx5e_has_offload_stats,
 	.ndo_get_offload_stats = mlx5e_get_offload_stats,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3d371688fbbb..bafcb349a50c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -601,6 +601,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 	if (lro_num_seg > 1) {
 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+		/* Subtract one since we already counted this as one
+		 * "regular" packet in mlx5e_complete_rx_cqe()
+		 */
+		rq->stats.packets += lro_num_seg - 1;
 		rq->stats.lro_packets++;
 		rq->stats.lro_bytes += cqe_bcnt;
 	}
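A side note on the en_rx.c hunk above: mlx5e_complete_rx_cqe() bumps rq->stats.packets once per completion, so an LRO completion that aggregates lro_num_seg wire packets only needs to add the remainder here. A minimal, self-contained sketch of that accounting (struct and function names are invented for illustration, not driver code):

#include <stdint.h>

/* Stand-ins for the rq->stats counters touched in the hunk above. */
struct rx_stats_sketch {
	uint64_t packets;
	uint64_t lro_packets;
	uint64_t lro_bytes;
};

/* One completion covers either a single frame or an LRO aggregate of
 * lro_num_seg wire packets; the completion path already counted one,
 * so only lro_num_seg - 1 is added for the aggregate case.
 */
void sketch_count_rx(struct rx_stats_sketch *s,
		     uint32_t lro_num_seg, uint32_t cqe_bcnt)
{
	s->packets++;				/* per-completion count */
	if (lro_num_seg > 1) {
		s->packets += lro_num_seg - 1;	/* remaining aggregated frames */
		s->lro_packets++;
		s->lro_bytes += cqe_bcnt;
	}
}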
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 79481f4cf264..fade7233dac5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -133,6 +133,23 @@ err_create_ft:
 	return rule;
 }
 
+static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
+				  struct mlx5e_tc_flow *flow)
+{
+	struct mlx5_fc *counter = NULL;
+
+	if (!IS_ERR(flow->rule)) {
+		counter = mlx5_flow_rule_counter(flow->rule);
+		mlx5_del_flow_rules(flow->rule);
+		mlx5_fc_destroy(priv->mdev, counter);
+	}
+
+	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
+		mlx5_destroy_flow_table(priv->fs.tc.t);
+		priv->fs.tc.t = NULL;
+	}
+}
+
 static struct mlx5_flow_handle *
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		      struct mlx5_flow_spec *spec,
@@ -149,7 +166,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 }
 
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
-			       struct mlx5e_tc_flow *flow) {
+			       struct mlx5e_tc_flow *flow);
+
+static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+				  struct mlx5e_tc_flow *flow)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);
+
+	mlx5_eswitch_del_vlan_action(esw, flow->attr);
+
+	if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+		mlx5e_detach_encap(priv, flow);
+}
+
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+			       struct mlx5e_tc_flow *flow)
+{
 	struct list_head *next = flow->encap.next;
 
 	list_del(&flow->encap);
@@ -173,25 +207,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mlx5_fc *counter = NULL;
-
-	if (!IS_ERR(flow->rule)) {
-		counter = mlx5_flow_rule_counter(flow->rule);
-		mlx5_del_flow_rules(flow->rule);
-		mlx5_fc_destroy(priv->mdev, counter);
-	}
-
-	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-		mlx5_eswitch_del_vlan_action(esw, flow->attr);
-		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
-			mlx5e_detach_encap(priv, flow);
-	}
-
-	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
-		mlx5_destroy_flow_table(priv->fs.tc.t);
-		priv->fs.tc.t = NULL;
-	}
+	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+		mlx5e_tc_del_fdb_flow(priv, flow);
+	else
+		mlx5e_tc_del_nic_flow(priv, flow);
 }
 
 static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
@@ -248,12 +267,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 			skb_flow_dissector_target(f->dissector,
 						  FLOW_DISSECTOR_KEY_ENC_PORTS,
 						  f->mask);
+		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 
 		/* Full udp dst port must be given */
 		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
 			goto vxlan_match_offload_err;
 
-		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
+		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
 		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
 			parse_vxlan_attr(spec, f);
 		else {
@@ -976,6 +998,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct mlx5_esw_flow_attr *attr)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 	unsigned short family = ip_tunnel_info_af(tun_info);
 	struct ip_tunnel_key *key = &tun_info->key;
 	struct mlx5_encap_entry *e;
@@ -996,7 +1020,7 @@ vxlan_encap_offload_err:
 		return -EOPNOTSUPP;
 	}
 
-	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
+	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
 	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
 		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
 	} else {
@@ -1112,14 +1136,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 		}
 
 		if (is_tcf_vlan(a)) {
-			if (tcf_vlan_action(a) == VLAN_F_POP) {
+			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
-			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
 				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
 					return -EOPNOTSUPP;
 
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
 				attr->vlan = tcf_vlan_push_vid(a);
+			} else { /* action is TCA_VLAN_ACT_MODIFY */
+				return -EOPNOTSUPP;
 			}
 			continue;
 		}
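Aside on the vlan hunk above: tcf_vlan_action() reports the TC UAPI action code, which is why the comparisons now use TCA_VLAN_ACT_* rather than the driver-internal VLAN_F_* flags, and why TCA_VLAN_ACT_MODIFY is rejected explicitly. A rough standalone sketch of that dispatch follows; the action codes are meant to mirror include/uapi/linux/tc_act/tc_vlan.h (verify against the header), and every other name is invented for illustration:

#include <errno.h>
#include <stdint.h>

enum {
	SKETCH_VLAN_ACT_POP = 1,	/* TCA_VLAN_ACT_POP */
	SKETCH_VLAN_ACT_PUSH,		/* TCA_VLAN_ACT_PUSH */
	SKETCH_VLAN_ACT_MODIFY,		/* TCA_VLAN_ACT_MODIFY */
};

#define SKETCH_ACTION_VLAN_POP	(1u << 0)
#define SKETCH_ACTION_VLAN_PUSH	(1u << 1)

/* Returns 0 if the vlan action can be offloaded, -EOPNOTSUPP otherwise. */
int sketch_parse_vlan(int act, int is_8021q, uint16_t vid,
		      uint32_t *action, uint16_t *vlan)
{
	if (act == SKETCH_VLAN_ACT_POP) {
		*action |= SKETCH_ACTION_VLAN_POP;
	} else if (act == SKETCH_VLAN_ACT_PUSH) {
		if (!is_8021q)
			return -EOPNOTSUPP;	/* only 802.1Q push is offloaded */
		*action |= SKETCH_ACTION_VLAN_PUSH;
		*vlan = vid;
	} else {				/* e.g. TCA_VLAN_ACT_MODIFY */
		return -EOPNOTSUPP;
	}
	return 0;
}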
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f193128bac4b..57f5e2d7ebd1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -274,15 +274,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 			sq->stats.tso_bytes += skb->len - ihs;
 		}
 
+		sq->stats.packets += skb_shinfo(skb)->gso_segs;
 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
 	} else {
 		bf = sq->bf_budget &&
 		     !skb->xmit_more &&
 		     !skb_shinfo(skb)->nr_frags;
 		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+		sq->stats.packets++;
 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 	}
 
+	sq->stats.bytes += num_bytes;
 	wi->num_bytes = num_bytes;
 
 	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -381,8 +384,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	if (bf)
 		sq->bf_budget--;
 
-	sq->stats.packets++;
-	sq->stats.bytes += num_bytes;
 	return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:
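For the en_tx.c accounting above, a worked example may help: a TSO skb is reported as gso_segs wire packets, and its byte count includes one extra copy of the inlined headers for every segment after the first. A small compilable illustration with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t skb_len  = 3054;	/* headers + payload handed to the driver */
	uint32_t ihs      = 54;		/* inlined Ethernet/IP/TCP header size */
	uint32_t gso_segs = 3;		/* segments the NIC will emit */

	/* Mirrors the hunk above: count every emitted segment, and account
	 * for the headers replicated in front of each extra segment.
	 */
	uint64_t packets   = gso_segs;
	uint64_t num_bytes = skb_len + (uint64_t)(gso_segs - 1) * ihs;

	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)packets, (unsigned long long)num_bytes);
	return 0;
}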
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 5b78883d5654..ad329b1680b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -209,6 +209,7 @@ struct mlx5_esw_offload {
 	struct mlx5_eswitch_rep *vport_reps;
 	DECLARE_HASHTABLE(encap_tbl, 8);
 	u8 inline_mode;
+	u64 num_flows;
 };
 
 struct mlx5_eswitch {
@@ -271,6 +272,11 @@ struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
 				struct mlx5_esw_flow_attr *attr);
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_handle *rule,
+				struct mlx5_esw_flow_attr *attr);
+
 struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4f5b0d47d5f3..307ec6c5fd3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -93,10 +93,27 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				   spec, &flow_act, dest, i);
 	if (IS_ERR(rule))
 		mlx5_fc_destroy(esw->dev, counter);
+	else
+		esw->offloads.num_flows++;
 
 	return rule;
 }
 
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_handle *rule,
+				struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_fc *counter = NULL;
+
+	if (!IS_ERR(rule)) {
+		counter = mlx5_flow_rule_counter(rule);
+		mlx5_del_flow_rules(rule);
+		mlx5_fc_destroy(esw->dev, counter);
+		esw->offloads.num_flows--;
+	}
+}
+
 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 {
 	struct mlx5_eswitch_rep *rep;
@@ -908,6 +925,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
 		return -EOPNOTSUPP;
 
+	if (esw->offloads.num_flows > 0) {
+		esw_warn(dev, "Can't set inline mode when flows are configured\n");
+		return -EOPNOTSUPP;
+	}
+
 	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
 	if (err)
 		goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e2bd600d19de..60154a175bd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = {
 	[2] = {
 		.mask = MLX5_PROF_MASK_QP_SIZE |
 			MLX5_PROF_MASK_MR_CACHE,
-		.log_max_qp = 17,
+		.log_max_qp = 18,
 		.mr_cache[0] = {
 			.size = 500,
 			.limit = 250
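The main.c change above bumps a power-of-two exponent, so the default profile's QP budget doubles. A trivial arithmetic check (illustration only, not driver code):

#include <stdio.h>

int main(void)
{
	printf("log_max_qp=17 -> %u QPs\n", 1u << 17);	/* 131072 */
	printf("log_max_qp=18 -> %u QPs\n", 1u << 18);	/* 262144 */
	return 0;
}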