author	David S. Miller <davem@davemloft.net>	2017-01-10 21:34:03 -0500
committer	David S. Miller <davem@davemloft.net>	2017-01-10 21:34:03 -0500
commit	6c711c8691bf91d0e830ff4215b08e51c0626769
tree	4f4a80ffb789cf974cfc3e0457aeee5eac7a2622
parent	57ea52a865144aedbcd619ee0081155e658b6f7d
parent	5e44fca5047054f1762813751626b5245e0da022
Merge branch 'mlx5-fixes'
Saeed Mahameed says:

====================
Mellanox mlx5 fixes and cleanups 2017-01-10

This series includes some mlx5e general cleanups from Daniel, Gil, Hadar and
myself. It also includes some critical mlx5e TC offload fixes from Or Gerlitz.

For -stable:
- net/mlx5e: Remove WARN_ONCE from adaptive moderation code

Although this fix doesn't affect any functionality, I thought it is better to
clean this WARN_ONCE up for -stable in case someone hits such a corner case.

Please apply and let me know if there's any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c |  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c    | 93
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c     |  6
4 files changed, 75 insertions(+), 44 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1236b27b1493..2b7dd315020c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3675,14 +3675,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-
 	mlx5e_vxlan_cleanup(priv);
 
-	if (MLX5_CAP_GEN(mdev, vport_group_manager))
-		mlx5_eswitch_unregister_vport_rep(esw, 0);
-
 	if (priv->xdp_prog)
 		bpf_prog_put(priv->xdp_prog);
 }
@@ -3807,9 +3801,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 {
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
 	queue_work(priv->wq, &priv->set_rx_mode_work);
+	if (MLX5_CAP_GEN(mdev, vport_group_manager))
+		mlx5_eswitch_unregister_vport_rep(esw, 0);
 	mlx5e_disable_async_events(priv);
-	mlx5_lag_remove(priv->mdev);
+	mlx5_lag_remove(mdev);
 }
 
 static const struct mlx5e_profile mlx5e_nic_profile = {
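
The point of this hunk is symmetry: the vport rep is registered from mlx5e_nic_enable(), so mlx5e_nic_disable(), not the later profile cleanup hook, is where it must be unregistered. Below is a minimal userspace sketch of that enable/disable pairing; the names and the printf stand-ins are hypothetical, not driver code:

/* Sketch: resources taken in an enable hook are released in the
 * mirrored disable hook, so setup and teardown stay ordering-safe.
 */
#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool rep_registered;	/* models the eswitch vport rep */
};

static void nic_enable(struct dev_state *s, bool vport_group_manager)
{
	if (vport_group_manager) {
		s->rep_registered = true;	/* register_vport_rep() */
		printf("vport rep registered\n");
	}
}

static void nic_disable(struct dev_state *s)
{
	/* mirror of nic_enable: undo here, not in a later cleanup hook */
	if (s->rep_registered) {
		s->rep_registered = false;	/* unregister_vport_rep() */
		printf("vport rep unregistered\n");
	}
}

int main(void)
{
	struct dev_state s = { 0 };

	nic_enable(&s, true);
	nic_disable(&s);
	return 0;
}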
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index 1fffe48a93cc..cbfac06b7ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
 	switch (am->tune_state) {
 	case MLX5E_AM_PARKING_ON_TOP:
 	case MLX5E_AM_PARKING_TIRED:
-		WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
 		return true;
 	case MLX5E_AM_GOING_RIGHT:
 		return (am->steps_left > 1) && (am->steps_right == 1);
@@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am)
 	switch (am->tune_state) {
 	case MLX5E_AM_PARKING_ON_TOP:
 	case MLX5E_AM_PARKING_TIRED:
-		WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
 		break;
 	case MLX5E_AM_GOING_RIGHT:
 		am->tune_state = MLX5E_AM_GOING_LEFT;
@@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am)
 	switch (am->tune_state) {
 	case MLX5E_AM_PARKING_ON_TOP:
 	case MLX5E_AM_PARKING_TIRED:
-		WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
 		break;
 	case MLX5E_AM_GOING_RIGHT:
 		if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
@@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
 	u32 delta_us = ktime_us_delta(end->time, start->time);
 	unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
 
-	if (!delta_us) {
-		WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+	if (!delta_us)
 		return;
-	}
 
 	curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
 	curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
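
With the WARN_ONCE splats removed, the PARKING states and a zero time delta are treated as reachable, silently handled cases. A standalone sketch of the guarded rate calculation that remains, assuming the driver's units (a microsecond delta producing packets per millisecond); the struct and field names are simplified stand-ins:

#include <stdio.h>

#define USEC_PER_MSEC 1000UL

struct sample { unsigned long time_us; unsigned int pkt_ctr; };
struct stats  { unsigned long ppms; };

static void calc_stats(const struct sample *start, const struct sample *end,
		       struct stats *curr)
{
	unsigned long delta_us = end->time_us - start->time_us;
	unsigned int npkts = end->pkt_ctr - start->pkt_ctr;

	if (!delta_us)	/* same-timestamp samples: just skip, don't warn */
		return;

	curr->ppms = (npkts * USEC_PER_MSEC) / delta_us;
}

int main(void)
{
	struct sample a = { .time_us = 0, .pkt_ctr = 0 };
	struct sample b = { .time_us = 500, .pkt_ctr = 700 };
	struct stats st = { 0 };

	calc_stats(&a, &b, &st);
	printf("ppms=%lu\n", st.ppms);	/* 700 pkts / 0.5 ms = 1400 */
	return 0;
}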
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f8829b517156..118cea5e5489 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
 	}
 }
 
+/* we get here also when setting rule to the FW failed, etc. It means that the
+ * flow rule itself might not exist, but some offloading related to the actions
+ * should be cleaned.
+ */
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_fc *counter = NULL;
 
-	counter = mlx5_flow_rule_counter(flow->rule);
-
-	mlx5_del_flow_rules(flow->rule);
+	if (!IS_ERR(flow->rule)) {
+		counter = mlx5_flow_rule_counter(flow->rule);
+		mlx5_del_flow_rules(flow->rule);
+		mlx5_fc_destroy(priv->mdev, counter);
+	}
 
 	if (esw && esw->mode == SRIOV_OFFLOADS) {
 		mlx5_eswitch_del_vlan_action(esw, flow->attr);
@@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 		mlx5e_detach_encap(priv, flow);
 	}
 
-	mlx5_fc_destroy(priv->mdev, counter);
-
 	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
 		mlx5_destroy_flow_table(priv->fs.tc.t);
 		priv->fs.tc.t = NULL;
@@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 				       outer_headers);
 
+	struct flow_dissector_key_control *enc_control =
+		skb_flow_dissector_target(f->dissector,
+					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
+					  f->key);
+
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
 		struct flow_dissector_key_ports *key =
 			skb_flow_dissector_target(f->dissector,
@@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 
 		/* Full udp dst port must be given */
 		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
-			return -EOPNOTSUPP;
-
-		/* udp src port isn't supported */
-		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
-			return -EOPNOTSUPP;
+			goto vxlan_match_offload_err;
 
 		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
 		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
 			parse_vxlan_attr(spec, f);
-		else
+		else {
+			netdev_warn(priv->netdev,
+				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
 			return -EOPNOTSUPP;
+		}
 
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 			 udp_dport, ntohs(mask->dst));
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 			 udp_dport, ntohs(key->dst));
 
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+			 udp_sport, ntohs(mask->src));
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+			 udp_sport, ntohs(key->src));
 	} else { /* udp dst port must be given */
-		return -EOPNOTSUPP;
+vxlan_match_offload_err:
+		netdev_warn(priv->netdev,
+			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
+		return -EOPNOTSUPP;
 	}
 
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
 		struct flow_dissector_key_ipv4_addrs *key =
 			skb_flow_dissector_target(f->dissector,
 						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
@@ -280,10 +295,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
 			 ntohl(key->dst));
-	}
 
-	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+	}
 
 	/* Enforce DMAC when offloading incoming tunneled flows.
 	 * Flow counters require a match on the DMAC.
@@ -346,6 +361,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			if (parse_tunnel_attr(priv, spec, f))
 				return -EOPNOTSUPP;
 			break;
+		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+			netdev_warn(priv->netdev,
+				    "IPv6 tunnel decap offload isn't supported\n");
 		default:
 			return -EOPNOTSUPP;
 		}
@@ -375,6 +393,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
 				 key->flags & FLOW_DIS_IS_FRAGMENT);
+
+			/* the HW doesn't need L3 inline to match on frag=no */
+			if (key->flags & FLOW_DIS_IS_FRAGMENT)
+				*min_inline = MLX5_INLINE_MODE_IP;
 		}
 	}
 
@@ -647,17 +669,14 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 
 #if IS_ENABLED(CONFIG_INET)
 	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
-	if (IS_ERR(rt)) {
-		pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
-		return -EOPNOTSUPP;
-	}
+	if (IS_ERR(rt))
+		return PTR_ERR(rt);
 #else
 	return -EOPNOTSUPP;
 #endif
 
 	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-		pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
-			__func__);
+		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
 		ip_rt_put(rt);
 		return -EOPNOTSUPP;
 	}
@@ -718,12 +737,12 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 				       struct net_device **out_dev)
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	struct neighbour *n = NULL;
 	struct flowi4 fl4 = {};
-	struct neighbour *n;
 	char *encap_header;
 	int encap_size;
-	__be32 saddr;
-	int ttl;
+	__be32 saddr = 0;
+	int ttl = 0;
 	int err;
 
 	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
@@ -750,7 +769,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	e->out_dev = *out_dev;
 
 	if (!(n->nud_state & NUD_VALID)) {
-		err = -ENOTSUPP;
+		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+		err = -EOPNOTSUPP;
 		goto out;
 	}
 
@@ -772,6 +792,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
 			       encap_size, encap_header, &e->encap_id);
 out:
+	if (err && n)
+		neigh_release(n);
 	kfree(encap_header);
 	return err;
 }
@@ -792,9 +814,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	int tunnel_type;
 	int err;
 
-	/* udp dst port must be given */
+	/* udp dst port must be set */
 	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
+		goto vxlan_encap_offload_err;
+
+	/* setting udp src port isn't supported */
+	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
+vxlan_encap_offload_err:
+		netdev_warn(priv->netdev,
+			    "must set udp dst port and not set udp src port\n");
 		return -EOPNOTSUPP;
+	}
 
 	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
 	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
@@ -802,6 +832,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 		info.tun_id = tunnel_id_to_key32(key->tun_id);
 		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
 	} else {
+		netdev_warn(priv->netdev,
+			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
 		return -EOPNOTSUPP;
 	}
 
@@ -809,6 +841,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	case AF_INET:
 		info.daddr = key->u.ipv4.dst;
 		break;
+	case AF_INET6:
+		netdev_warn(priv->netdev,
+			    "IPv6 tunnel encap offload isn't supported\n");
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -986,7 +1021,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 
 	if (IS_ERR(flow->rule)) {
 		err = PTR_ERR(flow->rule);
-		goto err_free;
+		goto err_del_rule;
 	}
 
 	err = rhashtable_insert_fast(&tc->ht, &flow->node,
@@ -997,7 +1032,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 	goto out;
 
 err_del_rule:
-	mlx5_del_flow_rules(flow->rule);
+	mlx5e_tc_del_flow(priv, flow);
 
 err_free:
 	kfree(flow);
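
The error path in mlx5e_configure_flower() now jumps to mlx5e_tc_del_flow(), so that function must tolerate flow->rule holding an encoded errno rather than a valid rule pointer. Below is a userspace sketch of the kernel's ERR_PTR/IS_ERR convention the new IS_ERR() check relies on (the last 4095 addresses encode error codes); the del_flow body is illustrative only:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void del_flow(void *rule)
{
	if (!IS_ERR(rule)) {
		/* only a real rule has a counter to read and destroy */
		printf("deleting rule %p and its counter\n", rule);
	}
	/* action-related offload state is cleaned either way */
	printf("cleaning action offload state\n");
}

int main(void)
{
	int dummy;

	del_flow(&dummy);	/* normal teardown: a valid rule pointer */
	del_flow(ERR_PTR(-22));	/* error path: rule is ERR_PTR(-EINVAL) */
	return 0;
}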
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 6547f22e6b9b..d01e9f21d469 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1195,7 +1195,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 {
 	int err = 0;
 
-	mlx5_drain_health_wq(dev);
+	if (cleanup)
+		mlx5_drain_health_wq(dev);
 
 	mutex_lock(&dev->intf_state_mutex);
 	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
@@ -1359,9 +1360,10 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
 	mlx5_enter_error_state(dev);
 	mlx5_unload_one(dev, priv, false);
-	/* In case of kernel call save the pci state */
+	/* In case of kernel call save the pci state and drain the health wq */
 	if (state) {
 		pci_save_state(pdev);
+		mlx5_drain_health_wq(dev);
 		mlx5_pci_disable_device(dev);
 	}
 
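
These two hunks work together: mlx5_unload_one() drains the health workqueue only on the final-cleanup path, while the PCI error handler drains it itself after the unload, once the PCI state is saved. A minimal sketch of that reordering, with printf stand-ins for the real calls (names are illustrative, not the driver API):

#include <stdbool.h>
#include <stdio.h>

static void drain_health_wq(void) { printf("health wq drained\n"); }

/* cleanup is true only on final teardown; the PCI error path passes
 * false and performs the drain itself once it is safe to do so.
 */
static void unload_one(bool cleanup)
{
	if (cleanup)
		drain_health_wq();
	printf("device unloaded (cleanup=%d)\n", cleanup);
}

static void pci_err_detected(bool kernel_call)
{
	unload_one(false);		/* no drain here: avoids waiting on
					 * health work that re-enters unload */
	if (kernel_call) {
		printf("pci state saved\n");
		drain_health_wq();	/* safe now, device already unloaded */
		printf("pci device disabled\n");
	}
}

int main(void)
{
	pci_err_detected(true);
	return 0;
}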