author		David S. Miller <davem@davemloft.net>	2017-10-27 09:23:41 -0400
committer	David S. Miller <davem@davemloft.net>	2017-10-27 09:23:41 -0400
commit		5be9541a0923c7199f4cd55b9e3abc9a43247509 (patch)
tree		6e68385811ddad11eb8adb0409ef18a7c2d2c9f5
parent		9618aec3349b7669b6bf123c7c6121789cb82861 (diff)
parent		be0f161ef141e4df368aa3f417a1c2ab9c362e75 (diff)
Merge tag 'mlx5-fixes-2017-10-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2017-10-26

The series includes some misc fixes for the mlx5 core and ethernet
driver.

Please pull and let me know if there's any problem.

For -stable:
    net/mlx5e: Properly deal with encap flows add/del under neigh update (kernels >= 4.12)
    net/mlx5: Fix health work queue spin lock to IRQ safe (kernels >= 4.13)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/dev.c		70
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c	113
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_tc.c		89
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/health.c	5
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/port.c		21
-rw-r--r--	include/linux/mlx5/port.h				2
6 files changed, 204 insertions, 96 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index ff60cf7342ca..fc281712869b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv,
 	list_add_tail(&delayed_event->list, &priv->waiting_events_list);
 }
 
-static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx,
-				      struct mlx5_core_dev *dev,
-				      struct mlx5_priv *priv)
+static void delayed_event_release(struct mlx5_device_context *dev_ctx,
+				  struct mlx5_priv *priv)
 {
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
 	struct mlx5_delayed_event *de;
 	struct mlx5_delayed_event *n;
+	struct list_head temp;
 
-	/* stop delaying events */
-	priv->is_accum_events = false;
+	INIT_LIST_HEAD(&temp);
+
+	spin_lock_irq(&priv->ctx_lock);
 
-	/* fire all accumulated events before new event comes */
-	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
+	priv->is_accum_events = false;
+	list_splice_init(&priv->waiting_events_list, &temp);
+	if (!dev_ctx->context)
+		goto out;
+	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
 		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
+
+out:
+	spin_unlock_irq(&priv->ctx_lock);
+
+	list_for_each_entry_safe(de, n, &temp, list) {
 		list_del(&de->list);
 		kfree(de);
 	}
 }
 
-static void cleanup_delayed_evets(struct mlx5_priv *priv)
+/* accumulating events that can come after mlx5_ib calls to
+ * ib_register_device, till adding that interface to the events list.
+ */
+static void delayed_event_start(struct mlx5_priv *priv)
 {
-	struct mlx5_delayed_event *de;
-	struct mlx5_delayed_event *n;
-
 	spin_lock_irq(&priv->ctx_lock);
-	priv->is_accum_events = false;
-	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
-		list_del(&de->list);
-		kfree(de);
-	}
+	priv->is_accum_events = true;
 	spin_unlock_irq(&priv->ctx_lock);
 }
 
@@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 		return;
 
 	dev_ctx->intf = intf;
-	/* accumulating events that can come after mlx5_ib calls to
-	 * ib_register_device, till adding that interface to the events list.
-	 */
 
-	priv->is_accum_events = true;
+	delayed_event_start(priv);
 
 	dev_ctx->context = intf->add(dev);
 	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 	spin_lock_irq(&priv->ctx_lock);
 	list_add_tail(&dev_ctx->list, &priv->ctx_list);
 
-	fire_delayed_event_locked(dev_ctx, dev, priv);
-
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	if (dev_ctx->intf->pfault) {
 		if (priv->pfault) {
@@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 	}
 #endif
 	spin_unlock_irq(&priv->ctx_lock);
-	} else {
-		kfree(dev_ctx);
-		/* delete all accumulated events */
-		cleanup_delayed_evets(priv);
 	}
+
+	delayed_event_release(dev_ctx, priv);
+
+	if (!dev_ctx->context)
+		kfree(dev_ctx);
 }
 
 static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
@@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
 	if (!dev_ctx)
 		return;
 
+	delayed_event_start(priv);
 	if (intf->attach) {
 		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
-			return;
+			goto out;
 		intf->attach(dev, dev_ctx->context);
 		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
 	} else {
 		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
-			return;
+			goto out;
 		dev_ctx->context = intf->add(dev);
 		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
 	}
+
+out:
+	delayed_event_release(dev_ctx, priv);
 }
 
 void mlx5_attach_device(struct mlx5_core_dev *dev)
@@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 	if (priv->is_accum_events)
 		add_delayed_event(priv, dev, event, param);
 
+	/* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
+	 * still in priv->ctx_list. In this case, only notify the dev_ctx if its
+	 * ADDED or ATTACHED bit are set.
+	 */
 	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-		if (dev_ctx->intf->event)
+		if (dev_ctx->intf->event &&
+		    (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
+		     test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
 			dev_ctx->intf->event(dev, dev_ctx->context, event, param);
 
 	spin_unlock_irqrestore(&priv->ctx_lock, flags);
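The dev.c hunks above rework the delayed-event machinery: delayed_event_start() turns accumulation on under ctx_lock, and delayed_event_release() turns it off, splices the pending list out, delivers the events, and only kfree()s the entries after dropping the lock. Below is a minimal userspace sketch of that accumulate-then-flush pattern; all names are hypothetical, and the kernel version uses struct list_head, list_splice_init() and spin_lock_irq() instead of this toy singly-linked list.

/* Sketch: accumulate events while a consumer registers, then flush.
 * Illustrative only, not the mlx5 code. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct event { int id; struct event *next; };

static struct event *pending;   /* waiting_events_list analogue */
static bool accumulating;       /* is_accum_events analogue */

static void delayed_start(void) { accumulating = true; }

static void add_event(int id)
{
	if (!accumulating)
		return;
	struct event *e = malloc(sizeof(*e));
	e->id = id;
	e->next = pending;      /* head insertion: LIFO in this sketch */
	pending = e;
}

/* Stop accumulating, splice the list out, deliver, then free -
 * mirroring the list_splice_init() + deferred kfree() split above. */
static void delayed_release(bool have_consumer)
{
	struct event *temp = pending;
	pending = NULL;
	accumulating = false;

	while (temp) {
		struct event *n = temp->next;
		if (have_consumer)
			printf("deliver event %d\n", temp->id);
		free(temp);
		temp = n;
	}
}

int main(void)
{
	delayed_start();
	add_event(1);
	add_event(2);
	delayed_release(true);  /* prints event 2 then 1 */
	return 0;
}

The sketch delivers in LIFO order because it pushes at the head; the driver uses list_add_tail() so accumulated events fire in arrival order.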
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c1d384fca4dc..51c4cc00a186 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -41,6 +41,11 @@
 #define MLX5E_CEE_STATE_UP    1
 #define MLX5E_CEE_STATE_DOWN  0
 
+enum {
+	MLX5E_VENDOR_TC_GROUP_NUM = 7,
+	MLX5E_LOWEST_PRIO_GROUP   = 0,
+};
+
 /* If dcbx mode is non-host set the dcbx mode to host.
  */
 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -85,6 +90,9 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
+	bool is_tc_group_6_exist = false;
+	bool is_zero_bw_ets_tc = false;
 	int err = 0;
 	int i;
 
@@ -96,37 +104,64 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
 		if (err)
 			return err;
-	}
 
-	for (i = 0; i < ets->ets_cap; i++) {
+		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
+		if (err)
+			return err;
+
 		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
 		if (err)
 			return err;
+
+		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
+		    tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
+			is_zero_bw_ets_tc = true;
+
+		if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
+			is_tc_group_6_exist = true;
+	}
+
+	/* Report 0% ets tc if exits*/
+	if (is_zero_bw_ets_tc) {
+		for (i = 0; i < ets->ets_cap; i++)
+			if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
+				ets->tc_tx_bw[i] = 0;
+	}
+
+	/* Update tc_tsa based on fw setting*/
+	for (i = 0; i < ets->ets_cap; i++) {
 		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
 			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+		else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
+			 !is_tc_group_6_exist)
+			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
 	}
-
 	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
 
 	return err;
 }
 
-enum {
-	MLX5E_VENDOR_TC_GROUP_NUM = 7,
-	MLX5E_ETS_TC_GROUP_NUM = 0,
-};
-
 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
 {
 	bool any_tc_mapped_to_ets = false;
+	bool ets_zero_bw = false;
 	int strict_group;
 	int i;
 
-	for (i = 0; i <= max_tc; i++)
-		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+	for (i = 0; i <= max_tc; i++) {
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
 			any_tc_mapped_to_ets = true;
+			if (!ets->tc_tx_bw[i])
+				ets_zero_bw = true;
+		}
+	}
 
-	strict_group = any_tc_mapped_to_ets ? 1 : 0;
+	/* strict group has higher priority than ets group */
+	strict_group = MLX5E_LOWEST_PRIO_GROUP;
+	if (any_tc_mapped_to_ets)
+		strict_group++;
+	if (ets_zero_bw)
+		strict_group++;
 
 	for (i = 0; i <= max_tc; i++) {
 		switch (ets->tc_tsa[i]) {
@@ -137,7 +172,9 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
 			tc_group[i] = strict_group++;
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
-			tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
+			tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
+			if (ets->tc_tx_bw[i] && ets_zero_bw)
+				tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
 			break;
 		}
 	}
@@ -146,9 +183,23 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
 				 u8 *tc_group, int max_tc)
 {
+	int bw_for_ets_zero_bw_tc = 0;
+	int last_ets_zero_bw_tc = -1;
+	int num_ets_zero_bw = 0;
 	int i;
 
 	for (i = 0; i <= max_tc; i++) {
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
+		    !ets->tc_tx_bw[i]) {
+			num_ets_zero_bw++;
+			last_ets_zero_bw_tc = i;
+		}
+	}
+
+	if (num_ets_zero_bw)
+		bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
+
+	for (i = 0; i <= max_tc; i++) {
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_VENDOR:
 			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
@@ -157,12 +208,26 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
 			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
-			tc_tx_bw[i] = ets->tc_tx_bw[i];
+			tc_tx_bw[i] = ets->tc_tx_bw[i] ?
+				      ets->tc_tx_bw[i] :
+				      bw_for_ets_zero_bw_tc;
 			break;
 		}
 	}
+
+	/* Make sure the total bw for ets zero bw group is 100% */
+	if (last_ets_zero_bw_tc != -1)
+		tc_tx_bw[last_ets_zero_bw_tc] +=
+			MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
 }
 
+/* If there are ETS BW 0,
+ * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
+ * Set group #0 to all the ETS BW 0 tcs and
+ * equally splits the 100% BW between them
+ * Report both group #0 and #1 as ETS type.
+ * All the tcs in group #0 will be reported with 0% BW.
+ */
 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -188,7 +253,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 		return err;
 
 	memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
-
 	return err;
 }
 
@@ -209,17 +273,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
 	}
 
 	/* Validate Bandwidth Sum */
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
-			if (!ets->tc_tx_bw[i]) {
-				netdev_err(netdev,
-					   "Failed to validate ETS: BW 0 is illegal\n");
-				return -EINVAL;
-			}
-
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
 			bw_sum += ets->tc_tx_bw[i];
-		}
-	}
 
 	if (bw_sum != 0 && bw_sum != 100) {
 		netdev_err(netdev,
@@ -533,8 +589,7 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
 				      int pgid, u8 *bw_pct)
 {
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
+	struct ieee_ets ets;
 
 	if (pgid >= CEE_DCBX_MAX_PGS) {
 		netdev_err(netdev,
@@ -542,8 +597,8 @@ static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
 		return;
 	}
 
-	if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct))
-		*bw_pct = 0;
+	mlx5e_dcbnl_ieee_getets(netdev, &ets);
+	*bw_pct = ets.tc_tx_bw[pgid];
 }
 
 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
@@ -739,8 +794,6 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
 		ets.prio_tc[i] = i;
 	}
 
-	memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
-
 	/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
 	ets.prio_tc[0] = 1;
 	ets.prio_tc[1] = 0;
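mlx5e_build_tc_tx_bw() above gives each zero-BW ETS TC an equal share of MLX5E_MAX_BW_ALLOC and hands the integer-division remainder to the last such TC, so the hardware group still sums to exactly 100%. A standalone sketch of that arithmetic, assuming MLX5E_MAX_BW_ALLOC is 100 (the percentage cap the driver uses):

/* Worked example: three zero-BW ETS TCs get 100/3 = 33 each,
 * and the last one also gets 100 % 3 = 1, for a total of 100. */
#include <stdio.h>

#define MAX_BW_ALLOC 100	/* assumed value of MLX5E_MAX_BW_ALLOC */

int main(void)
{
	/* TCs 1, 2 and 5 are ETS TCs configured with 0% BW */
	int is_zero_bw_ets[8] = { 0, 1, 1, 0, 0, 1, 0, 0 };
	int tc_tx_bw[8] = { 0 };
	int num_zero = 0, last_zero = -1, i;

	for (i = 0; i < 8; i++)
		if (is_zero_bw_ets[i]) {
			num_zero++;
			last_zero = i;
		}

	for (i = 0; i < 8; i++)
		if (is_zero_bw_ets[i])
			tc_tx_bw[i] = MAX_BW_ALLOC / num_zero;	/* 33 each */

	/* remainder to the last zero-BW TC: 33 + 1 = 34 */
	if (last_zero != -1)
		tc_tx_bw[last_zero] += MAX_BW_ALLOC % num_zero;

	for (i = 0; i < 8; i++)
		printf("tc%d -> %d%%\n", i, tc_tx_bw[i]);
	return 0;	/* zero-BW group sums to 33 + 33 + 34 = 100 */
}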
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 1aa2028ed995..9ba1f72060aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
 };
 
 struct mlx5e_tc_flow_parse_attr {
+	struct ip_tunnel_info tun_info;
 	struct mlx5_flow_spec spec;
 	int num_mod_hdr_actions;
 	void *mod_hdr_actions;
+	int mirred_ifindex;
 };
 
 enum {
@@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
 			       struct mlx5e_tc_flow *flow);
 
+static int mlx5e_attach_encap(struct mlx5e_priv *priv,
+			      struct ip_tunnel_info *tun_info,
+			      struct net_device *mirred_dev,
+			      struct net_device **encap_dev,
+			      struct mlx5e_tc_flow *flow);
+
 static struct mlx5_flow_handle *
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
-	struct mlx5_flow_handle *rule;
+	struct net_device *out_dev, *encap_dev = NULL;
+	struct mlx5_flow_handle *rule = NULL;
+	struct mlx5e_rep_priv *rpriv;
+	struct mlx5e_priv *out_priv;
 	int err;
 
+	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+		out_dev = __dev_get_by_index(dev_net(priv->netdev),
+					     attr->parse_attr->mirred_ifindex);
+		err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
+					 out_dev, &encap_dev, flow);
+		if (err) {
+			rule = ERR_PTR(err);
+			if (err != -EAGAIN)
+				goto err_attach_encap;
+		}
+		out_priv = netdev_priv(encap_dev);
+		rpriv = out_priv->ppriv;
+		attr->out_rep = rpriv->rep;
+	}
+
 	err = mlx5_eswitch_add_vlan_action(esw, attr);
 	if (err) {
 		rule = ERR_PTR(err);
@@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		}
 	}
 
-	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
-	if (IS_ERR(rule))
-		goto err_add_rule;
-
+	/* we get here if (1) there's no error (rule being null) or when
+	 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
+	 */
+	if (rule != ERR_PTR(-EAGAIN)) {
+		rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+		if (IS_ERR(rule))
+			goto err_add_rule;
+	}
 	return rule;
 
 err_add_rule:
@@ -361,6 +391,7 @@ err_mod_hdr:
 err_add_vlan:
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
 		mlx5e_detach_encap(priv, flow);
+err_attach_encap:
 	return rule;
 }
 
366 397
@@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 			      struct mlx5e_encap_entry *e)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5_esw_flow_attr *esw_attr;
 	struct mlx5e_tc_flow *flow;
 	int err;
 
@@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 	mlx5e_rep_queue_neigh_stats_work(priv);
 
 	list_for_each_entry(flow, &e->flows, encap) {
-		flow->esw_attr->encap_id = e->encap_id;
-		flow->rule = mlx5e_tc_add_fdb_flow(priv,
-						   flow->esw_attr->parse_attr,
-						   flow);
+		esw_attr = flow->esw_attr;
+		esw_attr->encap_id = e->encap_id;
+		flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
 		if (IS_ERR(flow->rule)) {
 			err = PTR_ERR(flow->rule);
 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 			      struct mlx5e_encap_entry *e)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5e_tc_flow *flow;
-	struct mlx5_fc *counter;
 
 	list_for_each_entry(flow, &e->flows, encap) {
 		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
 			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-			counter = mlx5_flow_rule_counter(flow->rule);
-			mlx5_del_flow_rules(flow->rule);
-			mlx5_fc_destroy(priv->mdev, counter);
+			mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
 		}
 	}
 
@@ -1942,7 +1972,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
 		if (is_tcf_mirred_egress_redirect(a)) {
 			int ifindex = tcf_mirred_ifindex(a);
-			struct net_device *out_dev, *encap_dev = NULL;
+			struct net_device *out_dev;
 			struct mlx5e_priv *out_priv;
 
 			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1955,17 +1985,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			rpriv = out_priv->ppriv;
 			attr->out_rep = rpriv->rep;
 		} else if (encap) {
-			err = mlx5e_attach_encap(priv, info,
-						 out_dev, &encap_dev, flow);
-			if (err && err != -EAGAIN)
-				return err;
+			parse_attr->mirred_ifindex = ifindex;
+			parse_attr->tun_info = *info;
+			attr->parse_attr = parse_attr;
 			attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
 				MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
 				MLX5_FLOW_CONTEXT_ACTION_COUNT;
-			out_priv = netdev_priv(encap_dev);
-			rpriv = out_priv->ppriv;
-			attr->out_rep = rpriv->rep;
-			attr->parse_attr = parse_attr;
+			/* attr->out_rep is resolved when we handle encap */
 		} else {
 			pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
 			       priv->netdev->name, out_dev->name);
@@ -2047,7 +2073,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
 		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
 		if (err < 0)
-			goto err_handle_encap_flow;
+			goto err_free;
 		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
 	} else {
 		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
@@ -2058,10 +2084,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 
 	if (IS_ERR(flow->rule)) {
 		err = PTR_ERR(flow->rule);
-		goto err_free;
+		if (err != -EAGAIN)
+			goto err_free;
 	}
 
-	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+	if (err != -EAGAIN)
+		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+
 	err = rhashtable_insert_fast(&tc->ht, &flow->node,
 				     tc->ht_params);
 	if (err)
@@ -2075,16 +2104,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 err_del_rule:
 	mlx5e_tc_del_flow(priv, flow);
 
-err_handle_encap_flow:
-	if (err == -EAGAIN) {
-		err = rhashtable_insert_fast(&tc->ht, &flow->node,
-					     tc->ht_params);
-		if (err)
-			mlx5e_tc_del_flow(priv, flow);
-		else
-			return 0;
-	}
-
 err_free:
 	kvfree(parse_attr);
 	kfree(flow);
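The en_tc.c changes move encap attachment from parse time into mlx5e_tc_add_fdb_flow(), so a flow whose tunnel neighbour is not yet resolved comes back with -EAGAIN: it is cached in the rhashtable but not flagged MLX5E_TC_FLOW_OFFLOADED, and mlx5e_tc_encap_flows_add() offloads it later once a neigh update provides a valid encap. A minimal userspace sketch of that deferral; the names are illustrative only, not the driver's API.

/* Sketch: cache a flow on -EAGAIN, offload it on neigh update. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define FLOW_OFFLOADED 0x1

struct flow { unsigned flags; };

static int attach_encap(bool neigh_valid)
{
	return neigh_valid ? 0 : -EAGAIN;	/* no valid neigh yet */
}

static int configure_flow(struct flow *f, bool neigh_valid)
{
	int err = attach_encap(neigh_valid);

	if (err && err != -EAGAIN)
		return err;		/* real failure: drop the flow */
	if (!err)
		f->flags |= FLOW_OFFLOADED;
	return 0;			/* -EAGAIN: cached, offloaded later */
}

/* neigh-update handler analogue: offload the cached flow */
static void encap_flows_add(struct flow *f)
{
	f->flags |= FLOW_OFFLOADED;
}

int main(void)
{
	struct flow f = { 0 };

	configure_flow(&f, false);	/* neigh unresolved: cached only */
	printf("offloaded? %u\n", f.flags & FLOW_OFFLOADED);	/* 0 */
	encap_flows_add(&f);		/* neigh resolved later */
	printf("offloaded? %u\n", f.flags & FLOW_OFFLOADED);	/* 1 */
	return 0;
}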
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8aea0a065e56..db86e1506c8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
 
-	spin_lock(&health->wq_lock);
+	spin_lock_irqsave(&health->wq_lock, flags);
 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
-	spin_unlock(&health->wq_lock);
+	spin_unlock_irqrestore(&health->wq_lock, flags);
 	cancel_delayed_work_sync(&dev->priv.health.recover_work);
 }
 
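The health.c fix switches wq_lock to spin_lock_irqsave() because the same lock is also taken in atomic context elsewhere in health.c; with plain spin_lock(), an interrupt arriving on the CPU that already holds the lock could spin on it forever. A generic kernel-style sketch of the rule, not the mlx5 code:

/* Why irqsave: when one acquirer of a lock runs in IRQ context,
 * every process-context acquirer must disable local IRQs while
 * holding it, or the IRQ can deadlock against its own CPU. */
#include <linux/bitops.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_flags_word;

/* runs in IRQ context: IRQs are already off here */
static void example_irq_path(void)
{
	spin_lock(&example_lock);
	set_bit(0, &example_flags_word);
	spin_unlock(&example_lock);
}

/* runs in process context: must mask local IRQs around the lock */
static void example_process_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	set_bit(1, &example_flags_word);
	spin_unlock_irqrestore(&example_lock, flags);
}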
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 1975d4388d4f..e07061f565d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -677,6 +677,27 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
 
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+			     u8 tc, u8 *tc_group)
+{
+	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+	void *ets_tcn_conf;
+	int err;
+
+	err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
+	if (err)
+		return err;
+
+	ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
+				    tc_configuration[tc]);
+
+	*tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+			     group);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
+
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
 {
 	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
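The calling convention for the new mlx5_query_port_tc_group() helper is visible in the mlx5e_dcbnl_ieee_getets() hunk above: callers query one TC at a time. A fragmentary sketch of that loop, where query_all_tc_groups is a hypothetical wrapper, not a driver function:

/* Sketch: query the ETS group of every TC, mirroring the
 * en_dcbnl.c loop above. */
static int query_all_tc_groups(struct mlx5_core_dev *mdev, u8 ets_cap,
			       u8 *tc_group)
{
	int i, err;

	for (i = 0; i < ets_cap; i++) {
		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
		if (err)
			return err;
	}
	return 0;
}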
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index c57d4b7de3a8..c59af8ab753a 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
 int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
 			    u8 prio, u8 *tc);
 int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+			     u8 tc, u8 *tc_group);
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
 int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
 				u8 tc, u8 *bw_pct);