author	Shani Michaeli <shanim@mellanox.com>	2015-03-05 13:16:13 -0500
committer	David S. Miller <davem@davemloft.net>	2015-03-06 21:50:02 -0500
commit	708b869bf56e58b0c41460ba7bf363bf50f330c2 (patch)
tree	20e2241ab631a974a86f3880cbb53a26b112f939 /drivers/net/ethernet
parent	d237baa1cbb3a2335357484c1d63a810a01947e2 (diff)
net/mlx4_en: Add QCN parameters and statistics handling
Implement the IEEE DCB handlers for set/get QCN parameters and statistics reading per TC.

Signed-off-by: Shani Michaeli <shanim@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c  218
 -rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h      1
2 files changed, 219 insertions, 0 deletions
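For context on how the three new handlers are reached: the driver never calls them directly. The kernel's DCB netlink layer looks up the device's dcbnl_rtnl_ops table and dispatches IEEE QCN get/set requests to ieee_getqcn, ieee_setqcn and ieee_getqcnstats. The sketch below shows that dispatch path in isolation; it is not part of this patch, it assumes CONFIG_DCB is enabled, and the helper name query_qcn_params is invented purely for illustration.

#include <linux/netdevice.h>
#include <linux/dcbnl.h>
#include <net/dcbnl.h>

/* Hypothetical helper (not part of this patch): read the per-TC QCN
 * parameters of a device through its dcbnl_rtnl_ops table, i.e. the same
 * entry point the DCB netlink code uses for an IEEE QCN "get" request.
 */
static int query_qcn_params(struct net_device *dev, struct ieee_qcn *qcn)
{
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;

	/* Drivers without QCN support simply leave the hook NULL. */
	if (!ops || !ops->ieee_getqcn)
		return -EOPNOTSUPP;

	/* For mlx4_en this lands in mlx4_en_dcbnl_ieee_getqcn() below, which
	 * fills qcn->rpg_enable[tc], qcn->rpg_time_reset[tc], ... for each of
	 * the IEEE_8021QAZ_MAX_TCS traffic classes.
	 */
	return ops->ieee_getqcn(dev, qcn);
}

Note how the handlers below build the firmware command's input modifier per traffic class: the port number occupies bits 0-7, a one-hot priority mask bits 8-15, and the congestion-control algorithm bits 16 and up, so each traffic class costs one MLX4_CMD_CONGESTION_CTRL_OPCODE round trip to the firmware.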
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index c95ca252187c..cde14fa2f742 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -36,6 +36,49 @@
 
 #include "mlx4_en.h"
 
+/* Definitions for QCN
+ */
+
+struct mlx4_congestion_control_mb_prio_802_1_qau_params {
+	__be32 modify_enable_high;
+	__be32 modify_enable_low;
+	__be32 reserved1;
+	__be32 extended_enable;
+	__be32 rppp_max_rps;
+	__be32 rpg_time_reset;
+	__be32 rpg_byte_reset;
+	__be32 rpg_threshold;
+	__be32 rpg_max_rate;
+	__be32 rpg_ai_rate;
+	__be32 rpg_hai_rate;
+	__be32 rpg_gd;
+	__be32 rpg_min_dec_fac;
+	__be32 rpg_min_rate;
+	__be32 max_time_rise;
+	__be32 max_byte_rise;
+	__be32 max_qdelta;
+	__be32 min_qoffset;
+	__be32 gd_coefficient;
+	__be32 reserved2[5];
+	__be32 cp_sample_base;
+	__be32 reserved3[39];
+};
+
+struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
+	__be64 rppp_rp_centiseconds;
+	__be32 reserved1;
+	__be32 ignored_cnm;
+	__be32 rppp_created_rps;
+	__be32 estimated_total_rate;
+	__be32 max_active_rate_limiter_index;
+	__be32 dropped_cnms_busy_fw;
+	__be32 reserved2;
+	__be32 cnms_handled_successfully;
+	__be32 min_total_limiters_rate;
+	__be32 max_total_limiters_rate;
+	__be32 reserved3[4];
+};
+
 static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
 				     struct ieee_ets *ets)
 {
@@ -242,6 +285,178 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
 	return 0;
 }
 
+#define RPG_ENABLE_BIT	31
+#define CN_TAG_BIT	30
+
+static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
+				     struct ieee_qcn *qcn)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
+	struct mlx4_cmd_mailbox *mailbox_out = NULL;
+	u64 mailbox_in_dma = 0;
+	u32 inmod = 0;
+	int i, err;
+
+	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+		return -EOPNOTSUPP;
+
+	mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+	if (IS_ERR(mailbox_out))
+		return -ENOMEM;
+	hw_qcn =
+	(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
+	mailbox_out->buf;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		inmod = priv->port | ((1 << i) << 8) |
+			(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+		err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
+				   mailbox_out->dma,
+				   inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
+				   MLX4_CMD_CONGESTION_CTRL_OPCODE,
+				   MLX4_CMD_TIME_CLASS_C,
+				   MLX4_CMD_NATIVE);
+		if (err) {
+			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+			return err;
+		}
+
+		qcn->rpg_enable[i] =
+			be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
+		qcn->rppp_max_rps[i] =
+			be32_to_cpu(hw_qcn->rppp_max_rps);
+		qcn->rpg_time_reset[i] =
+			be32_to_cpu(hw_qcn->rpg_time_reset);
+		qcn->rpg_byte_reset[i] =
+			be32_to_cpu(hw_qcn->rpg_byte_reset);
+		qcn->rpg_threshold[i] =
+			be32_to_cpu(hw_qcn->rpg_threshold);
+		qcn->rpg_max_rate[i] =
+			be32_to_cpu(hw_qcn->rpg_max_rate);
+		qcn->rpg_ai_rate[i] =
+			be32_to_cpu(hw_qcn->rpg_ai_rate);
+		qcn->rpg_hai_rate[i] =
+			be32_to_cpu(hw_qcn->rpg_hai_rate);
+		qcn->rpg_gd[i] =
+			be32_to_cpu(hw_qcn->rpg_gd);
+		qcn->rpg_min_dec_fac[i] =
+			be32_to_cpu(hw_qcn->rpg_min_dec_fac);
+		qcn->rpg_min_rate[i] =
+			be32_to_cpu(hw_qcn->rpg_min_rate);
+		qcn->cndd_state_machine[i] =
+			priv->cndd_state[i];
+	}
+	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+	return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
+				     struct ieee_qcn *qcn)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
+	struct mlx4_cmd_mailbox *mailbox_in = NULL;
+	u64 mailbox_in_dma = 0;
+	u32 inmod = 0;
+	int i, err;
+#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
+#define MODIFY_ENABLE_LOW_MASK 0xffc00000
+
+	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+		return -EOPNOTSUPP;
+
+	mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+	if (IS_ERR(mailbox_in))
+		return -ENOMEM;
+
+	mailbox_in_dma = mailbox_in->dma;
+	hw_qcn =
+	(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		inmod = priv->port | ((1 << i) << 8) |
+			(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+
+		/* Before updating a QCN parameter,
+		 * its modify enable bit needs to be set to 1.
+		 */
+
+		hw_qcn->modify_enable_high = cpu_to_be32(
+						MODIFY_ENABLE_HIGH_MASK);
+		hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);
+
+		hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
+		hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
+		hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
+		hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
+		hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
+		hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
+		hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
+		hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
+		hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
+		hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
+		hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
+		priv->cndd_state[i] = qcn->cndd_state_machine[i];
+		if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
+			hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);
+
+		err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
+			       MLX4_CONGESTION_CONTROL_SET_PARAMS,
+			       MLX4_CMD_CONGESTION_CTRL_OPCODE,
+			       MLX4_CMD_TIME_CLASS_C,
+			       MLX4_CMD_NATIVE);
+		if (err) {
+			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
+			return err;
+		}
+	}
+	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
+	return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
+					  struct ieee_qcn_stats *qcn_stats)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
+	struct mlx4_cmd_mailbox *mailbox_out = NULL;
+	u64 mailbox_in_dma = 0;
+	u32 inmod = 0;
+	int i, err;
+
+	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
+		return -EOPNOTSUPP;
+
+	mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
+	if (IS_ERR(mailbox_out))
+		return -ENOMEM;
+
+	hw_qcn_stats =
+	(struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
+	mailbox_out->buf;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		inmod = priv->port | ((1 << i) << 8) |
+			(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
+		err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
+				   mailbox_out->dma, inmod,
+				   MLX4_CONGESTION_CONTROL_GET_STATISTICS,
+				   MLX4_CMD_CONGESTION_CTRL_OPCODE,
+				   MLX4_CMD_TIME_CLASS_C,
+				   MLX4_CMD_NATIVE);
+		if (err) {
+			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+			return err;
+		}
+		qcn_stats->rppp_rp_centiseconds[i] =
+			be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
+		qcn_stats->rppp_created_rps[i] =
+			be32_to_cpu(hw_qcn_stats->rppp_created_rps);
+	}
+	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+	return 0;
+}
+
 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
 	.ieee_getets = mlx4_en_dcbnl_ieee_getets,
 	.ieee_setets = mlx4_en_dcbnl_ieee_setets,
@@ -252,6 +467,9 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
 
 	.getdcbx = mlx4_en_dcbnl_getdcbx,
 	.setdcbx = mlx4_en_dcbnl_setdcbx,
+	.ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
+	.ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
+	.ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
 };
 
 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2a8268e6be15..94553b501c76 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -608,6 +608,7 @@ struct mlx4_en_priv {
 #ifdef CONFIG_MLX4_EN_DCB
 	struct ieee_ets ets;
 	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
+	enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
 #endif
 #ifdef CONFIG_RFS_ACCEL
 	spinlock_t filters_lock;