about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2015-03-06 21:50:07 -0500
committerDavid S. Miller <davem@davemloft.net>2015-03-06 21:50:07 -0500
commitf68a8ebd33d752d3178d1fcae453a14106357c79 (patch)
tree20e2241ab631a974a86f3880cbb53a26b112f939 /drivers
parent34de26d366ea49d99be633ae389e751fd5d592f5 (diff)
parent708b869bf56e58b0c41460ba7bf363bf50f330c2 (diff)
Merge branch 'mlx4-qcn'
Or Gerlitz says: ==================== Add QCN support to the DCB NL layer This series from Shani Michaeli adds support for the IEEE QCN attribute to the kernel DCB NL stack, and implementation in the mlx4 driver which programs the firmware according to the admin directives. changes from V0: - applied feedback from John and added his acked-by to patch #1 ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c218
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
4 files changed, 239 insertions, 2 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index a681d7c0bb9f..20b3c7b21e63 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1499,6 +1499,15 @@ static struct mlx4_cmd_info cmd_info[] = {
1499 .verify = NULL, 1499 .verify = NULL,
1500 .wrapper = mlx4_ACCESS_REG_wrapper, 1500 .wrapper = mlx4_ACCESS_REG_wrapper,
1501 }, 1501 },
1502 {
1503 .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
1504 .has_inbox = false,
1505 .has_outbox = false,
1506 .out_is_imm = false,
1507 .encode_slave_id = false,
1508 .verify = NULL,
1509 .wrapper = mlx4_CMD_EPERM_wrapper,
1510 },
1502 /* Native multicast commands are not available for guests */ 1511 /* Native multicast commands are not available for guests */
1503 { 1512 {
1504 .opcode = MLX4_CMD_QP_ATTACH, 1513 .opcode = MLX4_CMD_QP_ATTACH,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index c95ca252187c..cde14fa2f742 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -36,6 +36,49 @@
36 36
37#include "mlx4_en.h" 37#include "mlx4_en.h"
38 38
39/* Definitions for QCN
40 */
41
42struct mlx4_congestion_control_mb_prio_802_1_qau_params {
43 __be32 modify_enable_high;
44 __be32 modify_enable_low;
45 __be32 reserved1;
46 __be32 extended_enable;
47 __be32 rppp_max_rps;
48 __be32 rpg_time_reset;
49 __be32 rpg_byte_reset;
50 __be32 rpg_threshold;
51 __be32 rpg_max_rate;
52 __be32 rpg_ai_rate;
53 __be32 rpg_hai_rate;
54 __be32 rpg_gd;
55 __be32 rpg_min_dec_fac;
56 __be32 rpg_min_rate;
57 __be32 max_time_rise;
58 __be32 max_byte_rise;
59 __be32 max_qdelta;
60 __be32 min_qoffset;
61 __be32 gd_coefficient;
62 __be32 reserved2[5];
63 __be32 cp_sample_base;
64 __be32 reserved3[39];
65};
66
67struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
68 __be64 rppp_rp_centiseconds;
69 __be32 reserved1;
70 __be32 ignored_cnm;
71 __be32 rppp_created_rps;
72 __be32 estimated_total_rate;
73 __be32 max_active_rate_limiter_index;
74 __be32 dropped_cnms_busy_fw;
75 __be32 reserved2;
76 __be32 cnms_handled_successfully;
77 __be32 min_total_limiters_rate;
78 __be32 max_total_limiters_rate;
79 __be32 reserved3[4];
80};
81
39static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, 82static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
40 struct ieee_ets *ets) 83 struct ieee_ets *ets)
41{ 84{
@@ -242,6 +285,178 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
242 return 0; 285 return 0;
243} 286}
244 287
288#define RPG_ENABLE_BIT 31
289#define CN_TAG_BIT 30
290
291static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
292 struct ieee_qcn *qcn)
293{
294 struct mlx4_en_priv *priv = netdev_priv(dev);
295 struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
296 struct mlx4_cmd_mailbox *mailbox_out = NULL;
297 u64 mailbox_in_dma = 0;
298 u32 inmod = 0;
299 int i, err;
300
301 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
302 return -EOPNOTSUPP;
303
304 mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
305 if (IS_ERR(mailbox_out))
306 return -ENOMEM;
307 hw_qcn =
308 (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
309 mailbox_out->buf;
310
311 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
312 inmod = priv->port | ((1 << i) << 8) |
313 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
314 err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
315 mailbox_out->dma,
316 inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
317 MLX4_CMD_CONGESTION_CTRL_OPCODE,
318 MLX4_CMD_TIME_CLASS_C,
319 MLX4_CMD_NATIVE);
320 if (err) {
321 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
322 return err;
323 }
324
325 qcn->rpg_enable[i] =
326 be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
327 qcn->rppp_max_rps[i] =
328 be32_to_cpu(hw_qcn->rppp_max_rps);
329 qcn->rpg_time_reset[i] =
330 be32_to_cpu(hw_qcn->rpg_time_reset);
331 qcn->rpg_byte_reset[i] =
332 be32_to_cpu(hw_qcn->rpg_byte_reset);
333 qcn->rpg_threshold[i] =
334 be32_to_cpu(hw_qcn->rpg_threshold);
335 qcn->rpg_max_rate[i] =
336 be32_to_cpu(hw_qcn->rpg_max_rate);
337 qcn->rpg_ai_rate[i] =
338 be32_to_cpu(hw_qcn->rpg_ai_rate);
339 qcn->rpg_hai_rate[i] =
340 be32_to_cpu(hw_qcn->rpg_hai_rate);
341 qcn->rpg_gd[i] =
342 be32_to_cpu(hw_qcn->rpg_gd);
343 qcn->rpg_min_dec_fac[i] =
344 be32_to_cpu(hw_qcn->rpg_min_dec_fac);
345 qcn->rpg_min_rate[i] =
346 be32_to_cpu(hw_qcn->rpg_min_rate);
347 qcn->cndd_state_machine[i] =
348 priv->cndd_state[i];
349 }
350 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
351 return 0;
352}
353
354static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
355 struct ieee_qcn *qcn)
356{
357 struct mlx4_en_priv *priv = netdev_priv(dev);
358 struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
359 struct mlx4_cmd_mailbox *mailbox_in = NULL;
360 u64 mailbox_in_dma = 0;
361 u32 inmod = 0;
362 int i, err;
363#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
364#define MODIFY_ENABLE_LOW_MASK 0xffc00000
365
366 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
367 return -EOPNOTSUPP;
368
369 mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
370 if (IS_ERR(mailbox_in))
371 return -ENOMEM;
372
373 mailbox_in_dma = mailbox_in->dma;
374 hw_qcn =
375 (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
376 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
377 inmod = priv->port | ((1 << i) << 8) |
378 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
379
380 /* Before updating QCN parameter,
381 * need to set it's modify enable bit to 1
382 */
383
384 hw_qcn->modify_enable_high = cpu_to_be32(
385 MODIFY_ENABLE_HIGH_MASK);
386 hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);
387
388 hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
389 hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
390 hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
391 hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
392 hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
393 hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
394 hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
395 hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
396 hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
397 hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
398 hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
399 priv->cndd_state[i] = qcn->cndd_state_machine[i];
400 if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
401 hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);
402
403 err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
404 MLX4_CONGESTION_CONTROL_SET_PARAMS,
405 MLX4_CMD_CONGESTION_CTRL_OPCODE,
406 MLX4_CMD_TIME_CLASS_C,
407 MLX4_CMD_NATIVE);
408 if (err) {
409 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
410 return err;
411 }
412 }
413 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
414 return 0;
415}
416
417static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
418 struct ieee_qcn_stats *qcn_stats)
419{
420 struct mlx4_en_priv *priv = netdev_priv(dev);
421 struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
422 struct mlx4_cmd_mailbox *mailbox_out = NULL;
423 u64 mailbox_in_dma = 0;
424 u32 inmod = 0;
425 int i, err;
426
427 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
428 return -EOPNOTSUPP;
429
430 mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
431 if (IS_ERR(mailbox_out))
432 return -ENOMEM;
433
434 hw_qcn_stats =
435 (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
436 mailbox_out->buf;
437
438 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
439 inmod = priv->port | ((1 << i) << 8) |
440 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
441 err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
442 mailbox_out->dma, inmod,
443 MLX4_CONGESTION_CONTROL_GET_STATISTICS,
444 MLX4_CMD_CONGESTION_CTRL_OPCODE,
445 MLX4_CMD_TIME_CLASS_C,
446 MLX4_CMD_NATIVE);
447 if (err) {
448 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
449 return err;
450 }
451 qcn_stats->rppp_rp_centiseconds[i] =
452 be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
453 qcn_stats->rppp_created_rps[i] =
454 be32_to_cpu(hw_qcn_stats->rppp_created_rps);
455 }
456 mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
457 return 0;
458}
459
245const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { 460const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
246 .ieee_getets = mlx4_en_dcbnl_ieee_getets, 461 .ieee_getets = mlx4_en_dcbnl_ieee_getets,
247 .ieee_setets = mlx4_en_dcbnl_ieee_setets, 462 .ieee_setets = mlx4_en_dcbnl_ieee_setets,
@@ -252,6 +467,9 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
252 467
253 .getdcbx = mlx4_en_dcbnl_getdcbx, 468 .getdcbx = mlx4_en_dcbnl_getdcbx,
254 .setdcbx = mlx4_en_dcbnl_setdcbx, 469 .setdcbx = mlx4_en_dcbnl_setdcbx,
470 .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
471 .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
472 .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
255}; 473};
256 474
257const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { 475const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 5a21e5dc94cb..242bcee5d774 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -143,7 +143,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
143 [18] = "More than 80 VFs support", 143 [18] = "More than 80 VFs support",
144 [19] = "Performance optimized for limited rule configuration flow steering support", 144 [19] = "Performance optimized for limited rule configuration flow steering support",
145 [20] = "Recoverable error events support", 145 [20] = "Recoverable error events support",
146 [21] = "Port Remap support" 146 [21] = "Port Remap support",
147 [22] = "QCN support"
147 }; 148 };
148 int i; 149 int i;
149 150
@@ -675,7 +676,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
675#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 676#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
676#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 677#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
677#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a 678#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
678#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a 679#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b
679#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 680#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
680#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 681#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
681#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 682#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -777,6 +778,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
777 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; 778 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
778 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); 779 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
779 dev_cap->fs_max_num_qp_per_entry = field; 780 dev_cap->fs_max_num_qp_per_entry = field;
781 MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
782 if (field & 0x1)
783 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
780 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 784 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
781 dev_cap->stat_rate_support = stat_rate; 785 dev_cap->stat_rate_support = stat_rate;
782 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 786 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
@@ -1149,6 +1153,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1149 DEV_CAP_EXT_2_FLAG_FSM); 1153 DEV_CAP_EXT_2_FLAG_FSM);
1150 MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 1154 MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
1151 1155
1156 /* turn off QCN for guests */
1157 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
1158 field &= 0xfe;
1159 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
1160
1152 return 0; 1161 return 0;
1153} 1162}
1154 1163
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2a8268e6be15..94553b501c76 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -608,6 +608,7 @@ struct mlx4_en_priv {
608#ifdef CONFIG_MLX4_EN_DCB 608#ifdef CONFIG_MLX4_EN_DCB
609 struct ieee_ets ets; 609 struct ieee_ets ets;
610 u16 maxrate[IEEE_8021QAZ_MAX_TCS]; 610 u16 maxrate[IEEE_8021QAZ_MAX_TCS];
611 enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
611#endif 612#endif
612#ifdef CONFIG_RFS_ACCEL 613#ifdef CONFIG_RFS_ACCEL
613 spinlock_t filters_lock; 614 spinlock_t filters_lock;