-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h               97
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c       10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c   113
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c         746
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c         24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.h          4
-rw-r--r--  include/linux/mlx5/device.h                                 10
-rw-r--r--  include/linux/mlx5/driver.h                                  1
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h                               18
9 files changed, 675 insertions, 348 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 45f6dc75c0df..e9d7d90363a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -138,6 +138,80 @@ struct mlx5e_vport_stats {
138#define NUM_VPORT_COUNTERS 31 138#define NUM_VPORT_COUNTERS 31
139}; 139};
140 140
141static const char pport_strings[][ETH_GSTRING_LEN] = {
142 /* IEEE802.3 counters */
143 "frames_tx",
144 "frames_rx",
145 "check_seq_err",
146 "alignment_err",
147 "octets_tx",
148 "octets_received",
149 "multicast_xmitted",
150 "broadcast_xmitted",
151 "multicast_rx",
152 "broadcast_rx",
153 "in_range_len_errors",
154 "out_of_range_len",
155 "too_long_errors",
156 "symbol_err",
157 "mac_control_tx",
158 "mac_control_rx",
159 "unsupported_op_rx",
160 "pause_ctrl_rx",
161 "pause_ctrl_tx",
162
163 /* RFC2863 counters */
164 "in_octets",
165 "in_ucast_pkts",
166 "in_discards",
167 "in_errors",
168 "in_unknown_protos",
169 "out_octets",
170 "out_ucast_pkts",
171 "out_discards",
172 "out_errors",
173 "in_multicast_pkts",
174 "in_broadcast_pkts",
175 "out_multicast_pkts",
176 "out_broadcast_pkts",
177
178 /* RFC2819 counters */
179 "drop_events",
180 "octets",
181 "pkts",
182 "broadcast_pkts",
183 "multicast_pkts",
184 "crc_align_errors",
185 "undersize_pkts",
186 "oversize_pkts",
187 "fragments",
188 "jabbers",
189 "collisions",
190 "p64octets",
191 "p65to127octets",
192 "p128to255octets",
193 "p256to511octets",
194 "p512to1023octets",
195 "p1024to1518octets",
196 "p1519to2047octets",
197 "p2048to4095octets",
198 "p4096to8191octets",
199 "p8192to10239octets",
200};
201
202#define NUM_IEEE_802_3_COUNTERS 19
203#define NUM_RFC_2863_COUNTERS 13
204#define NUM_RFC_2819_COUNTERS 21
205#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
206 NUM_RFC_2863_COUNTERS + \
207 NUM_RFC_2819_COUNTERS)
208
209struct mlx5e_pport_stats {
210 __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
211 __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
212 __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
213};
214
141static const char rq_stats_strings[][ETH_GSTRING_LEN] = { 215static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
142 "packets", 216 "packets",
143 "csum_none", 217 "csum_none",
@@ -180,6 +254,7 @@ struct mlx5e_sq_stats {
180 254
181struct mlx5e_stats { 255struct mlx5e_stats {
182 struct mlx5e_vport_stats vport; 256 struct mlx5e_vport_stats vport;
257 struct mlx5e_pport_stats pport;
183}; 258};
184 259
185struct mlx5e_params { 260struct mlx5e_params {
@@ -217,6 +292,7 @@ struct mlx5e_cq {
217 struct napi_struct *napi; 292 struct napi_struct *napi;
218 struct mlx5_core_cq mcq; 293 struct mlx5_core_cq mcq;
219 struct mlx5e_channel *channel; 294 struct mlx5e_channel *channel;
295 struct mlx5e_priv *priv;
220 296
221 /* control */ 297 /* control */
222 struct mlx5_wq_ctrl wq_ctrl; 298 struct mlx5_wq_ctrl wq_ctrl;
@@ -240,6 +316,7 @@ struct mlx5e_rq {
240 struct mlx5_wq_ctrl wq_ctrl; 316 struct mlx5_wq_ctrl wq_ctrl;
241 u32 rqn; 317 u32 rqn;
242 struct mlx5e_channel *channel; 318 struct mlx5e_channel *channel;
319 struct mlx5e_priv *priv;
243} ____cacheline_aligned_in_smp; 320} ____cacheline_aligned_in_smp;
244 321
245struct mlx5e_tx_skb_cb { 322struct mlx5e_tx_skb_cb {
@@ -344,10 +421,10 @@ enum mlx5e_traffic_types {
344 MLX5E_NUM_TT, 421 MLX5E_NUM_TT,
345}; 422};
346 423
347enum { 424enum mlx5e_rqt_ix {
348 MLX5E_RQT_SPREADING = 0, 425 MLX5E_INDIRECTION_RQT,
349 MLX5E_RQT_DEFAULT_RQ = 1, 426 MLX5E_SINGLE_RQ_RQT,
350 MLX5E_NUM_RQT = 2, 427 MLX5E_NUM_RQT,
351}; 428};
352 429
353struct mlx5e_eth_addr_info { 430struct mlx5e_eth_addr_info {
@@ -372,10 +449,10 @@ struct mlx5e_eth_addr_db {
372enum { 449enum {
373 MLX5E_STATE_ASYNC_EVENTS_ENABLE, 450 MLX5E_STATE_ASYNC_EVENTS_ENABLE,
374 MLX5E_STATE_OPENED, 451 MLX5E_STATE_OPENED,
452 MLX5E_STATE_DESTROYING,
375}; 453};
376 454
377struct mlx5e_vlan_db { 455struct mlx5e_vlan_db {
378 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
379 u32 active_vlans_ft_ix[VLAN_N_VID]; 456 u32 active_vlans_ft_ix[VLAN_N_VID];
380 u32 untagged_rule_ft_ix; 457 u32 untagged_rule_ft_ix;
381 u32 any_vlan_rule_ft_ix; 458 u32 any_vlan_rule_ft_ix;
@@ -399,10 +476,11 @@ struct mlx5e_priv {
399 u32 pdn; 476 u32 pdn;
400 u32 tdn; 477 u32 tdn;
401 struct mlx5_core_mr mr; 478 struct mlx5_core_mr mr;
479 struct mlx5e_rq drop_rq;
402 480
403 struct mlx5e_channel **channel; 481 struct mlx5e_channel **channel;
404 u32 tisn[MLX5E_MAX_NUM_TC]; 482 u32 tisn[MLX5E_MAX_NUM_TC];
405 u32 rqtn; 483 u32 rqtn[MLX5E_NUM_RQT];
406 u32 tirn[MLX5E_NUM_TT]; 484 u32 tirn[MLX5E_NUM_TT];
407 485
408 struct mlx5e_flow_table ft; 486 struct mlx5e_flow_table ft;
@@ -479,10 +557,9 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
479 557
480void mlx5e_update_stats(struct mlx5e_priv *priv); 558void mlx5e_update_stats(struct mlx5e_priv *priv);
481 559
482int mlx5e_open_flow_table(struct mlx5e_priv *priv); 560int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
483void mlx5e_close_flow_table(struct mlx5e_priv *priv); 561void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
484void mlx5e_init_eth_addr(struct mlx5e_priv *priv); 562void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
485void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
486void mlx5e_set_rx_mode_work(struct work_struct *work); 563void mlx5e_set_rx_mode_work(struct work_struct *work);
487 564
488int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, 565int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -491,8 +568,6 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
491 u16 vid); 568 u16 vid);
492void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); 569void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
493void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); 570void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
494int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
495void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
496 571
497int mlx5e_open_locked(struct net_device *netdev); 572int mlx5e_open_locked(struct net_device *netdev);
498int mlx5e_close_locked(struct net_device *netdev); 573int mlx5e_close_locked(struct net_device *netdev);
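Note: the en.h hunk above introduces pport_strings[] together with NUM_IEEE_802_3_COUNTERS (19), NUM_RFC_2863_COUNTERS (13) and NUM_RFC_2819_COUNTERS (21). A minimal sketch, not part of the patch, of a compile-time check that the string table and the per-group counts stay in sync:

/* Minimal userspace sketch (not from the patch): sanity-check that the
 * per-group counter counts introduced in en.h add up to the number of
 * entries listed in pport_strings[] (19 + 13 + 21 == 53).
 */
#include <assert.h>

#define NUM_IEEE_802_3_COUNTERS 19
#define NUM_RFC_2863_COUNTERS   13
#define NUM_RFC_2819_COUNTERS   21
#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
                            NUM_RFC_2863_COUNTERS + \
                            NUM_RFC_2819_COUNTERS)

static_assert(NUM_PPORT_COUNTERS == 53,
              "pport_strings[] must list exactly NUM_PPORT_COUNTERS names");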
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index b95aa3384c36..b549797b315f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,7 +171,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
171 171
172 switch (sset) { 172 switch (sset) {
173 case ETH_SS_STATS: 173 case ETH_SS_STATS:
174 return NUM_VPORT_COUNTERS + 174 return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
175 priv->params.num_channels * NUM_RQ_STATS + 175 priv->params.num_channels * NUM_RQ_STATS +
176 priv->params.num_channels * priv->params.num_tc * 176 priv->params.num_channels * priv->params.num_tc *
177 NUM_SQ_STATS; 177 NUM_SQ_STATS;
@@ -200,6 +200,11 @@ static void mlx5e_get_strings(struct net_device *dev,
200 strcpy(data + (idx++) * ETH_GSTRING_LEN, 200 strcpy(data + (idx++) * ETH_GSTRING_LEN,
201 vport_strings[i]); 201 vport_strings[i]);
202 202
203 /* PPORT counters */
204 for (i = 0; i < NUM_PPORT_COUNTERS; i++)
205 strcpy(data + (idx++) * ETH_GSTRING_LEN,
206 pport_strings[i]);
207
203 /* per channel counters */ 208 /* per channel counters */
204 for (i = 0; i < priv->params.num_channels; i++) 209 for (i = 0; i < priv->params.num_channels; i++)
205 for (j = 0; j < NUM_RQ_STATS; j++) 210 for (j = 0; j < NUM_RQ_STATS; j++)
@@ -234,6 +239,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
234 for (i = 0; i < NUM_VPORT_COUNTERS; i++) 239 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
235 data[idx++] = ((u64 *)&priv->stats.vport)[i]; 240 data[idx++] = ((u64 *)&priv->stats.vport)[i];
236 241
242 for (i = 0; i < NUM_PPORT_COUNTERS; i++)
243 data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
244
237 /* per channel counters */ 245 /* per channel counters */
238 for (i = 0; i < priv->params.num_channels; i++) 246 for (i = 0; i < priv->params.num_channels; i++)
239 for (j = 0; j < NUM_RQ_STATS; j++) 247 for (j = 0; j < NUM_RQ_STATS; j++)
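Note: mlx5e_get_ethtool_stats() above walks struct mlx5e_pport_stats as one flat __be64 array and converts each counter with be64_to_cpu(), which works because the three per-group arrays are laid out contiguously. A userspace analogue, assuming an illustrative struct of the same shape:

/* Userspace sketch (illustrative only) of the flattening done in
 * mlx5e_get_ethtool_stats(): treat the whole stats struct as one array of
 * big-endian 64-bit counters and byte-swap each entry.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_IEEE_802_3_COUNTERS 19
#define NUM_RFC_2863_COUNTERS   13
#define NUM_RFC_2819_COUNTERS   21
#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
                            NUM_RFC_2863_COUNTERS + \
                            NUM_RFC_2819_COUNTERS)

struct pport_stats {
	uint64_t ieee_802_3[NUM_IEEE_802_3_COUNTERS];	/* big-endian, as read from PPCNT */
	uint64_t rfc_2863[NUM_RFC_2863_COUNTERS];
	uint64_t rfc_2819[NUM_RFC_2819_COUNTERS];
};

int main(void)
{
	struct pport_stats s = { .ieee_802_3 = { htobe64(42) } };
	const uint64_t *raw = (const uint64_t *)&s;
	uint64_t data[NUM_PPORT_COUNTERS];

	for (int i = 0; i < NUM_PPORT_COUNTERS; i++)
		data[i] = be64toh(raw[i]);	/* mirrors be64_to_cpu() in the patch */

	printf("frames_tx = %llu\n", (unsigned long long)data[0]);
	return 0;
}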
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index 70ec31b9e1e9..e71563ce05d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -594,44 +594,28 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
594 594
595void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) 595void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
596{ 596{
597 WARN_ON(!mutex_is_locked(&priv->state_lock)); 597 if (!priv->vlan.filter_disabled)
598 return;
598 599
599 if (priv->vlan.filter_disabled) { 600 priv->vlan.filter_disabled = false;
600 priv->vlan.filter_disabled = false; 601 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
601 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
602 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
603 0);
604 }
605} 602}
606 603
607void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) 604void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
608{ 605{
609 WARN_ON(!mutex_is_locked(&priv->state_lock)); 606 if (priv->vlan.filter_disabled)
607 return;
610 608
611 if (!priv->vlan.filter_disabled) { 609 priv->vlan.filter_disabled = true;
612 priv->vlan.filter_disabled = true; 610 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
613 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
614 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
615 0);
616 }
617} 611}
618 612
619int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, 613int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
620 u16 vid) 614 u16 vid)
621{ 615{
622 struct mlx5e_priv *priv = netdev_priv(dev); 616 struct mlx5e_priv *priv = netdev_priv(dev);
623 int err = 0;
624
625 mutex_lock(&priv->state_lock);
626
627 set_bit(vid, priv->vlan.active_vlans);
628 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
629 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
630 vid);
631 617
632 mutex_unlock(&priv->state_lock); 618 return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
633
634 return err;
635} 619}
636 620
637int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, 621int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -639,56 +623,11 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
639{ 623{
640 struct mlx5e_priv *priv = netdev_priv(dev); 624 struct mlx5e_priv *priv = netdev_priv(dev);
641 625
642 mutex_lock(&priv->state_lock); 626 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
643
644 clear_bit(vid, priv->vlan.active_vlans);
645 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
646 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
647
648 mutex_unlock(&priv->state_lock);
649
650 return 0;
651}
652
653int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
654{
655 u16 vid;
656 int err;
657
658 for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
659 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
660 vid);
661 if (err)
662 return err;
663 }
664
665 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
666 if (err)
667 return err;
668
669 if (priv->vlan.filter_disabled) {
670 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
671 0);
672 if (err)
673 return err;
674 }
675 627
676 return 0; 628 return 0;
677} 629}
678 630
679void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
680{
681 u16 vid;
682
683 if (priv->vlan.filter_disabled)
684 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
685
686 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
687
688 for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
689 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
690}
691
692#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ 631#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
693 for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ 632 for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
694 hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) 633 hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -752,18 +691,21 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
752 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) 691 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
753 hn->action = MLX5E_ACTION_DEL; 692 hn->action = MLX5E_ACTION_DEL;
754 693
755 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 694 if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
756 mlx5e_sync_netdev_addr(priv); 695 mlx5e_sync_netdev_addr(priv);
757 696
758 mlx5e_apply_netdev_addr(priv); 697 mlx5e_apply_netdev_addr(priv);
759} 698}
760 699
761void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv) 700void mlx5e_set_rx_mode_work(struct work_struct *work)
762{ 701{
702 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
703 set_rx_mode_work);
704
763 struct mlx5e_eth_addr_db *ea = &priv->eth_addr; 705 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
764 struct net_device *ndev = priv->netdev; 706 struct net_device *ndev = priv->netdev;
765 707
766 bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state); 708 bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
767 bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); 709 bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
768 bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); 710 bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
769 bool broadcast_enabled = rx_mode_enable; 711 bool broadcast_enabled = rx_mode_enable;
@@ -796,17 +738,6 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
796 ea->broadcast_enabled = broadcast_enabled; 738 ea->broadcast_enabled = broadcast_enabled;
797} 739}
798 740
799void mlx5e_set_rx_mode_work(struct work_struct *work)
800{
801 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
802 set_rx_mode_work);
803
804 mutex_lock(&priv->state_lock);
805 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
806 mlx5e_set_rx_mode_core(priv);
807 mutex_unlock(&priv->state_lock);
808}
809
810void mlx5e_init_eth_addr(struct mlx5e_priv *priv) 741void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
811{ 742{
812 ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); 743 ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
@@ -929,7 +860,7 @@ static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
929 mlx5_destroy_flow_table(priv->ft.vlan); 860 mlx5_destroy_flow_table(priv->ft.vlan);
930} 861}
931 862
932int mlx5e_open_flow_table(struct mlx5e_priv *priv) 863int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
933{ 864{
934 int err; 865 int err;
935 866
@@ -941,16 +872,24 @@ int mlx5e_open_flow_table(struct mlx5e_priv *priv)
941 if (err) 872 if (err)
942 goto err_destroy_main_flow_table; 873 goto err_destroy_main_flow_table;
943 874
875 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
876 if (err)
877 goto err_destroy_vlan_flow_table;
878
944 return 0; 879 return 0;
945 880
881err_destroy_vlan_flow_table:
882 mlx5e_destroy_vlan_flow_table(priv);
883
946err_destroy_main_flow_table: 884err_destroy_main_flow_table:
947 mlx5e_destroy_main_flow_table(priv); 885 mlx5e_destroy_main_flow_table(priv);
948 886
949 return err; 887 return err;
950} 888}
951 889
952void mlx5e_close_flow_table(struct mlx5e_priv *priv) 890void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
953{ 891{
892 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
954 mlx5e_destroy_vlan_flow_table(priv); 893 mlx5e_destroy_vlan_flow_table(priv);
955 mlx5e_destroy_main_flow_table(priv); 894 mlx5e_destroy_main_flow_table(priv);
956} 895}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bb815893d3a8..111427b33ec8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -82,6 +82,47 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
82 mutex_unlock(&priv->state_lock); 82 mutex_unlock(&priv->state_lock);
83} 83}
84 84
85static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
86{
87 struct mlx5_core_dev *mdev = priv->mdev;
88 struct mlx5e_pport_stats *s = &priv->stats.pport;
89 u32 *in;
90 u32 *out;
91 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
92
93 in = mlx5_vzalloc(sz);
94 out = mlx5_vzalloc(sz);
95 if (!in || !out)
96 goto free_out;
97
98 MLX5_SET(ppcnt_reg, in, local_port, 1);
99
100 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
101 mlx5_core_access_reg(mdev, in, sz, out,
102 sz, MLX5_REG_PPCNT, 0, 0);
103 memcpy(s->IEEE_802_3_counters,
104 MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
105 sizeof(s->IEEE_802_3_counters));
106
107 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
108 mlx5_core_access_reg(mdev, in, sz, out,
109 sz, MLX5_REG_PPCNT, 0, 0);
110 memcpy(s->RFC_2863_counters,
111 MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
112 sizeof(s->RFC_2863_counters));
113
114 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
115 mlx5_core_access_reg(mdev, in, sz, out,
116 sz, MLX5_REG_PPCNT, 0, 0);
117 memcpy(s->RFC_2819_counters,
118 MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
119 sizeof(s->RFC_2819_counters));
120
121free_out:
122 kvfree(in);
123 kvfree(out);
124}
125
85void mlx5e_update_stats(struct mlx5e_priv *priv) 126void mlx5e_update_stats(struct mlx5e_priv *priv)
86{ 127{
87 struct mlx5_core_dev *mdev = priv->mdev; 128 struct mlx5_core_dev *mdev = priv->mdev;
@@ -202,6 +243,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
202 s->tx_csum_offload = s->tx_packets - tx_offload_none; 243 s->tx_csum_offload = s->tx_packets - tx_offload_none;
203 s->rx_csum_good = s->rx_packets - s->rx_csum_none; 244 s->rx_csum_good = s->rx_packets - s->rx_csum_none;
204 245
246 mlx5e_update_pport_counters(priv);
205free_out: 247free_out:
206 kvfree(out); 248 kvfree(out);
207} 249}
@@ -307,6 +349,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
307 rq->netdev = c->netdev; 349 rq->netdev = c->netdev;
308 rq->channel = c; 350 rq->channel = c;
309 rq->ix = c->ix; 351 rq->ix = c->ix;
352 rq->priv = c->priv;
310 353
311 return 0; 354 return 0;
312 355
@@ -324,8 +367,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
324 367
325static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) 368static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
326{ 369{
327 struct mlx5e_channel *c = rq->channel; 370 struct mlx5e_priv *priv = rq->priv;
328 struct mlx5e_priv *priv = c->priv;
329 struct mlx5_core_dev *mdev = priv->mdev; 371 struct mlx5_core_dev *mdev = priv->mdev;
330 372
331 void *in; 373 void *in;
@@ -392,11 +434,7 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
392 434
393static void mlx5e_disable_rq(struct mlx5e_rq *rq) 435static void mlx5e_disable_rq(struct mlx5e_rq *rq)
394{ 436{
395 struct mlx5e_channel *c = rq->channel; 437 mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
396 struct mlx5e_priv *priv = c->priv;
397 struct mlx5_core_dev *mdev = priv->mdev;
398
399 mlx5_core_destroy_rq(mdev, rq->rqn);
400} 438}
401 439
402static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) 440static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
@@ -740,6 +778,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
740 } 778 }
741 779
742 cq->channel = c; 780 cq->channel = c;
781 cq->priv = priv;
743 782
744 return 0; 783 return 0;
745} 784}
@@ -751,8 +790,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
751 790
752static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) 791static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
753{ 792{
754 struct mlx5e_channel *c = cq->channel; 793 struct mlx5e_priv *priv = cq->priv;
755 struct mlx5e_priv *priv = c->priv;
756 struct mlx5_core_dev *mdev = priv->mdev; 794 struct mlx5_core_dev *mdev = priv->mdev;
757 struct mlx5_core_cq *mcq = &cq->mcq; 795 struct mlx5_core_cq *mcq = &cq->mcq;
758 796
@@ -798,8 +836,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
798 836
799static void mlx5e_disable_cq(struct mlx5e_cq *cq) 837static void mlx5e_disable_cq(struct mlx5e_cq *cq)
800{ 838{
801 struct mlx5e_channel *c = cq->channel; 839 struct mlx5e_priv *priv = cq->priv;
802 struct mlx5e_priv *priv = c->priv;
803 struct mlx5_core_dev *mdev = priv->mdev; 840 struct mlx5_core_dev *mdev = priv->mdev;
804 841
805 mlx5_core_destroy_cq(mdev, &cq->mcq); 842 mlx5_core_destroy_cq(mdev, &cq->mcq);
@@ -1119,112 +1156,419 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
1119 kfree(priv->channel); 1156 kfree(priv->channel);
1120} 1157}
1121 1158
1122static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) 1159static int mlx5e_rx_hash_fn(int hfunc)
1123{ 1160{
1124 struct mlx5_core_dev *mdev = priv->mdev; 1161 return (hfunc == ETH_RSS_HASH_TOP) ?
1125 u32 in[MLX5_ST_SZ_DW(create_tis_in)]; 1162 MLX5_RX_HASH_FN_TOEPLITZ :
1126 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 1163 MLX5_RX_HASH_FN_INVERTED_XOR8;
1164}
1127 1165
1128 memset(in, 0, sizeof(in)); 1166static int mlx5e_bits_invert(unsigned long a, int size)
1167{
1168 int inv = 0;
1169 int i;
1129 1170
1130 MLX5_SET(tisc, tisc, prio, tc); 1171 for (i = 0; i < size; i++)
1131 MLX5_SET(tisc, tisc, transport_domain, priv->tdn); 1172 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
1132 1173
1133 return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); 1174 return inv;
1134} 1175}
1135 1176
1136static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc) 1177static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
1178 enum mlx5e_rqt_ix rqt_ix)
1137{ 1179{
1138 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); 1180 int i;
1181 int log_sz;
1182
1183 switch (rqt_ix) {
1184 case MLX5E_INDIRECTION_RQT:
1185 log_sz = priv->params.rx_hash_log_tbl_sz;
1186 for (i = 0; i < (1 << log_sz); i++) {
1187 int ix = i;
1188
1189 if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
1190 ix = mlx5e_bits_invert(i, log_sz);
1191
1192 ix = ix % priv->params.num_channels;
1193 MLX5_SET(rqtc, rqtc, rq_num[i],
1194 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
1195 priv->channel[ix]->rq.rqn :
1196 priv->drop_rq.rqn);
1197 }
1198
1199 break;
1200
1201 default: /* MLX5E_SINGLE_RQ_RQT */
1202 MLX5_SET(rqtc, rqtc, rq_num[0],
1203 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
1204 priv->channel[0]->rq.rqn :
1205 priv->drop_rq.rqn);
1206
1207 break;
1208 }
1139} 1209}
1140 1210
1141static int mlx5e_open_tises(struct mlx5e_priv *priv) 1211static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
1142{ 1212{
1213 struct mlx5_core_dev *mdev = priv->mdev;
1214 u32 *in;
1215 void *rqtc;
1216 int inlen;
1217 int log_sz;
1218 int sz;
1143 int err; 1219 int err;
1144 int tc;
1145 1220
1146 for (tc = 0; tc < priv->params.num_tc; tc++) { 1221 log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
1147 err = mlx5e_open_tis(priv, tc); 1222 priv->params.rx_hash_log_tbl_sz;
1148 if (err) 1223 sz = 1 << log_sz;
1149 goto err_close_tises;
1150 }
1151 1224
1152 return 0; 1225 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
1226 in = mlx5_vzalloc(inlen);
1227 if (!in)
1228 return -ENOMEM;
1153 1229
1154err_close_tises: 1230 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
1155 for (tc--; tc >= 0; tc--) 1231
1156 mlx5e_close_tis(priv, tc); 1232 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
1233 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
1234
1235 mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
1236
1237 err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
1238
1239 kvfree(in);
1157 1240
1158 return err; 1241 return err;
1159} 1242}
1160 1243
1161static void mlx5e_close_tises(struct mlx5e_priv *priv) 1244static int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
1162{ 1245{
1163 int tc; 1246 struct mlx5_core_dev *mdev = priv->mdev;
1247 u32 *in;
1248 void *rqtc;
1249 int inlen;
1250 int log_sz;
1251 int sz;
1252 int err;
1164 1253
1165 for (tc = 0; tc < priv->params.num_tc; tc++) 1254 log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
1166 mlx5e_close_tis(priv, tc); 1255 priv->params.rx_hash_log_tbl_sz;
1256 sz = 1 << log_sz;
1257
1258 inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
1259 in = mlx5_vzalloc(inlen);
1260 if (!in)
1261 return -ENOMEM;
1262
1263 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
1264
1265 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
1266
1267 mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
1268
1269 MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
1270
1271 err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
1272
1273 kvfree(in);
1274
1275 return err;
1167} 1276}
1168 1277
1169static int mlx5e_rx_hash_fn(int hfunc) 1278static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
1170{ 1279{
1171 return (hfunc == ETH_RSS_HASH_TOP) ? 1280 mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
1172 MLX5_RX_HASH_FN_TOEPLITZ :
1173 MLX5_RX_HASH_FN_INVERTED_XOR8;
1174} 1281}
1175 1282
1176static int mlx5e_bits_invert(unsigned long a, int size) 1283static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
1177{ 1284{
1178 int inv = 0; 1285 mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
1179 int i; 1286 mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
1287}
1180 1288
1181 for (i = 0; i < size; i++) 1289static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
1182 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i; 1290{
1291 if (!priv->params.lro_en)
1292 return;
1183 1293
1184 return inv; 1294#define ROUGH_MAX_L2_L3_HDR_SZ 256
1295
1296 MLX5_SET(tirc, tirc, lro_enable_mask,
1297 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
1298 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
1299 MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
1300 (priv->params.lro_wqe_sz -
1301 ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
1302 MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
1303 MLX5_CAP_ETH(priv->mdev,
1304 lro_timer_supported_periods[3]));
1185} 1305}
1186 1306
1187static int mlx5e_open_rqt(struct mlx5e_priv *priv) 1307static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
1188{ 1308{
1189 struct mlx5_core_dev *mdev = priv->mdev; 1309 struct mlx5_core_dev *mdev = priv->mdev;
1190 u32 *in; 1310
1191 void *rqtc; 1311 void *in;
1312 void *tirc;
1192 int inlen; 1313 int inlen;
1193 int err; 1314 int err;
1194 int log_tbl_sz = priv->params.rx_hash_log_tbl_sz;
1195 int sz = 1 << log_tbl_sz;
1196 int i;
1197 1315
1198 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 1316 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1199 in = mlx5_vzalloc(inlen); 1317 in = mlx5_vzalloc(inlen);
1200 if (!in) 1318 if (!in)
1201 return -ENOMEM; 1319 return -ENOMEM;
1202 1320
1203 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 1321 MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
1322 tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
1204 1323
1205 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 1324 mlx5e_build_tir_ctx_lro(tirc, priv);
1206 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
1207 1325
1208 for (i = 0; i < sz; i++) { 1326 err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
1209 int ix = i;
1210 1327
1211 if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) 1328 kvfree(in);
1212 ix = mlx5e_bits_invert(i, log_tbl_sz); 1329
1330 return err;
1331}
1332
1333static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
1334{
1335 struct mlx5e_priv *priv = netdev_priv(netdev);
1336 struct mlx5_core_dev *mdev = priv->mdev;
1337 int hw_mtu;
1338 int err;
1339
1340 err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
1341 if (err)
1342 return err;
1343
1344 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
1345
1346 if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
1347 netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
1348 __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
1349
1350 netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
1351 return 0;
1352}
1353
1354int mlx5e_open_locked(struct net_device *netdev)
1355{
1356 struct mlx5e_priv *priv = netdev_priv(netdev);
1357 int num_txqs;
1358 int err;
1359
1360 set_bit(MLX5E_STATE_OPENED, &priv->state);
1361
1362 num_txqs = priv->params.num_channels * priv->params.num_tc;
1363 netif_set_real_num_tx_queues(netdev, num_txqs);
1364 netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
1213 1365
1214 ix = ix % priv->params.num_channels; 1366 err = mlx5e_set_dev_port_mtu(netdev);
1215 MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn); 1367 if (err)
1368 return err;
1369
1370 err = mlx5e_open_channels(priv);
1371 if (err) {
1372 netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
1373 __func__, err);
1374 return err;
1216 } 1375 }
1217 1376
1218 err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn); 1377 mlx5e_update_carrier(priv);
1378 mlx5e_redirect_rqts(priv);
1219 1379
1220 kvfree(in); 1380 schedule_delayed_work(&priv->update_stats_work, 0);
1381
1382 return 0;
1383}
1384
1385static int mlx5e_open(struct net_device *netdev)
1386{
1387 struct mlx5e_priv *priv = netdev_priv(netdev);
1388 int err;
1389
1390 mutex_lock(&priv->state_lock);
1391 err = mlx5e_open_locked(netdev);
1392 mutex_unlock(&priv->state_lock);
1221 1393
1222 return err; 1394 return err;
1223} 1395}
1224 1396
1225static void mlx5e_close_rqt(struct mlx5e_priv *priv) 1397int mlx5e_close_locked(struct net_device *netdev)
1226{ 1398{
1227 mlx5_core_destroy_rqt(priv->mdev, priv->rqtn); 1399 struct mlx5e_priv *priv = netdev_priv(netdev);
1400
1401 clear_bit(MLX5E_STATE_OPENED, &priv->state);
1402
1403 mlx5e_redirect_rqts(priv);
1404 netif_carrier_off(priv->netdev);
1405 mlx5e_close_channels(priv);
1406
1407 return 0;
1408}
1409
1410static int mlx5e_close(struct net_device *netdev)
1411{
1412 struct mlx5e_priv *priv = netdev_priv(netdev);
1413 int err;
1414
1415 mutex_lock(&priv->state_lock);
1416 err = mlx5e_close_locked(netdev);
1417 mutex_unlock(&priv->state_lock);
1418
1419 return err;
1420}
1421
1422static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
1423 struct mlx5e_rq *rq,
1424 struct mlx5e_rq_param *param)
1425{
1426 struct mlx5_core_dev *mdev = priv->mdev;
1427 void *rqc = param->rqc;
1428 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
1429 int err;
1430
1431 param->wq.db_numa_node = param->wq.buf_numa_node;
1432
1433 err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
1434 &rq->wq_ctrl);
1435 if (err)
1436 return err;
1437
1438 rq->priv = priv;
1439
1440 return 0;
1441}
1442
1443static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
1444 struct mlx5e_cq *cq,
1445 struct mlx5e_cq_param *param)
1446{
1447 struct mlx5_core_dev *mdev = priv->mdev;
1448 struct mlx5_core_cq *mcq = &cq->mcq;
1449 int eqn_not_used;
1450 int irqn;
1451 int err;
1452
1453 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1454 &cq->wq_ctrl);
1455 if (err)
1456 return err;
1457
1458 mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1459
1460 mcq->cqe_sz = 64;
1461 mcq->set_ci_db = cq->wq_ctrl.db.db;
1462 mcq->arm_db = cq->wq_ctrl.db.db + 1;
1463 *mcq->set_ci_db = 0;
1464 *mcq->arm_db = 0;
1465 mcq->vector = param->eq_ix;
1466 mcq->comp = mlx5e_completion_event;
1467 mcq->event = mlx5e_cq_error_event;
1468 mcq->irqn = irqn;
1469 mcq->uar = &priv->cq_uar;
1470
1471 cq->priv = priv;
1472
1473 return 0;
1474}
1475
1476static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
1477{
1478 struct mlx5e_cq_param cq_param;
1479 struct mlx5e_rq_param rq_param;
1480 struct mlx5e_rq *rq = &priv->drop_rq;
1481 struct mlx5e_cq *cq = &priv->drop_rq.cq;
1482 int err;
1483
1484 memset(&cq_param, 0, sizeof(cq_param));
1485 memset(&rq_param, 0, sizeof(rq_param));
1486 mlx5e_build_rx_cq_param(priv, &cq_param);
1487 mlx5e_build_rq_param(priv, &rq_param);
1488
1489 err = mlx5e_create_drop_cq(priv, cq, &cq_param);
1490 if (err)
1491 return err;
1492
1493 err = mlx5e_enable_cq(cq, &cq_param);
1494 if (err)
1495 goto err_destroy_cq;
1496
1497 err = mlx5e_create_drop_rq(priv, rq, &rq_param);
1498 if (err)
1499 goto err_disable_cq;
1500
1501 err = mlx5e_enable_rq(rq, &rq_param);
1502 if (err)
1503 goto err_destroy_rq;
1504
1505 return 0;
1506
1507err_destroy_rq:
1508 mlx5e_destroy_rq(&priv->drop_rq);
1509
1510err_disable_cq:
1511 mlx5e_disable_cq(&priv->drop_rq.cq);
1512
1513err_destroy_cq:
1514 mlx5e_destroy_cq(&priv->drop_rq.cq);
1515
1516 return err;
1517}
1518
1519static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
1520{
1521 mlx5e_disable_rq(&priv->drop_rq);
1522 mlx5e_destroy_rq(&priv->drop_rq);
1523 mlx5e_disable_cq(&priv->drop_rq.cq);
1524 mlx5e_destroy_cq(&priv->drop_rq.cq);
1525}
1526
1527static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
1528{
1529 struct mlx5_core_dev *mdev = priv->mdev;
1530 u32 in[MLX5_ST_SZ_DW(create_tis_in)];
1531 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
1532
1533 memset(in, 0, sizeof(in));
1534
1535 MLX5_SET(tisc, tisc, prio, tc);
1536 MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
1537
1538 return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
1539}
1540
1541static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
1542{
1543 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
1544}
1545
1546static int mlx5e_create_tises(struct mlx5e_priv *priv)
1547{
1548 int err;
1549 int tc;
1550
1551 for (tc = 0; tc < priv->params.num_tc; tc++) {
1552 err = mlx5e_create_tis(priv, tc);
1553 if (err)
1554 goto err_close_tises;
1555 }
1556
1557 return 0;
1558
1559err_close_tises:
1560 for (tc--; tc >= 0; tc--)
1561 mlx5e_destroy_tis(priv, tc);
1562
1563 return err;
1564}
1565
1566static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
1567{
1568 int tc;
1569
1570 for (tc = 0; tc < priv->params.num_tc; tc++)
1571 mlx5e_destroy_tis(priv, tc);
1228} 1572}
1229 1573
1230static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) 1574static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
@@ -1233,8 +1577,6 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
1233 1577
1234 MLX5_SET(tirc, tirc, transport_domain, priv->tdn); 1578 MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
1235 1579
1236#define ROUGH_MAX_L2_L3_HDR_SZ 256
1237
1238#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ 1580#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
1239 MLX5_HASH_FIELD_SEL_DST_IP) 1581 MLX5_HASH_FIELD_SEL_DST_IP)
1240 1582
@@ -1247,30 +1589,19 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
1247 MLX5_HASH_FIELD_SEL_DST_IP |\ 1589 MLX5_HASH_FIELD_SEL_DST_IP |\
1248 MLX5_HASH_FIELD_SEL_IPSEC_SPI) 1590 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
1249 1591
1250 if (priv->params.lro_en) { 1592 mlx5e_build_tir_ctx_lro(tirc, priv);
1251 MLX5_SET(tirc, tirc, lro_enable_mask, 1593
1252 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | 1594 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
1253 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
1254 MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
1255 (priv->params.lro_wqe_sz -
1256 ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
1257 MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
1258 MLX5_CAP_ETH(priv->mdev,
1259 lro_timer_supported_periods[3]));
1260 }
1261 1595
1262 switch (tt) { 1596 switch (tt) {
1263 case MLX5E_TT_ANY: 1597 case MLX5E_TT_ANY:
1264 MLX5_SET(tirc, tirc, disp_type, 1598 MLX5_SET(tirc, tirc, indirect_table,
1265 MLX5_TIRC_DISP_TYPE_DIRECT); 1599 priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
1266 MLX5_SET(tirc, tirc, inline_rqn, 1600 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
1267 priv->channel[0]->rq.rqn);
1268 break; 1601 break;
1269 default: 1602 default:
1270 MLX5_SET(tirc, tirc, disp_type,
1271 MLX5_TIRC_DISP_TYPE_INDIRECT);
1272 MLX5_SET(tirc, tirc, indirect_table, 1603 MLX5_SET(tirc, tirc, indirect_table,
1273 priv->rqtn); 1604 priv->rqtn[MLX5E_INDIRECTION_RQT]);
1274 MLX5_SET(tirc, tirc, rx_hash_fn, 1605 MLX5_SET(tirc, tirc, rx_hash_fn,
1275 mlx5e_rx_hash_fn(priv->params.rss_hfunc)); 1606 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
1276 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { 1607 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -1366,7 +1697,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
1366 } 1697 }
1367} 1698}
1368 1699
1369static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) 1700static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
1370{ 1701{
1371 struct mlx5_core_dev *mdev = priv->mdev; 1702 struct mlx5_core_dev *mdev = priv->mdev;
1372 u32 *in; 1703 u32 *in;
@@ -1390,184 +1721,37 @@ static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
1390 return err; 1721 return err;
1391} 1722}
1392 1723
1393static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt) 1724static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
1394{ 1725{
1395 mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); 1726 mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
1396} 1727}
1397 1728
1398static int mlx5e_open_tirs(struct mlx5e_priv *priv) 1729static int mlx5e_create_tirs(struct mlx5e_priv *priv)
1399{ 1730{
1400 int err; 1731 int err;
1401 int i; 1732 int i;
1402 1733
1403 for (i = 0; i < MLX5E_NUM_TT; i++) { 1734 for (i = 0; i < MLX5E_NUM_TT; i++) {
1404 err = mlx5e_open_tir(priv, i); 1735 err = mlx5e_create_tir(priv, i);
1405 if (err) 1736 if (err)
1406 goto err_close_tirs; 1737 goto err_destroy_tirs;
1407 } 1738 }
1408 1739
1409 return 0; 1740 return 0;
1410 1741
1411err_close_tirs: 1742err_destroy_tirs:
1412 for (i--; i >= 0; i--) 1743 for (i--; i >= 0; i--)
1413 mlx5e_close_tir(priv, i); 1744 mlx5e_destroy_tir(priv, i);
1414 1745
1415 return err; 1746 return err;
1416} 1747}
1417 1748
1418static void mlx5e_close_tirs(struct mlx5e_priv *priv) 1749static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
1419{ 1750{
1420 int i; 1751 int i;
1421 1752
1422 for (i = 0; i < MLX5E_NUM_TT; i++) 1753 for (i = 0; i < MLX5E_NUM_TT; i++)
1423 mlx5e_close_tir(priv, i); 1754 mlx5e_destroy_tir(priv, i);
1424}
1425
1426static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
1427{
1428 struct mlx5e_priv *priv = netdev_priv(netdev);
1429 struct mlx5_core_dev *mdev = priv->mdev;
1430 int hw_mtu;
1431 int err;
1432
1433 err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
1434 if (err)
1435 return err;
1436
1437 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
1438
1439 if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
1440 netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
1441 __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
1442
1443 netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
1444 return 0;
1445}
1446
1447int mlx5e_open_locked(struct net_device *netdev)
1448{
1449 struct mlx5e_priv *priv = netdev_priv(netdev);
1450 int num_txqs;
1451 int err;
1452
1453 num_txqs = priv->params.num_channels * priv->params.num_tc;
1454 netif_set_real_num_tx_queues(netdev, num_txqs);
1455 netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
1456
1457 err = mlx5e_set_dev_port_mtu(netdev);
1458 if (err)
1459 return err;
1460
1461 err = mlx5e_open_tises(priv);
1462 if (err) {
1463 netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
1464 __func__, err);
1465 return err;
1466 }
1467
1468 err = mlx5e_open_channels(priv);
1469 if (err) {
1470 netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
1471 __func__, err);
1472 goto err_close_tises;
1473 }
1474
1475 err = mlx5e_open_rqt(priv);
1476 if (err) {
1477 netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
1478 __func__, err);
1479 goto err_close_channels;
1480 }
1481
1482 err = mlx5e_open_tirs(priv);
1483 if (err) {
1484 netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
1485 __func__, err);
1486 goto err_close_rqls;
1487 }
1488
1489 err = mlx5e_open_flow_table(priv);
1490 if (err) {
1491 netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
1492 __func__, err);
1493 goto err_close_tirs;
1494 }
1495
1496 err = mlx5e_add_all_vlan_rules(priv);
1497 if (err) {
1498 netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
1499 __func__, err);
1500 goto err_close_flow_table;
1501 }
1502
1503 mlx5e_init_eth_addr(priv);
1504
1505 set_bit(MLX5E_STATE_OPENED, &priv->state);
1506
1507 mlx5e_update_carrier(priv);
1508 mlx5e_set_rx_mode_core(priv);
1509
1510 schedule_delayed_work(&priv->update_stats_work, 0);
1511 return 0;
1512
1513err_close_flow_table:
1514 mlx5e_close_flow_table(priv);
1515
1516err_close_tirs:
1517 mlx5e_close_tirs(priv);
1518
1519err_close_rqls:
1520 mlx5e_close_rqt(priv);
1521
1522err_close_channels:
1523 mlx5e_close_channels(priv);
1524
1525err_close_tises:
1526 mlx5e_close_tises(priv);
1527
1528 return err;
1529}
1530
1531static int mlx5e_open(struct net_device *netdev)
1532{
1533 struct mlx5e_priv *priv = netdev_priv(netdev);
1534 int err;
1535
1536 mutex_lock(&priv->state_lock);
1537 err = mlx5e_open_locked(netdev);
1538 mutex_unlock(&priv->state_lock);
1539
1540 return err;
1541}
1542
1543int mlx5e_close_locked(struct net_device *netdev)
1544{
1545 struct mlx5e_priv *priv = netdev_priv(netdev);
1546
1547 clear_bit(MLX5E_STATE_OPENED, &priv->state);
1548
1549 mlx5e_set_rx_mode_core(priv);
1550 mlx5e_del_all_vlan_rules(priv);
1551 netif_carrier_off(priv->netdev);
1552 mlx5e_close_flow_table(priv);
1553 mlx5e_close_tirs(priv);
1554 mlx5e_close_rqt(priv);
1555 mlx5e_close_channels(priv);
1556 mlx5e_close_tises(priv);
1557
1558 return 0;
1559}
1560
1561static int mlx5e_close(struct net_device *netdev)
1562{
1563 struct mlx5e_priv *priv = netdev_priv(netdev);
1564 int err;
1565
1566 mutex_lock(&priv->state_lock);
1567 err = mlx5e_close_locked(netdev);
1568 mutex_unlock(&priv->state_lock);
1569
1570 return err;
1571} 1755}
1572 1756
1573static struct rtnl_link_stats64 * 1757static struct rtnl_link_stats64 *
@@ -1631,11 +1815,15 @@ static int mlx5e_set_features(struct net_device *netdev,
1631 mlx5e_close_locked(priv->netdev); 1815 mlx5e_close_locked(priv->netdev);
1632 1816
1633 priv->params.lro_en = !!(features & NETIF_F_LRO); 1817 priv->params.lro_en = !!(features & NETIF_F_LRO);
1818 mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
1819 mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
1634 1820
1635 if (was_opened) 1821 if (was_opened)
1636 err = mlx5e_open_locked(priv->netdev); 1822 err = mlx5e_open_locked(priv->netdev);
1637 } 1823 }
1638 1824
1825 mutex_unlock(&priv->state_lock);
1826
1639 if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) { 1827 if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
1640 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 1828 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1641 mlx5e_enable_vlan_filter(priv); 1829 mlx5e_enable_vlan_filter(priv);
@@ -1643,8 +1831,6 @@ static int mlx5e_set_features(struct net_device *netdev,
1643 mlx5e_disable_vlan_filter(priv); 1831 mlx5e_disable_vlan_filter(priv);
1644 } 1832 }
1645 1833
1646 mutex_unlock(&priv->state_lock);
1647
1648 return 0; 1834 return 0;
1649} 1835}
1650 1836
@@ -1891,16 +2077,73 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
1891 goto err_dealloc_transport_domain; 2077 goto err_dealloc_transport_domain;
1892 } 2078 }
1893 2079
2080 err = mlx5e_create_tises(priv);
2081 if (err) {
2082 mlx5_core_warn(mdev, "create tises failed, %d\n", err);
2083 goto err_destroy_mkey;
2084 }
2085
2086 err = mlx5e_open_drop_rq(priv);
2087 if (err) {
2088 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
2089 goto err_destroy_tises;
2090 }
2091
2092 err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
2093 if (err) {
2094 mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
2095 goto err_close_drop_rq;
2096 }
2097
2098 err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
2099 if (err) {
2100 mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
2101 goto err_destroy_rqt_indir;
2102 }
2103
2104 err = mlx5e_create_tirs(priv);
2105 if (err) {
2106 mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
2107 goto err_destroy_rqt_single;
2108 }
2109
2110 err = mlx5e_create_flow_tables(priv);
2111 if (err) {
2112 mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
2113 goto err_destroy_tirs;
2114 }
2115
2116 mlx5e_init_eth_addr(priv);
2117
1894 err = register_netdev(netdev); 2118 err = register_netdev(netdev);
1895 if (err) { 2119 if (err) {
1896 mlx5_core_err(mdev, "register_netdev failed, %d\n", err); 2120 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
1897 goto err_destroy_mkey; 2121 goto err_destroy_flow_tables;
1898 } 2122 }
1899 2123
1900 mlx5e_enable_async_events(priv); 2124 mlx5e_enable_async_events(priv);
2125 schedule_work(&priv->set_rx_mode_work);
1901 2126
1902 return priv; 2127 return priv;
1903 2128
2129err_destroy_flow_tables:
2130 mlx5e_destroy_flow_tables(priv);
2131
2132err_destroy_tirs:
2133 mlx5e_destroy_tirs(priv);
2134
2135err_destroy_rqt_single:
2136 mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
2137
2138err_destroy_rqt_indir:
2139 mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
2140
2141err_close_drop_rq:
2142 mlx5e_close_drop_rq(priv);
2143
2144err_destroy_tises:
2145 mlx5e_destroy_tises(priv);
2146
1904err_destroy_mkey: 2147err_destroy_mkey:
1905 mlx5_core_destroy_mkey(mdev, &priv->mr); 2148 mlx5_core_destroy_mkey(mdev, &priv->mr);
1906 2149
@@ -1924,13 +2167,22 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
1924 struct mlx5e_priv *priv = vpriv; 2167 struct mlx5e_priv *priv = vpriv;
1925 struct net_device *netdev = priv->netdev; 2168 struct net_device *netdev = priv->netdev;
1926 2169
2170 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
2171
2172 schedule_work(&priv->set_rx_mode_work);
2173 mlx5e_disable_async_events(priv);
2174 flush_scheduled_work();
1927 unregister_netdev(netdev); 2175 unregister_netdev(netdev);
2176 mlx5e_destroy_flow_tables(priv);
2177 mlx5e_destroy_tirs(priv);
2178 mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
2179 mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
2180 mlx5e_close_drop_rq(priv);
2181 mlx5e_destroy_tises(priv);
1928 mlx5_core_destroy_mkey(priv->mdev, &priv->mr); 2182 mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
1929 mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); 2183 mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
1930 mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 2184 mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
1931 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 2185 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
1932 mlx5e_disable_async_events(priv);
1933 flush_scheduled_work();
1934 free_netdev(netdev); 2186 free_netdev(netdev);
1935} 2187}
1936 2188
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index c4f3f74908ec..b4c87c7b0cf0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -163,6 +163,18 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
163 return err; 163 return err;
164} 164}
165 165
166int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
167 int inlen)
168{
169 u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
170
171 MLX5_SET(modify_tir_in, in, tirn, tirn);
172 MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
173
174 memset(out, 0, sizeof(out));
175 return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
176}
177
166void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) 178void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
167{ 179{
168 u32 in[MLX5_ST_SZ_DW(destroy_tir_out)]; 180 u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
@@ -375,6 +387,18 @@ int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
375 return err; 387 return err;
376} 388}
377 389
390int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
391 int inlen)
392{
393 u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
394
395 MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
396 MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
397
398 memset(out, 0, sizeof(out));
399 return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
400}
401
378void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) 402void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
379{ 403{
380 u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]; 404 u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
index 10bd75e7d9b1..74cae51436e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -45,6 +45,8 @@ int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
45void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); 45void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
46int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, 46int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
47 u32 *tirn); 47 u32 *tirn);
48int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
49 int inlen);
48void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); 50void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
49int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, 51int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
50 u32 *tisn); 52 u32 *tisn);
@@ -63,6 +65,8 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
63 65
64int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, 66int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
65 u32 *rqtn); 67 u32 *rqtn);
68int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
69 int inlen);
66void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); 70void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
67 71
68#endif /* __TRANSOBJ_H__ */ 72#endif /* __TRANSOBJ_H__ */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b943cd9e2097..250b1ff8b48d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1182,6 +1182,16 @@ enum {
1182 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, 1182 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
1183}; 1183};
1184 1184
1185enum {
1186 MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
1187 MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
1188 MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
1189 MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
1190 MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1191 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
1192 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
1193};
1194
1185static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) 1195static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1186{ 1196{
1187 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) 1197 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5fe0cae1a515..2039546b0ec6 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -103,6 +103,7 @@ enum {
103 MLX5_REG_PMTU = 0x5003, 103 MLX5_REG_PMTU = 0x5003,
104 MLX5_REG_PTYS = 0x5004, 104 MLX5_REG_PTYS = 0x5004,
105 MLX5_REG_PAOS = 0x5006, 105 MLX5_REG_PAOS = 0x5006,
106 MLX5_REG_PPCNT = 0x5008,
106 MLX5_REG_PMAOS = 0x5012, 107 MLX5_REG_PMAOS = 0x5012,
107 MLX5_REG_PUDE = 0x5009, 108 MLX5_REG_PUDE = 0x5009,
108 MLX5_REG_PMPE = 0x5010, 109 MLX5_REG_PMPE = 0x5010,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index c60a62bba652..dd2097455a2e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -4050,6 +4050,13 @@ struct mlx5_ifc_modify_tis_in_bits {
4050 struct mlx5_ifc_tisc_bits ctx; 4050 struct mlx5_ifc_tisc_bits ctx;
4051}; 4051};
4052 4052
4053struct mlx5_ifc_modify_tir_bitmask_bits {
4054 u8 reserved[0x20];
4055
4056 u8 reserved1[0x1f];
4057 u8 lro[0x1];
4058};
4059
4053struct mlx5_ifc_modify_tir_out_bits { 4060struct mlx5_ifc_modify_tir_out_bits {
4054 u8 status[0x8]; 4061 u8 status[0x8];
4055 u8 reserved_0[0x18]; 4062 u8 reserved_0[0x18];
@@ -4071,7 +4078,7 @@ struct mlx5_ifc_modify_tir_in_bits {
4071 4078
4072 u8 reserved_3[0x20]; 4079 u8 reserved_3[0x20];
4073 4080
4074 u8 modify_bitmask[0x40]; 4081 struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
4075 4082
4076 u8 reserved_4[0x40]; 4083 u8 reserved_4[0x40];
4077 4084
@@ -4116,6 +4123,13 @@ struct mlx5_ifc_modify_rqt_out_bits {
4116 u8 reserved_1[0x40]; 4123 u8 reserved_1[0x40];
4117}; 4124};
4118 4125
4126struct mlx5_ifc_rqt_bitmask_bits {
4127 u8 reserved[0x20];
4128
4129 u8 reserved1[0x1f];
4130 u8 rqn_list[0x1];
4131};
4132
4119struct mlx5_ifc_modify_rqt_in_bits { 4133struct mlx5_ifc_modify_rqt_in_bits {
4120 u8 opcode[0x10]; 4134 u8 opcode[0x10];
4121 u8 reserved_0[0x10]; 4135 u8 reserved_0[0x10];
@@ -4128,7 +4142,7 @@ struct mlx5_ifc_modify_rqt_in_bits {
4128 4142
4129 u8 reserved_3[0x20]; 4143 u8 reserved_3[0x20];
4130 4144
4131 u8 modify_bitmask[0x40]; 4145 struct mlx5_ifc_rqt_bitmask_bits bitmask;
4132 4146
4133 u8 reserved_4[0x40]; 4147 u8 reserved_4[0x40];
4134 4148
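Note: the mlx5_ifc.h hunks above replace the opaque modify_bitmask[0x40] fields with named bitmask structs. mlx5_ifc.h describes firmware structures as sequences of bit widths packed MSB-first into big-endian 32-bit dwords, so in mlx5_ifc_rqt_bitmask_bits the reserved[0x20] field fills dword 0 and reserved1[0x1f] followed by rqn_list[0x1] leaves rqn_list as the low-order bit of dword 1, which is what MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1) writes. An illustrative sketch (not the mlx5 implementation) that walks the widths and prints each field's starting bit and dword:

/* Sketch: compute bit/dword offsets from an mlx5_ifc-style width list. */
#include <stdio.h>

struct ifc_field {
	const char *name;
	unsigned int width;	/* in bits, as written in mlx5_ifc.h */
};

int main(void)
{
	const struct ifc_field rqt_bitmask[] = {
		{ "reserved",  0x20 },
		{ "reserved1", 0x1f },
		{ "rqn_list",  0x01 },
	};
	unsigned int bit = 0;

	for (unsigned int i = 0;
	     i < sizeof(rqt_bitmask) / sizeof(rqt_bitmask[0]); i++) {
		printf("%-10s width 0x%02x, starts at bit %3u (dword %u)\n",
		       rqt_bitmask[i].name, rqt_bitmask[i].width,
		       bit, bit / 32);
		bit += rqt_bitmask[i].width;
	}
	return 0;
}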