-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h                  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c             38
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c         148
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h       2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c    232
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h     59
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c       58
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h       11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c   10
9 files changed, 431 insertions(+), 130 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 82e9a8034557..adec88d941df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -274,6 +274,8 @@ struct hnae3_ae_dev {
274 * Get firmware version 274 * Get firmware version
275 * get_mdix_mode() 275 * get_mdix_mode()
276 * Get media typr of phy 276 * Get media typr of phy
277 * enable_vlan_filter()
278 * Enable vlan filter
277 * set_vlan_filter() 279 * set_vlan_filter()
278 * Set vlan filter config of Ports 280 * Set vlan filter config of Ports
279 * set_vf_vlan_filter() 281 * set_vf_vlan_filter()
@@ -382,6 +384,7 @@ struct hnae3_ae_ops {
382 void (*get_mdix_mode)(struct hnae3_handle *handle, 384 void (*get_mdix_mode)(struct hnae3_handle *handle,
383 u8 *tp_mdix_ctrl, u8 *tp_mdix); 385 u8 *tp_mdix_ctrl, u8 *tp_mdix);
384 386
387 void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
385 int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto, 388 int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
386 u16 vlan_id, bool is_kill); 389 u16 vlan_id, bool is_kill);
387 int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, 390 int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
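Note: the hnae3.h hunk above adds an enable_vlan_filter() callback to the abstraction layer's ops table. As a rough userspace sketch of that ops-table pattern (not the driver code; the nic_ops/demo_handle names are made up for illustration), an optional callback is normally checked before it is invoked:

#include <stdbool.h>
#include <stdio.h>

struct demo_handle;                     /* opaque device handle (hypothetical) */

struct nic_ops {                        /* hypothetical ops table, mirroring hnae3_ae_ops */
	void (*enable_vlan_filter)(struct demo_handle *h, bool enable);
	int  (*set_vlan_filter)(struct demo_handle *h, unsigned short proto,
				unsigned short vlan_id, bool is_kill);
};

static void demo_enable_vlan_filter(struct demo_handle *h, bool enable)
{
	(void)h;
	printf("vlan filter %s\n", enable ? "on" : "off");
}

static const struct nic_ops demo_ops = {
	.enable_vlan_filter = demo_enable_vlan_filter,
	/* .set_vlan_filter left NULL: callers must check before use */
};

int main(void)
{
	struct demo_handle *h = NULL;       /* stand-in handle for the sketch */

	if (demo_ops.enable_vlan_filter)    /* optional callback: guard with a NULL check */
		demo_ops.enable_vlan_filter(h, true);
	return 0;
}
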
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 320ae8892a68..b23107d7821f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -247,6 +247,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
247 if (ret) 247 if (ret)
248 goto out_start_err; 248 goto out_start_err;
249 249
250 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
251
250 return 0; 252 return 0;
251 253
252out_start_err: 254out_start_err:
@@ -286,6 +288,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
286 const struct hnae3_ae_ops *ops; 288 const struct hnae3_ae_ops *ops;
287 int i; 289 int i;
288 290
291 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
292 return;
293
289 /* stop ae_dev */ 294 /* stop ae_dev */
290 ops = priv->ae_handle->ae_algo->ops; 295 ops = priv->ae_handle->ae_algo->ops;
291 if (ops->stop) 296 if (ops->stop)
@@ -1101,6 +1106,11 @@ static int hns3_nic_set_features(struct net_device *netdev,
1101 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; 1106 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1102 } 1107 }
1103 1108
1109 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1110 h->ae_algo->ops->enable_vlan_filter(h, true);
1111 else
1112 h->ae_algo->ops->enable_vlan_filter(h, false);
1113
1104 changed = netdev->features ^ features; 1114 changed = netdev->features ^ features;
1105 if (changed & NETIF_F_HW_VLAN_CTAG_RX) { 1115 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
1106 if (features & NETIF_F_HW_VLAN_CTAG_RX) 1116 if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -1121,6 +1131,7 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1121{ 1131{
1122 struct hns3_nic_priv *priv = netdev_priv(netdev); 1132 struct hns3_nic_priv *priv = netdev_priv(netdev);
1123 int queue_num = priv->ae_handle->kinfo.num_tqps; 1133 int queue_num = priv->ae_handle->kinfo.num_tqps;
1134 struct hnae3_handle *handle = priv->ae_handle;
1124 struct hns3_enet_ring *ring; 1135 struct hns3_enet_ring *ring;
1125 unsigned int start; 1136 unsigned int start;
1126 unsigned int idx; 1137 unsigned int idx;
@@ -1128,6 +1139,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1128 u64 rx_bytes = 0; 1139 u64 rx_bytes = 0;
1129 u64 tx_pkts = 0; 1140 u64 tx_pkts = 0;
1130 u64 rx_pkts = 0; 1141 u64 rx_pkts = 0;
1142 u64 tx_drop = 0;
1143 u64 rx_drop = 0;
1144
1145 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1146 return;
1147
1148 handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1131 1149
1132 for (idx = 0; idx < queue_num; idx++) { 1150 for (idx = 0; idx < queue_num; idx++) {
1133 /* fetch the tx stats */ 1151 /* fetch the tx stats */
@@ -1136,6 +1154,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1136 start = u64_stats_fetch_begin_irq(&ring->syncp); 1154 start = u64_stats_fetch_begin_irq(&ring->syncp);
1137 tx_bytes += ring->stats.tx_bytes; 1155 tx_bytes += ring->stats.tx_bytes;
1138 tx_pkts += ring->stats.tx_pkts; 1156 tx_pkts += ring->stats.tx_pkts;
1157 tx_drop += ring->stats.tx_busy;
1158 tx_drop += ring->stats.sw_err_cnt;
1139 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1159 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1140 1160
1141 /* fetch the rx stats */ 1161 /* fetch the rx stats */
@@ -1144,6 +1164,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1144 start = u64_stats_fetch_begin_irq(&ring->syncp); 1164 start = u64_stats_fetch_begin_irq(&ring->syncp);
1145 rx_bytes += ring->stats.rx_bytes; 1165 rx_bytes += ring->stats.rx_bytes;
1146 rx_pkts += ring->stats.rx_pkts; 1166 rx_pkts += ring->stats.rx_pkts;
1167 rx_drop += ring->stats.non_vld_descs;
1168 rx_drop += ring->stats.err_pkt_len;
1169 rx_drop += ring->stats.l2_err;
1147 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1170 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1148 } 1171 }
1149 1172
@@ -1159,8 +1182,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1159 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 1182 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1160 1183
1161 stats->tx_errors = netdev->stats.tx_errors; 1184 stats->tx_errors = netdev->stats.tx_errors;
1162 stats->rx_dropped = netdev->stats.rx_dropped; 1185 stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1163 stats->tx_dropped = netdev->stats.tx_dropped; 1186 stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1164 stats->collisions = netdev->stats.collisions; 1187 stats->collisions = netdev->stats.collisions;
1165 stats->rx_over_errors = netdev->stats.rx_over_errors; 1188 stats->rx_over_errors = netdev->stats.rx_over_errors;
1166 stats->rx_frame_errors = netdev->stats.rx_frame_errors; 1189 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -1390,6 +1413,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1390 return ret; 1413 return ret;
1391 } 1414 }
1392 1415
1416 netdev->mtu = new_mtu;
1417
1393 /* if the netdev was running earlier, bring it up again */ 1418 /* if the netdev was running earlier, bring it up again */
1394 if (if_running && hns3_nic_net_open(netdev)) 1419 if (if_running && hns3_nic_net_open(netdev))
1395 ret = -EINVAL; 1420 ret = -EINVAL;
@@ -1549,6 +1574,8 @@ static struct pci_driver hns3_driver = {
1549/* set default feature to hns3 */ 1574/* set default feature to hns3 */
1550static void hns3_set_default_feature(struct net_device *netdev) 1575static void hns3_set_default_feature(struct net_device *netdev)
1551{ 1576{
1577 struct hnae3_handle *h = hns3_get_handle(netdev);
1578
1552 netdev->priv_flags |= IFF_UNICAST_FLT; 1579 netdev->priv_flags |= IFF_UNICAST_FLT;
1553 1580
1554 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1581 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1577,12 +1604,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
1577 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1604 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1578 1605
1579 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1606 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1580 NETIF_F_HW_VLAN_CTAG_FILTER | 1607 NETIF_F_HW_VLAN_CTAG_TX |
1581 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1582 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1608 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1583 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1609 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1584 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1610 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1585 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1611 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1612
1613 if (!(h->flags & HNAE3_SUPPORT_VF))
1614 netdev->hw_features |=
1615 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
1586} 1616}
1587 1617
1588static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 1618static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
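Note: the hns3_enet.c hunks above fold several per-ring error counters into the rx_dropped/tx_dropped totals reported by .ndo_get_stats64. A minimal userspace sketch of that aggregation (the ring_stats/demo_ring types here are made up, and the kernel's u64_stats seqcount retry loop is deliberately omitted):

#include <stdint.h>
#include <stdio.h>

struct ring_stats {                 /* per-queue counters (subset, hypothetical layout) */
	uint64_t tx_pkts, tx_bytes, tx_busy, sw_err_cnt;
	uint64_t rx_pkts, rx_bytes, non_vld_descs, err_pkt_len, l2_err;
};

struct demo_ring { struct ring_stats stats; };

/* Sum per-queue counters into device-wide totals, mirroring how
 * hns3_nic_get_stats64() above folds busy/error counters into *_dropped. */
static void demo_get_stats64(const struct demo_ring *tx, const struct demo_ring *rx,
			     int queue_num, uint64_t *tx_dropped, uint64_t *rx_dropped)
{
	uint64_t tx_drop = 0, rx_drop = 0;
	int i;

	for (i = 0; i < queue_num; i++) {
		tx_drop += tx[i].stats.tx_busy + tx[i].stats.sw_err_cnt;
		rx_drop += rx[i].stats.non_vld_descs + rx[i].stats.err_pkt_len +
			   rx[i].stats.l2_err;
	}
	*tx_dropped = tx_drop;
	*rx_dropped = rx_drop;
}

int main(void)
{
	struct demo_ring tx[2] = { { { .tx_busy = 1, .sw_err_cnt = 2 } }, { { .tx_busy = 3 } } };
	struct demo_ring rx[2] = { { { .err_pkt_len = 4 } }, { { .l2_err = 1, .non_vld_descs = 2 } } };
	uint64_t txd, rxd;

	demo_get_stats64(tx, rx, 2, &txd, &rxd);
	printf("tx_dropped=%llu rx_dropped=%llu\n",
	       (unsigned long long)txd, (unsigned long long)rxd);
	return 0;
}
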
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 379c01dddbf8..1e8fac3ae750 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -15,26 +15,25 @@
15 15
16struct hns3_stats { 16struct hns3_stats {
17 char stats_string[ETH_GSTRING_LEN]; 17 char stats_string[ETH_GSTRING_LEN];
18 int stats_size;
19 int stats_offset; 18 int stats_offset;
20}; 19};
21 20
22/* tqp related stats */ 21/* tqp related stats */
23#define HNS3_TQP_STAT(_string, _member) { \ 22#define HNS3_TQP_STAT(_string, _member) { \
24 .stats_string = _string, \ 23 .stats_string = _string, \
25 .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ 24 .stats_offset = offsetof(struct hns3_enet_ring, stats) +\
26 .stats_offset = offsetof(struct hns3_enet_ring, stats), \ 25 offsetof(struct ring_stats, _member), \
27} \ 26}
28 27
29static const struct hns3_stats hns3_txq_stats[] = { 28static const struct hns3_stats hns3_txq_stats[] = {
30 /* Tx per-queue statistics */ 29 /* Tx per-queue statistics */
31 HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt), 30 HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
32 HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt), 31 HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
33 HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt), 32 HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
34 HNS3_TQP_STAT("tx_pkts", tx_pkts), 33 HNS3_TQP_STAT("packets", tx_pkts),
35 HNS3_TQP_STAT("tx_bytes", tx_bytes), 34 HNS3_TQP_STAT("bytes", tx_bytes),
36 HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt), 35 HNS3_TQP_STAT("errors", tx_err_cnt),
37 HNS3_TQP_STAT("tx_restart_queue", restart_queue), 36 HNS3_TQP_STAT("tx_wake", restart_queue),
38 HNS3_TQP_STAT("tx_busy", tx_busy), 37 HNS3_TQP_STAT("tx_busy", tx_busy),
39}; 38};
40 39
@@ -42,24 +41,59 @@ static const struct hns3_stats hns3_txq_stats[] = {
42 41
43static const struct hns3_stats hns3_rxq_stats[] = { 42static const struct hns3_stats hns3_rxq_stats[] = {
44 /* Rx per-queue statistics */ 43 /* Rx per-queue statistics */
45 HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt), 44 HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
46 HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt), 45 HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
47 HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt), 46 HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
48 HNS3_TQP_STAT("rx_pkts", rx_pkts), 47 HNS3_TQP_STAT("packets", rx_pkts),
49 HNS3_TQP_STAT("rx_bytes", rx_bytes), 48 HNS3_TQP_STAT("bytes", rx_bytes),
50 HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt), 49 HNS3_TQP_STAT("errors", rx_err_cnt),
51 HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt), 50 HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
52 HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len), 51 HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
53 HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs), 52 HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
54 HNS3_TQP_STAT("rx_err_bd_num", err_bd_num), 53 HNS3_TQP_STAT("err_bd_num", err_bd_num),
55 HNS3_TQP_STAT("rx_l2_err", l2_err), 54 HNS3_TQP_STAT("l2_err", l2_err),
56 HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err), 55 HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
57}; 56};
58 57
59#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) 58#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
60 59
61#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) 60#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
62 61
62/* netdev stats */
63#define HNS3_NETDEV_STAT(_string, _member) { \
64 .stats_string = _string, \
65 .stats_offset = offsetof(struct rtnl_link_stats64, _member) \
66}
67
68static const struct hns3_stats hns3_netdev_stats[] = {
69 /* Rx per-queue statistics */
70 HNS3_NETDEV_STAT("rx_packets", rx_packets),
71 HNS3_NETDEV_STAT("tx_packets", tx_packets),
72 HNS3_NETDEV_STAT("rx_bytes", rx_bytes),
73 HNS3_NETDEV_STAT("tx_bytes", tx_bytes),
74 HNS3_NETDEV_STAT("rx_errors", rx_errors),
75 HNS3_NETDEV_STAT("tx_errors", tx_errors),
76 HNS3_NETDEV_STAT("rx_dropped", rx_dropped),
77 HNS3_NETDEV_STAT("tx_dropped", tx_dropped),
78 HNS3_NETDEV_STAT("multicast", multicast),
79 HNS3_NETDEV_STAT("collisions", collisions),
80 HNS3_NETDEV_STAT("rx_length_errors", rx_length_errors),
81 HNS3_NETDEV_STAT("rx_over_errors", rx_over_errors),
82 HNS3_NETDEV_STAT("rx_crc_errors", rx_crc_errors),
83 HNS3_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
84 HNS3_NETDEV_STAT("rx_fifo_errors", rx_fifo_errors),
85 HNS3_NETDEV_STAT("rx_missed_errors", rx_missed_errors),
86 HNS3_NETDEV_STAT("tx_aborted_errors", tx_aborted_errors),
87 HNS3_NETDEV_STAT("tx_carrier_errors", tx_carrier_errors),
88 HNS3_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
89 HNS3_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
90 HNS3_NETDEV_STAT("tx_window_errors", tx_window_errors),
91 HNS3_NETDEV_STAT("rx_compressed", rx_compressed),
92 HNS3_NETDEV_STAT("tx_compressed", tx_compressed),
93};
94
95#define HNS3_NETDEV_STATS_COUNT ARRAY_SIZE(hns3_netdev_stats)
96
63#define HNS3_SELF_TEST_TPYE_NUM 1 97#define HNS3_SELF_TEST_TPYE_NUM 1
64#define HNS3_NIC_LB_TEST_PKT_NUM 1 98#define HNS3_NIC_LB_TEST_PKT_NUM 1
65#define HNS3_NIC_LB_TEST_RING_ID 0 99#define HNS3_NIC_LB_TEST_RING_ID 0
@@ -389,9 +423,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
389} 423}
390 424
391static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, 425static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
392 u32 stat_count, u32 num_tqps) 426 u32 stat_count, u32 num_tqps, const char *prefix)
393{ 427{
394#define MAX_PREFIX_SIZE (8 + 4) 428#define MAX_PREFIX_SIZE (6 + 4)
395 u32 size_left; 429 u32 size_left;
396 u32 i, j; 430 u32 i, j;
397 u32 n1; 431 u32 n1;
@@ -401,7 +435,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
401 data[ETH_GSTRING_LEN - 1] = '\0'; 435 data[ETH_GSTRING_LEN - 1] = '\0';
402 436
403 /* first, prepend the prefix string */ 437 /* first, prepend the prefix string */
404 n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i); 438 n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
439 prefix, i);
405 n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); 440 n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
406 size_left = (ETH_GSTRING_LEN - 1) - n1; 441 size_left = (ETH_GSTRING_LEN - 1) - n1;
407 442
@@ -417,14 +452,37 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
417static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) 452static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
418{ 453{
419 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 454 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
455 const char tx_prefix[] = "txq";
456 const char rx_prefix[] = "rxq";
420 457
421 /* get strings for Tx */ 458 /* get strings for Tx */
422 data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, 459 data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
423 kinfo->num_tqps); 460 kinfo->num_tqps, tx_prefix);
424 461
425 /* get strings for Rx */ 462 /* get strings for Rx */
426 data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, 463 data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
427 kinfo->num_tqps); 464 kinfo->num_tqps, rx_prefix);
465
466 return data;
467}
468
469static u8 *hns3_netdev_stats_get_strings(u8 *data)
470{
471 int i;
472
473 /* get strings for netdev */
474 for (i = 0; i < HNS3_NETDEV_STATS_COUNT; i++) {
475 snprintf(data, ETH_GSTRING_LEN,
476 hns3_netdev_stats[i].stats_string);
477 data += ETH_GSTRING_LEN;
478 }
479
480 snprintf(data, ETH_GSTRING_LEN, "netdev_rx_dropped");
481 data += ETH_GSTRING_LEN;
482 snprintf(data, ETH_GSTRING_LEN, "netdev_tx_dropped");
483 data += ETH_GSTRING_LEN;
484 snprintf(data, ETH_GSTRING_LEN, "netdev_tx_timeout");
485 data += ETH_GSTRING_LEN;
428 486
429 return data; 487 return data;
430} 488}
@@ -440,6 +498,7 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
440 498
441 switch (stringset) { 499 switch (stringset) {
442 case ETH_SS_STATS: 500 case ETH_SS_STATS:
501 buff = hns3_netdev_stats_get_strings(buff);
443 buff = hns3_get_strings_tqps(h, buff); 502 buff = hns3_get_strings_tqps(h, buff);
444 h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); 503 h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
445 break; 504 break;
@@ -455,13 +514,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
455 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 514 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
456 struct hns3_enet_ring *ring; 515 struct hns3_enet_ring *ring;
457 u8 *stat; 516 u8 *stat;
458 u32 i; 517 int i, j;
459 518
460 /* get stats for Tx */ 519 /* get stats for Tx */
461 for (i = 0; i < kinfo->num_tqps; i++) { 520 for (i = 0; i < kinfo->num_tqps; i++) {
462 ring = nic_priv->ring_data[i].ring; 521 ring = nic_priv->ring_data[i].ring;
463 for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) { 522 for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
464 stat = (u8 *)ring + hns3_txq_stats[i].stats_offset; 523 stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
465 *data++ = *(u64 *)stat; 524 *data++ = *(u64 *)stat;
466 } 525 }
467 } 526 }
@@ -469,8 +528,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
469 /* get stats for Rx */ 528 /* get stats for Rx */
470 for (i = 0; i < kinfo->num_tqps; i++) { 529 for (i = 0; i < kinfo->num_tqps; i++) {
471 ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; 530 ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
472 for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) { 531 for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
473 stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset; 532 stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
474 *data++ = *(u64 *)stat; 533 *data++ = *(u64 *)stat;
475 } 534 }
476 } 535 }
@@ -478,6 +537,27 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
478 return data; 537 return data;
479} 538}
480 539
540static u64 *hns3_get_netdev_stats(struct net_device *netdev, u64 *data)
541{
542 struct hns3_nic_priv *priv = netdev_priv(netdev);
543 const struct rtnl_link_stats64 *net_stats;
544 struct rtnl_link_stats64 temp;
545 u8 *stat;
546 int i;
547
548 net_stats = dev_get_stats(netdev, &temp);
549 for (i = 0; i < HNS3_NETDEV_STATS_COUNT; i++) {
550 stat = (u8 *)net_stats + hns3_netdev_stats[i].stats_offset;
551 *data++ = *(u64 *)stat;
552 }
553
554 *data++ = netdev->rx_dropped.counter;
555 *data++ = netdev->tx_dropped.counter;
556 *data++ = priv->tx_timeout_count;
557
558 return data;
559}
560
481/* hns3_get_stats - get detail statistics. 561/* hns3_get_stats - get detail statistics.
482 * @netdev: net device 562 * @netdev: net device
483 * @stats: statistics info. 563 * @stats: statistics info.
@@ -494,7 +574,7 @@ static void hns3_get_stats(struct net_device *netdev,
494 return; 574 return;
495 } 575 }
496 576
497 h->ae_algo->ops->update_stats(h, &netdev->stats); 577 p = hns3_get_netdev_stats(netdev, p);
498 578
499 /* get per-queue stats */ 579 /* get per-queue stats */
500 p = hns3_get_stats_tqps(h, p); 580 p = hns3_get_stats_tqps(h, p);
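Note: the ethtool rework above drops the per-entry size field and addresses every counter by a single byte offset into its containing structure. A compact userspace sketch of that offset-table pattern (the demo_stats/DEMO_STAT names are hypothetical stand-ins for hns3_netdev_stats/HNS3_NETDEV_STAT and rtnl_link_stats64):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_link_stats {            /* stand-in for rtnl_link_stats64 */
	uint64_t rx_packets, tx_packets, rx_dropped, tx_dropped;
};

struct demo_stat_desc {
	const char *name;
	size_t offset;              /* byte offset of the counter inside the struct */
};

#define DEMO_STAT(_name, _member) \
	{ .name = _name, .offset = offsetof(struct demo_link_stats, _member) }

static const struct demo_stat_desc demo_stats[] = {
	DEMO_STAT("rx_packets", rx_packets),
	DEMO_STAT("tx_packets", tx_packets),
	DEMO_STAT("rx_dropped", rx_dropped),
	DEMO_STAT("tx_dropped", tx_dropped),
};

int main(void)
{
	struct demo_link_stats s = { .rx_packets = 10, .tx_packets = 7, .rx_dropped = 1 };
	size_t i;

	/* Walk the table and read each counter through base pointer + offset,
	 * the same trick hns3_get_netdev_stats() uses above. */
	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		const uint8_t *p = (const uint8_t *)&s + demo_stats[i].offset;
		printf("%s: %llu\n", demo_stats[i].name,
		       (unsigned long long)*(const uint64_t *)p);
	}
	return 0;
}
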
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index f5baba216e48..3c3159b2d3bf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -556,8 +556,6 @@ struct hclge_config_auto_neg_cmd {
556 u8 rsv[20]; 556 u8 rsv[20];
557}; 557};
558 558
559#define HCLGE_MAC_MIN_MTU 64
560#define HCLGE_MAC_MAX_MTU 9728
561#define HCLGE_MAC_UPLINK_PORT 0x100 559#define HCLGE_MAC_UPLINK_PORT 0x100
562 560
563struct hclge_config_max_frm_size_cmd { 561struct hclge_config_max_frm_size_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 0874acf5ef39..d7352f5f75c3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -17,6 +17,7 @@
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/if_vlan.h>
20#include <net/rtnetlink.h> 21#include <net/rtnetlink.h>
21#include "hclge_cmd.h" 22#include "hclge_cmd.h"
22#include "hclge_dcb.h" 23#include "hclge_dcb.h"
@@ -35,6 +36,7 @@
35static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, 36static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
36 enum hclge_mta_dmac_sel_type mta_mac_sel, 37 enum hclge_mta_dmac_sel_type mta_mac_sel,
37 bool enable); 38 bool enable);
39static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
38static int hclge_init_vlan_config(struct hclge_dev *hdev); 40static int hclge_init_vlan_config(struct hclge_dev *hdev);
39static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); 41static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
40 42
@@ -279,8 +281,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
280 {"mac_tx_undersize_pkt_num", 282 {"mac_tx_undersize_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
282 {"mac_tx_overrsize_pkt_num", 284 {"mac_tx_oversize_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)}, 285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
284 {"mac_tx_64_oct_pkt_num", 286 {"mac_tx_64_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
286 {"mac_tx_65_127_oct_pkt_num", 288 {"mac_tx_65_127_oct_pkt_num",
@@ -293,8 +295,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
293 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 295 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
294 {"mac_tx_1024_1518_oct_pkt_num", 296 {"mac_tx_1024_1518_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 297 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
296 {"mac_tx_1519_max_oct_pkt_num", 298 {"mac_tx_1519_2047_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)}, 299 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
300 {"mac_tx_2048_4095_oct_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
302 {"mac_tx_4096_8191_oct_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
304 {"mac_tx_8192_12287_oct_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
306 {"mac_tx_8192_9216_oct_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
308 {"mac_tx_9217_12287_oct_pkt_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
310 {"mac_tx_12288_16383_oct_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
312 {"mac_tx_1519_max_good_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
314 {"mac_tx_1519_max_bad_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
298 {"mac_rx_total_pkt_num", 316 {"mac_rx_total_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
300 {"mac_rx_total_oct_num", 318 {"mac_rx_total_oct_num",
@@ -315,8 +333,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 333 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
316 {"mac_rx_undersize_pkt_num", 334 {"mac_rx_undersize_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 335 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
318 {"mac_rx_overrsize_pkt_num", 336 {"mac_rx_oversize_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)}, 337 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
320 {"mac_rx_64_oct_pkt_num", 338 {"mac_rx_64_oct_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 339 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
322 {"mac_rx_65_127_oct_pkt_num", 340 {"mac_rx_65_127_oct_pkt_num",
@@ -329,33 +347,49 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
329 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 347 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
330 {"mac_rx_1024_1518_oct_pkt_num", 348 {"mac_rx_1024_1518_oct_pkt_num",
331 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 349 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
332 {"mac_rx_1519_max_oct_pkt_num", 350 {"mac_rx_1519_2047_oct_pkt_num",
333 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)}, 351 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
334 352 {"mac_rx_2048_4095_oct_pkt_num",
335 {"mac_trans_fragment_pkt_num", 353 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
336 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)}, 354 {"mac_rx_4096_8191_oct_pkt_num",
337 {"mac_trans_undermin_pkt_num", 355 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
338 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)}, 356 {"mac_rx_8192_12287_oct_pkt_num",
339 {"mac_trans_jabber_pkt_num", 357 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
340 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)}, 358 {"mac_rx_8192_9216_oct_pkt_num",
341 {"mac_trans_err_all_pkt_num", 359 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
342 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)}, 360 {"mac_rx_9217_12287_oct_pkt_num",
343 {"mac_trans_from_app_good_pkt_num", 361 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
344 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)}, 362 {"mac_rx_12288_16383_oct_pkt_num",
345 {"mac_trans_from_app_bad_pkt_num", 363 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
346 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)}, 364 {"mac_rx_1519_max_good_pkt_num",
347 {"mac_rcv_fragment_pkt_num", 365 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
348 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)}, 366 {"mac_rx_1519_max_bad_pkt_num",
349 {"mac_rcv_undermin_pkt_num", 367 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
350 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)}, 368
351 {"mac_rcv_jabber_pkt_num", 369 {"mac_tx_fragment_pkt_num",
352 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)}, 370 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
353 {"mac_rcv_fcs_err_pkt_num", 371 {"mac_tx_undermin_pkt_num",
354 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)}, 372 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
355 {"mac_rcv_send_app_good_pkt_num", 373 {"mac_tx_jabber_pkt_num",
356 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)}, 374 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
357 {"mac_rcv_send_app_bad_pkt_num", 375 {"mac_tx_err_all_pkt_num",
358 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)} 376 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
377 {"mac_tx_from_app_good_pkt_num",
378 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
379 {"mac_tx_from_app_bad_pkt_num",
380 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
381 {"mac_rx_fragment_pkt_num",
382 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
383 {"mac_rx_undermin_pkt_num",
384 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
385 {"mac_rx_jabber_pkt_num",
386 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
387 {"mac_rx_fcs_err_pkt_num",
388 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
389 {"mac_rx_send_app_good_pkt_num",
390 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
391 {"mac_rx_send_app_bad_pkt_num",
392 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
359}; 393};
360 394
361static int hclge_64_bit_update_stats(struct hclge_dev *hdev) 395static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
@@ -463,7 +497,7 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
463 497
464static int hclge_mac_update_stats(struct hclge_dev *hdev) 498static int hclge_mac_update_stats(struct hclge_dev *hdev)
465{ 499{
466#define HCLGE_MAC_CMD_NUM 17 500#define HCLGE_MAC_CMD_NUM 21
467#define HCLGE_RTN_DATA_NUM 4 501#define HCLGE_RTN_DATA_NUM 4
468 502
469 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); 503 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
@@ -525,7 +559,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
525 return ret; 559 return ret;
526 } 560 }
527 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 561 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
528 le32_to_cpu(desc[0].data[4]); 562 le32_to_cpu(desc[0].data[1]);
529 } 563 }
530 564
531 for (i = 0; i < kinfo->num_tqps; i++) { 565 for (i = 0; i < kinfo->num_tqps; i++) {
@@ -545,7 +579,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
545 return ret; 579 return ret;
546 } 580 }
547 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 581 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
548 le32_to_cpu(desc[0].data[4]); 582 le32_to_cpu(desc[0].data[1]);
549 } 583 }
550 584
551 return 0; 585 return 0;
@@ -587,7 +621,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
587 for (i = 0; i < kinfo->num_tqps; i++) { 621 for (i = 0; i < kinfo->num_tqps; i++) {
588 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], 622 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
589 struct hclge_tqp, q); 623 struct hclge_tqp, q);
590 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd", 624 snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
591 tqp->index); 625 tqp->index);
592 buff = buff + ETH_GSTRING_LEN; 626 buff = buff + ETH_GSTRING_LEN;
593 } 627 }
@@ -595,7 +629,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
595 for (i = 0; i < kinfo->num_tqps; i++) { 629 for (i = 0; i < kinfo->num_tqps; i++) {
596 struct hclge_tqp *tqp = container_of(kinfo->tqp[i], 630 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
597 struct hclge_tqp, q); 631 struct hclge_tqp, q);
598 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd", 632 snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
599 tqp->index); 633 tqp->index);
600 buff = buff + ETH_GSTRING_LEN; 634 buff = buff + ETH_GSTRING_LEN;
601 } 635 }
@@ -643,23 +677,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
643 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; 677 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
644 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; 678 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
645 679
646 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num; 680 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
647 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; 681 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
648 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
649 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; 682 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
650 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; 683 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
651 net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; 684 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
652 685
653 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; 686 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
654 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; 687 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
655 688
656 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; 689 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
657 net_stats->rx_length_errors = 690 net_stats->rx_length_errors =
658 hw_stats->mac_stats.mac_rx_undersize_pkt_num; 691 hw_stats->mac_stats.mac_rx_undersize_pkt_num;
659 net_stats->rx_length_errors += 692 net_stats->rx_length_errors +=
660 hw_stats->mac_stats.mac_rx_overrsize_pkt_num; 693 hw_stats->mac_stats.mac_rx_oversize_pkt_num;
661 net_stats->rx_over_errors = 694 net_stats->rx_over_errors =
662 hw_stats->mac_stats.mac_rx_overrsize_pkt_num; 695 hw_stats->mac_stats.mac_rx_oversize_pkt_num;
663} 696}
664 697
665static void hclge_update_stats_for_all(struct hclge_dev *hdev) 698static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -699,6 +732,9 @@ static void hclge_update_stats(struct hnae3_handle *handle,
699 struct hclge_hw_stats *hw_stats = &hdev->hw_stats; 732 struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
700 int status; 733 int status;
701 734
735 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
736 return;
737
702 status = hclge_mac_update_stats(hdev); 738 status = hclge_mac_update_stats(hdev);
703 if (status) 739 if (status)
704 dev_err(&hdev->pdev->dev, 740 dev_err(&hdev->pdev->dev,
@@ -724,6 +760,8 @@ static void hclge_update_stats(struct hnae3_handle *handle,
724 status); 760 status);
725 761
726 hclge_update_netstat(hw_stats, net_stats); 762 hclge_update_netstat(hw_stats, net_stats);
763
764 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
727} 765}
728 766
729static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 767static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
@@ -2203,8 +2241,11 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
2203 2241
2204static int hclge_mac_init(struct hclge_dev *hdev) 2242static int hclge_mac_init(struct hclge_dev *hdev)
2205{ 2243{
2244 struct hnae3_handle *handle = &hdev->vport[0].nic;
2245 struct net_device *netdev = handle->kinfo.netdev;
2206 struct hclge_mac *mac = &hdev->hw.mac; 2246 struct hclge_mac *mac = &hdev->hw.mac;
2207 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 2247 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
2248 int mtu;
2208 int ret; 2249 int ret;
2209 2250
2210 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); 2251 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2238,11 +2279,25 @@ static int hclge_mac_init(struct hclge_dev *hdev)
2238 } 2279 }
2239 2280
2240 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); 2281 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
2241 if (ret) 2282 if (ret) {
2242 dev_err(&hdev->pdev->dev, 2283 dev_err(&hdev->pdev->dev,
2243 "set default mac_vlan_mask fail ret=%d\n", ret); 2284 "set default mac_vlan_mask fail ret=%d\n", ret);
2285 return ret;
2286 }
2244 2287
2245 return ret; 2288 if (netdev)
2289 mtu = netdev->mtu;
2290 else
2291 mtu = ETH_DATA_LEN;
2292
2293 ret = hclge_set_mtu(handle, mtu);
2294 if (ret) {
2295 dev_err(&hdev->pdev->dev,
2296 "set mtu failed ret=%d\n", ret);
2297 return ret;
2298 }
2299
2300 return 0;
2246} 2301}
2247 2302
2248static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2303static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
@@ -2381,6 +2436,7 @@ static void hclge_service_timer(struct timer_list *t)
2381 struct hclge_dev *hdev = from_timer(hdev, t, service_timer); 2436 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2382 2437
2383 mod_timer(&hdev->service_timer, jiffies + HZ); 2438 mod_timer(&hdev->service_timer, jiffies + HZ);
2439 hdev->hw_stats.stats_timer++;
2384 hclge_task_schedule(hdev); 2440 hclge_task_schedule(hdev);
2385} 2441}
2386 2442
@@ -2780,9 +2836,13 @@ static void hclge_service_task(struct work_struct *work)
2780 struct hclge_dev *hdev = 2836 struct hclge_dev *hdev =
2781 container_of(work, struct hclge_dev, service_task); 2837 container_of(work, struct hclge_dev, service_task);
2782 2838
2839 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2840 hclge_update_stats_for_all(hdev);
2841 hdev->hw_stats.stats_timer = 0;
2842 }
2843
2783 hclge_update_speed_duplex(hdev); 2844 hclge_update_speed_duplex(hdev);
2784 hclge_update_link_status(hdev); 2845 hclge_update_link_status(hdev);
2785 hclge_update_stats_for_all(hdev);
2786 hclge_service_complete(hdev); 2846 hclge_service_complete(hdev);
2787} 2847}
2788 2848
@@ -4197,6 +4257,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
4197 const unsigned char *new_addr = (const unsigned char *)p; 4257 const unsigned char *new_addr = (const unsigned char *)p;
4198 struct hclge_vport *vport = hclge_get_vport(handle); 4258 struct hclge_vport *vport = hclge_get_vport(handle);
4199 struct hclge_dev *hdev = vport->back; 4259 struct hclge_dev *hdev = vport->back;
4260 int ret;
4200 4261
4201 /* mac addr check */ 4262 /* mac addr check */
4202 if (is_zero_ether_addr(new_addr) || 4263 if (is_zero_ether_addr(new_addr) ||
@@ -4208,14 +4269,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
4208 return -EINVAL; 4269 return -EINVAL;
4209 } 4270 }
4210 4271
4211 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); 4272 ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
4273 if (ret)
4274 dev_warn(&hdev->pdev->dev,
4275 "remove old uc mac address fail, ret =%d.\n",
4276 ret);
4212 4277
4213 if (!hclge_add_uc_addr(handle, new_addr)) { 4278 ret = hclge_add_uc_addr(handle, new_addr);
4214 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 4279 if (ret) {
4215 return 0; 4280 dev_err(&hdev->pdev->dev,
4281 "add uc mac address fail, ret =%d.\n",
4282 ret);
4283
4284 ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr);
4285 if (ret) {
4286 dev_err(&hdev->pdev->dev,
4287 "restore uc mac address fail, ret =%d.\n",
4288 ret);
4289 }
4290
4291 return -EIO;
4216 } 4292 }
4217 4293
4218 return -EIO; 4294 ret = hclge_mac_pause_addr_cfg(hdev, new_addr);
4295 if (ret) {
4296 dev_err(&hdev->pdev->dev,
4297 "configure mac pause address fail, ret =%d.\n",
4298 ret);
4299 return -EIO;
4300 }
4301
4302 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
4303
4304 return 0;
4219} 4305}
4220 4306
4221static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 4307static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
@@ -4241,6 +4327,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
4241 return 0; 4327 return 0;
4242} 4328}
4243 4329
4330#define HCLGE_FILTER_TYPE_VF 0
4331#define HCLGE_FILTER_TYPE_PORT 1
4332
4333static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
4334{
4335 struct hclge_vport *vport = hclge_get_vport(handle);
4336 struct hclge_dev *hdev = vport->back;
4337
4338 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
4339}
4340
4244int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 4341int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
4245 bool is_kill, u16 vlan, u8 qos, __be16 proto) 4342 bool is_kill, u16 vlan, u8 qos, __be16 proto)
4246{ 4343{
@@ -4469,8 +4566,6 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
4469 4566
4470static int hclge_init_vlan_config(struct hclge_dev *hdev) 4567static int hclge_init_vlan_config(struct hclge_dev *hdev)
4471{ 4568{
4472#define HCLGE_FILTER_TYPE_VF 0
4473#define HCLGE_FILTER_TYPE_PORT 1
4474#define HCLGE_DEF_VLAN_TYPE 0x8100 4569#define HCLGE_DEF_VLAN_TYPE 0x8100
4475 4570
4476 struct hnae3_handle *handle; 4571 struct hnae3_handle *handle;
@@ -4542,16 +4637,21 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
4542 struct hclge_config_max_frm_size_cmd *req; 4637 struct hclge_config_max_frm_size_cmd *req;
4543 struct hclge_dev *hdev = vport->back; 4638 struct hclge_dev *hdev = vport->back;
4544 struct hclge_desc desc; 4639 struct hclge_desc desc;
4640 int max_frm_size;
4545 int ret; 4641 int ret;
4546 4642
4547 if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU)) 4643 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4644
4645 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
4646 max_frm_size > HCLGE_MAC_MAX_FRAME)
4548 return -EINVAL; 4647 return -EINVAL;
4549 4648
4550 hdev->mps = new_mtu; 4649 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
4650
4551 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 4651 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
4552 4652
4553 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 4653 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
4554 req->max_frm_size = cpu_to_le16(new_mtu); 4654 req->max_frm_size = cpu_to_le16(max_frm_size);
4555 4655
4556 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4656 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4557 if (ret) { 4657 if (ret) {
@@ -4559,6 +4659,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
4559 return ret; 4659 return ret;
4560 } 4660 }
4561 4661
4662 hdev->mps = max_frm_size;
4663
4562 return 0; 4664 return 0;
4563} 4665}
4564 4666
@@ -4689,22 +4791,19 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
4689 4791
4690static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 4792static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
4691{ 4793{
4692 enum hclge_fc_mode fc_mode;
4693 int ret; 4794 int ret;
4694 4795
4695 if (rx_en && tx_en) 4796 if (rx_en && tx_en)
4696 fc_mode = HCLGE_FC_FULL; 4797 hdev->fc_mode_last_time = HCLGE_FC_FULL;
4697 else if (rx_en && !tx_en) 4798 else if (rx_en && !tx_en)
4698 fc_mode = HCLGE_FC_RX_PAUSE; 4799 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
4699 else if (!rx_en && tx_en) 4800 else if (!rx_en && tx_en)
4700 fc_mode = HCLGE_FC_TX_PAUSE; 4801 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
4701 else 4802 else
4702 fc_mode = HCLGE_FC_NONE; 4803 hdev->fc_mode_last_time = HCLGE_FC_NONE;
4703 4804
4704 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 4805 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
4705 hdev->fc_mode_last_time = fc_mode;
4706 return 0; 4806 return 0;
4707 }
4708 4807
4709 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 4808 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
4710 if (ret) { 4809 if (ret) {
@@ -4713,7 +4812,7 @@ static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
4713 return ret; 4812 return ret;
4714 } 4813 }
4715 4814
4716 hdev->tm_info.fc_mode = fc_mode; 4815 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
4717 4816
4718 return 0; 4817 return 0;
4719} 4818}
@@ -5482,6 +5581,7 @@ static const struct hnae3_ae_ops hclge_ops = {
5482 .get_sset_count = hclge_get_sset_count, 5581 .get_sset_count = hclge_get_sset_count,
5483 .get_fw_version = hclge_get_fw_version, 5582 .get_fw_version = hclge_get_fw_version,
5484 .get_mdix_mode = hclge_get_mdix_mode, 5583 .get_mdix_mode = hclge_get_mdix_mode,
5584 .enable_vlan_filter = hclge_enable_vlan_filter,
5485 .set_vlan_filter = hclge_set_port_vlan_filter, 5585 .set_vlan_filter = hclge_set_port_vlan_filter,
5486 .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 5586 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
5487 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, 5587 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
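Note: hclge_set_mtu() above now converts the requested MTU into an on-wire maximum frame size before programming the MAC. A small sketch of that arithmetic, assuming the usual Ethernet constants (header 14, FCS 4, VLAN tag 4, default MTU 1500); the DEMO_* names and errno-style return are illustration only:

#include <errno.h>
#include <stdio.h>

#define DEMO_ETH_HLEN          14
#define DEMO_ETH_FCS_LEN        4
#define DEMO_VLAN_HLEN          4
#define DEMO_ETH_DATA_LEN    1500
#define DEMO_MAC_MIN_FRAME     64
#define DEMO_MAC_MAX_FRAME   9728
#define DEMO_MAC_DEFAULT_FRAME \
	(DEMO_ETH_HLEN + DEMO_ETH_FCS_LEN + DEMO_VLAN_HLEN + DEMO_ETH_DATA_LEN)

/* Translate an MTU into the max frame size programmed into the MAC,
 * mirroring the bounds checks hclge_set_mtu() performs above. */
static int demo_mtu_to_frame_size(int new_mtu, int *max_frm_size)
{
	int frame = new_mtu + DEMO_ETH_HLEN + DEMO_ETH_FCS_LEN + DEMO_VLAN_HLEN;

	if (frame < DEMO_MAC_MIN_FRAME || frame > DEMO_MAC_MAX_FRAME)
		return -EINVAL;

	if (frame < DEMO_MAC_DEFAULT_FRAME)     /* never shrink below the default frame */
		frame = DEMO_MAC_DEFAULT_FRAME;

	*max_frm_size = frame;
	return 0;
}

int main(void)
{
	int frame;

	if (!demo_mtu_to_frame_size(1500, &frame))
		printf("MTU 1500 -> max frame %d\n", frame);    /* 1522 */
	if (!demo_mtu_to_frame_size(9000, &frame))
		printf("MTU 9000 -> max frame %d\n", frame);    /* 9022 */
	return 0;
}
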
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 28cc063306c4..eeb6c8d66e4e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -101,6 +101,11 @@
101/* CMDQ register bits for RX event(=MBX event) */ 101/* CMDQ register bits for RX event(=MBX event) */
102#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1 102#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
103 103
104#define HCLGE_MAC_DEFAULT_FRAME \
105 (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
106#define HCLGE_MAC_MIN_FRAME 64
107#define HCLGE_MAC_MAX_FRAME 9728
108
104enum HCLGE_DEV_STATE { 109enum HCLGE_DEV_STATE {
105 HCLGE_STATE_REINITING, 110 HCLGE_STATE_REINITING,
106 HCLGE_STATE_DOWN, 111 HCLGE_STATE_DOWN,
@@ -112,6 +117,7 @@ enum HCLGE_DEV_STATE {
112 HCLGE_STATE_RST_HANDLING, 117 HCLGE_STATE_RST_HANDLING,
113 HCLGE_STATE_MBX_SERVICE_SCHED, 118 HCLGE_STATE_MBX_SERVICE_SCHED,
114 HCLGE_STATE_MBX_HANDLING, 119 HCLGE_STATE_MBX_HANDLING,
120 HCLGE_STATE_STATISTICS_UPDATING,
115 HCLGE_STATE_MAX 121 HCLGE_STATE_MAX
116}; 122};
117 123
@@ -381,14 +387,23 @@ struct hclge_mac_stats {
381 u64 mac_tx_multi_pkt_num; 387 u64 mac_tx_multi_pkt_num;
382 u64 mac_tx_broad_pkt_num; 388 u64 mac_tx_broad_pkt_num;
383 u64 mac_tx_undersize_pkt_num; 389 u64 mac_tx_undersize_pkt_num;
384 u64 mac_tx_overrsize_pkt_num; 390 u64 mac_tx_oversize_pkt_num;
385 u64 mac_tx_64_oct_pkt_num; 391 u64 mac_tx_64_oct_pkt_num;
386 u64 mac_tx_65_127_oct_pkt_num; 392 u64 mac_tx_65_127_oct_pkt_num;
387 u64 mac_tx_128_255_oct_pkt_num; 393 u64 mac_tx_128_255_oct_pkt_num;
388 u64 mac_tx_256_511_oct_pkt_num; 394 u64 mac_tx_256_511_oct_pkt_num;
389 u64 mac_tx_512_1023_oct_pkt_num; 395 u64 mac_tx_512_1023_oct_pkt_num;
390 u64 mac_tx_1024_1518_oct_pkt_num; 396 u64 mac_tx_1024_1518_oct_pkt_num;
391 u64 mac_tx_1519_max_oct_pkt_num; 397 u64 mac_tx_1519_2047_oct_pkt_num;
398 u64 mac_tx_2048_4095_oct_pkt_num;
399 u64 mac_tx_4096_8191_oct_pkt_num;
400 u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
401 u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
402 u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */
403 u64 mac_tx_12288_16383_oct_pkt_num;
404 u64 mac_tx_1519_max_good_oct_pkt_num;
405 u64 mac_tx_1519_max_bad_oct_pkt_num;
406
392 u64 mac_rx_total_pkt_num; 407 u64 mac_rx_total_pkt_num;
393 u64 mac_rx_total_oct_num; 408 u64 mac_rx_total_oct_num;
394 u64 mac_rx_good_pkt_num; 409 u64 mac_rx_good_pkt_num;
@@ -399,33 +414,43 @@ struct hclge_mac_stats {
399 u64 mac_rx_multi_pkt_num; 414 u64 mac_rx_multi_pkt_num;
400 u64 mac_rx_broad_pkt_num; 415 u64 mac_rx_broad_pkt_num;
401 u64 mac_rx_undersize_pkt_num; 416 u64 mac_rx_undersize_pkt_num;
402 u64 mac_rx_overrsize_pkt_num; 417 u64 mac_rx_oversize_pkt_num;
403 u64 mac_rx_64_oct_pkt_num; 418 u64 mac_rx_64_oct_pkt_num;
404 u64 mac_rx_65_127_oct_pkt_num; 419 u64 mac_rx_65_127_oct_pkt_num;
405 u64 mac_rx_128_255_oct_pkt_num; 420 u64 mac_rx_128_255_oct_pkt_num;
406 u64 mac_rx_256_511_oct_pkt_num; 421 u64 mac_rx_256_511_oct_pkt_num;
407 u64 mac_rx_512_1023_oct_pkt_num; 422 u64 mac_rx_512_1023_oct_pkt_num;
408 u64 mac_rx_1024_1518_oct_pkt_num; 423 u64 mac_rx_1024_1518_oct_pkt_num;
409 u64 mac_rx_1519_max_oct_pkt_num; 424 u64 mac_rx_1519_2047_oct_pkt_num;
410 425 u64 mac_rx_2048_4095_oct_pkt_num;
411 u64 mac_trans_fragment_pkt_num; 426 u64 mac_rx_4096_8191_oct_pkt_num;
412 u64 mac_trans_undermin_pkt_num; 427 u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */
413 u64 mac_trans_jabber_pkt_num; 428 u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
414 u64 mac_trans_err_all_pkt_num; 429 u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
415 u64 mac_trans_from_app_good_pkt_num; 430 u64 mac_rx_12288_16383_oct_pkt_num;
416 u64 mac_trans_from_app_bad_pkt_num; 431 u64 mac_rx_1519_max_good_oct_pkt_num;
417 u64 mac_rcv_fragment_pkt_num; 432 u64 mac_rx_1519_max_bad_oct_pkt_num;
418 u64 mac_rcv_undermin_pkt_num; 433
419 u64 mac_rcv_jabber_pkt_num; 434 u64 mac_tx_fragment_pkt_num;
420 u64 mac_rcv_fcs_err_pkt_num; 435 u64 mac_tx_undermin_pkt_num;
421 u64 mac_rcv_send_app_good_pkt_num; 436 u64 mac_tx_jabber_pkt_num;
422 u64 mac_rcv_send_app_bad_pkt_num; 437 u64 mac_tx_err_all_pkt_num;
438 u64 mac_tx_from_app_good_pkt_num;
439 u64 mac_tx_from_app_bad_pkt_num;
440 u64 mac_rx_fragment_pkt_num;
441 u64 mac_rx_undermin_pkt_num;
442 u64 mac_rx_jabber_pkt_num;
443 u64 mac_rx_fcs_err_pkt_num;
444 u64 mac_rx_send_app_good_pkt_num;
445 u64 mac_rx_send_app_bad_pkt_num;
423}; 446};
424 447
448#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
425struct hclge_hw_stats { 449struct hclge_hw_stats {
426 struct hclge_mac_stats mac_stats; 450 struct hclge_mac_stats mac_stats;
427 struct hclge_64_bit_stats all_64_bit_stats; 451 struct hclge_64_bit_stats all_64_bit_stats;
428 struct hclge_32_bit_stats all_32_bit_stats; 452 struct hclge_32_bit_stats all_32_bit_stats;
453 u32 stats_timer;
429}; 454};
430 455
431struct hclge_vlan_type_cfg { 456struct hclge_vlan_type_cfg {
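Note: the new stats_timer field above throttles the hardware statistics refresh to once every HCLGE_STATS_TIMER_INTERVAL ticks of the 1 Hz service timer. A tiny sketch of that throttling (a plain loop stands in for the kernel timer; the demo_* names are hypothetical):

#include <stdio.h>

#define DEMO_STATS_TIMER_INTERVAL (60 * 5)    /* refresh at most once per 300 ticks */

struct demo_dev {
	unsigned int stats_timer;             /* ticks since the last refresh */
};

static void demo_update_stats_for_all(struct demo_dev *hdev)
{
	(void)hdev;
	printf("refreshing hardware statistics\n");
}

/* Called once per timer tick; mirrors how hclge_service_task() above only
 * refreshes stats after stats_timer reaches the interval. */
static void demo_service_task(struct demo_dev *hdev)
{
	if (hdev->stats_timer >= DEMO_STATS_TIMER_INTERVAL) {
		demo_update_stats_for_all(hdev);
		hdev->stats_timer = 0;
	}
}

int main(void)
{
	struct demo_dev hdev = { 0 };
	int tick;

	for (tick = 0; tick < 601; tick++) {  /* ~10 minutes of 1 Hz ticks */
		hdev.stats_timer++;           /* hclge_service_timer() does this */
		demo_service_task(&hdev);
	}
	return 0;
}
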
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index ea9355d82560..36bd79a77940 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
138 return hclge_cmd_send(&hdev->hw, &desc, 1); 138 return hclge_cmd_send(&hdev->hw, &desc, 1);
139} 139}
140 140
141static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
142 u8 pause_trans_gap, u16 pause_trans_time)
143{
144 struct hclge_cfg_pause_param_cmd *pause_param;
145 struct hclge_desc desc;
146
147 pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
148
149 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
150
151 ether_addr_copy(pause_param->mac_addr, addr);
152 pause_param->pause_trans_gap = pause_trans_gap;
153 pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
154
155 return hclge_cmd_send(&hdev->hw, &desc, 1);
156}
157
158int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
159{
160 struct hclge_cfg_pause_param_cmd *pause_param;
161 struct hclge_desc desc;
162 u16 trans_time;
163 u8 trans_gap;
164 int ret;
165
166 pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
167
168 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
169
170 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
171 if (ret)
172 return ret;
173
174 trans_gap = pause_param->pause_trans_gap;
175 trans_time = le16_to_cpu(pause_param->pause_trans_time);
176
177 return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap,
178 trans_time);
179}
180
141static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) 181static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
142{ 182{
143 u8 tc; 183 u8 tc;
@@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
1056 return hclge_tm_schd_mode_hw(hdev); 1096 return hclge_tm_schd_mode_hw(hdev);
1057} 1097}
1058 1098
1099static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev)
1100{
1101 struct hclge_mac *mac = &hdev->hw.mac;
1102
1103 return hclge_mac_pause_param_cfg(hdev, mac->mac_addr,
1104 HCLGE_DEFAULT_PAUSE_TRANS_GAP,
1105 HCLGE_DEFAULT_PAUSE_TRANS_TIME);
1106}
1107
1059static int hclge_pfc_setup_hw(struct hclge_dev *hdev) 1108static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1060{ 1109{
1061 u8 enable_bitmap = 0; 1110 u8 enable_bitmap = 0;
@@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
1102 int ret; 1151 int ret;
1103 u8 i; 1152 u8 i;
1104 1153
1105 if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) 1154 if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
1106 return hclge_mac_pause_setup_hw(hdev); 1155 ret = hclge_mac_pause_setup_hw(hdev);
1156 if (ret)
1157 return ret;
1158
1159 return hclge_mac_pause_param_setup_hw(hdev);
1160 }
1107 1161
1108 /* Only DCB-supported dev supports qset back pressure and pfc cmd */ 1162 /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1109 if (!hnae3_dev_dcb_supported(hdev)) 1163 if (!hnae3_dev_dcb_supported(hdev))
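Note: hclge_mac_pause_param_cfg() above packs the MAC address, transmit gap and a little-endian transmit time into a command payload. A userspace sketch of that packing, with the field layout taken from hclge_cfg_pause_param_cmd in the hclge_tm.h hunk below (the byte-buffer packing here is illustrative, not the firmware ABI):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_ETH_ALEN 6

struct demo_pause_param_cmd {            /* mirrors hclge_cfg_pause_param_cmd below */
	uint8_t mac_addr[DEMO_ETH_ALEN];
	uint8_t pause_trans_gap;
	uint8_t rsvd;
	uint8_t pause_trans_time[2];     /* little-endian u16, kept as bytes here */
};

/* Fill the pause-parameter payload the way hclge_mac_pause_param_cfg() above
 * does: copy the MAC, set the gap, store the time little-endian. */
static void demo_fill_pause_param(struct demo_pause_param_cmd *cmd,
				  const uint8_t *addr, uint8_t gap, uint16_t time)
{
	memset(cmd, 0, sizeof(*cmd));
	memcpy(cmd->mac_addr, addr, DEMO_ETH_ALEN);
	cmd->pause_trans_gap = gap;
	cmd->pause_trans_time[0] = (uint8_t)(time & 0xff);
	cmd->pause_trans_time[1] = (uint8_t)(time >> 8);
}

int main(void)
{
	const uint8_t mac[DEMO_ETH_ALEN] = { 0x00, 0x18, 0x2d, 0x01, 0x02, 0x03 };
	struct demo_pause_param_cmd cmd;
	int i;

	/* defaults match HCLGE_DEFAULT_PAUSE_TRANS_GAP/TIME in hclge_tm.h */
	demo_fill_pause_param(&cmd, mac, 0xFF, 0xFFFF);
	for (i = 0; i < (int)sizeof(cmd); i++)
		printf("%02x ", ((uint8_t *)&cmd)[i]);
	printf("\n");
	return 0;
}
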
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 16f413956f17..5401e7559437 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -18,6 +18,9 @@
18 18
19#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0) 19#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
20 20
21#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF
22#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
23
21/* SP or DWRR */ 24/* SP or DWRR */
22#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0) 25#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
23#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE) 26#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
@@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd {
99 u8 pri_en_bitmap; 102 u8 pri_en_bitmap;
100}; 103};
101 104
105struct hclge_cfg_pause_param_cmd {
106 u8 mac_addr[ETH_ALEN];
107 u8 pause_trans_gap;
108 u8 rsvd;
109 __le16 pause_trans_time;
110};
111
102struct hclge_port_shapping_cmd { 112struct hclge_port_shapping_cmd {
103 __le32 port_shapping_para; 113 __le32 port_shapping_para;
104}; 114};
@@ -119,4 +129,5 @@ int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
119int hclge_tm_map_cfg(struct hclge_dev *hdev); 129int hclge_tm_map_cfg(struct hclge_dev *hdev);
120int hclge_tm_init_hw(struct hclge_dev *hdev); 130int hclge_tm_init_hw(struct hclge_dev *hdev);
121int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); 131int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
132int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
122#endif 133#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 31866057bc7d..655f522e44aa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -49,7 +49,7 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
49 return status; 49 return status;
50 } 50 }
51 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 51 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
52 le32_to_cpu(desc.data[4]); 52 le32_to_cpu(desc.data[1]);
53 53
54 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, 54 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
55 true); 55 true);
@@ -63,7 +63,7 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
63 return status; 63 return status;
64 } 64 }
65 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 65 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
66 le32_to_cpu(desc.data[4]); 66 le32_to_cpu(desc.data[1]);
67 } 67 }
68 68
69 return 0; 69 return 0;
@@ -105,7 +105,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
105 for (i = 0; i < hdev->num_tqps; i++) { 105 for (i = 0; i < hdev->num_tqps; i++) {
106 struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], 106 struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
107 struct hclgevf_tqp, q); 107 struct hclgevf_tqp, q);
108 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd", 108 snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
109 tqp->index); 109 tqp->index);
110 buff += ETH_GSTRING_LEN; 110 buff += ETH_GSTRING_LEN;
111 } 111 }
@@ -113,7 +113,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
113 for (i = 0; i < hdev->num_tqps; i++) { 113 for (i = 0; i < hdev->num_tqps; i++) {
114 struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], 114 struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
115 struct hclgevf_tqp, q); 115 struct hclgevf_tqp, q);
116 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd", 116 snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
117 tqp->index); 117 tqp->index);
118 buff += ETH_GSTRING_LEN; 118 buff += ETH_GSTRING_LEN;
119 } 119 }
@@ -1288,7 +1288,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
1288 pci_set_master(pdev); 1288 pci_set_master(pdev);
1289 hw = &hdev->hw; 1289 hw = &hdev->hw;
1290 hw->hdev = hdev; 1290 hw->hdev = hdev;
1291 hw->io_base = pci_iomap(pdev, 2, 0);; 1291 hw->io_base = pci_iomap(pdev, 2, 0);
1292 if (!hw->io_base) { 1292 if (!hw->io_base) {
1293 dev_err(&pdev->dev, "can't map configuration register space\n"); 1293 dev_err(&pdev->dev, "can't map configuration register space\n");
1294 ret = -ENOMEM; 1294 ret = -ENOMEM;