Diffstat (limited to 'drivers/net/ethernet')

-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c |   2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c                     |   9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c          |   7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c           |  30
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c      |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c              |   2
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.c                   |   6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c       |  10
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c                | 143
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c      |   7

10 files changed, 139 insertions, 80 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 58c6231aaa00..87dece0e745d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -98,7 +98,7 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
 	  .reset_level = HNAE3_GLOBAL_RESET },
 	{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
 	  .reset_level = HNAE3_GLOBAL_RESET },
-	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
+	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
 	  .reset_level = HNAE3_GLOBAL_RESET },
 	{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
 	  .reset_level = HNAE3_GLOBAL_RESET },
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f83f97ffe8b..2e5172f61564 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1984,8 +1984,11 @@ static void __ibmvnic_reset(struct work_struct *work)
 	rwi = get_next_rwi(adapter);
 	while (rwi) {
 		if (adapter->state == VNIC_REMOVING ||
-		    adapter->state == VNIC_REMOVED)
-			goto out;
+		    adapter->state == VNIC_REMOVED) {
+			kfree(rwi);
+			rc = EBUSY;
+			break;
+		}
 
 		if (adapter->force_reset_recovery) {
 			adapter->force_reset_recovery = false;
@@ -2011,7 +2014,7 @@ static void __ibmvnic_reset(struct work_struct *work)
 		netdev_dbg(adapter->netdev, "Reset failed\n");
 		free_all_rwi(adapter);
 	}
-out:
+
 	adapter->resetting = false;
 	if (we_lock_rtnl)
 		rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index dc034f4e8cf6..1ce2397306b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -36,6 +36,7 @@
 #include <net/vxlan.h>
 #include <net/mpls.h>
 #include <net/xdp_sock.h>
+#include <net/xfrm.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -2623,7 +2624,7 @@ adjust_by_size:
 		/* 16K ints/sec to 9.2K ints/sec */
 		avg_wire_size *= 15;
 		avg_wire_size += 11452;
-	} else if (avg_wire_size <= 1980) {
+	} else if (avg_wire_size < 1968) {
 		/* 9.2K ints/sec to 8K ints/sec */
 		avg_wire_size *= 5;
 		avg_wire_size += 22420;
@@ -2656,6 +2657,8 @@ adjust_by_size:
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
 	case IXGBE_LINK_SPEED_1GB_FULL:
 	case IXGBE_LINK_SPEED_10_FULL:
+		if (avg_wire_size > 8064)
+			avg_wire_size = 8064;
 		itr += DIV_ROUND_UP(avg_wire_size,
 				    IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
 		       IXGBE_ITR_ADAPTIVE_MIN_INC;
@@ -8698,7 +8701,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 #endif /* IXGBE_FCOE */
 
 #ifdef CONFIG_IXGBE_IPSEC
-	if (secpath_exists(skb) &&
+	if (xfrm_offload(skb) &&
 	    !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
 		goto out_drop;
 #endif
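
The secpath_exists() -> xfrm_offload() change above (mirrored in the ixgbevf hunk below) narrows the transmit-offload test: a secpath is attached to any skb that went through IPsec processing, including packets that were only decrypted on receive and are now being forwarded, whereas xfrm_offload() is non-NULL only when the skb carries hardware-offload state. A minimal sketch of the distinction, assuming kernel context; the helper name wants_hw_ipsec() is hypothetical:

	#include <net/xfrm.h>

	/* True only for skbs set up for hardware IPsec offload; false for
	 * skbs that merely carry a secpath from receive-side processing,
	 * which must not be fed to the device's TX offload path. */
	static bool wants_hw_ipsec(struct sk_buff *skb)
	{
		return xfrm_offload(skb) != NULL;
	}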
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index ad802a8909e0..a37dcd140f63 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -642,19 +642,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
 bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 			    struct ixgbe_ring *tx_ring, int napi_budget)
 {
+	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
 	unsigned int total_packets = 0, total_bytes = 0;
-	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
-	unsigned int budget = q_vector->tx.work_limit;
 	struct xdp_umem *umem = tx_ring->xsk_umem;
 	union ixgbe_adv_tx_desc *tx_desc;
 	struct ixgbe_tx_buffer *tx_bi;
-	bool xmit_done;
+	u32 xsk_frames = 0;
 
-	tx_bi = &tx_ring->tx_buffer_info[i];
-	tx_desc = IXGBE_TX_DESC(tx_ring, i);
-	i -= tx_ring->count;
+	tx_bi = &tx_ring->tx_buffer_info[ntc];
+	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
 
-	do {
+	while (ntc != ntu) {
 		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
 			break;
 
@@ -670,22 +668,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 
 		tx_bi++;
 		tx_desc++;
-		i++;
-		if (unlikely(!i)) {
-			i -= tx_ring->count;
+		ntc++;
+		if (unlikely(ntc == tx_ring->count)) {
+			ntc = 0;
 			tx_bi = tx_ring->tx_buffer_info;
 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
 		}
 
 		/* issue prefetch for next Tx descriptor */
 		prefetch(tx_desc);
+	}
 
-		/* update budget accounting */
-		budget--;
-	} while (likely(budget));
-
-	i += tx_ring->count;
-	tx_ring->next_to_clean = i;
+	tx_ring->next_to_clean = ntc;
 
 	u64_stats_update_begin(&tx_ring->syncp);
 	tx_ring->stats.bytes += total_bytes;
@@ -704,9 +698,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 		xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
 	}
 
-	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
-
-	return budget > 0 && xmit_done;
+	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
 }
 
 int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
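
The rewritten ixgbe_clean_xdp_tx_irq() above bounds its cleanup by ring occupancy (next_to_clean chasing next_to_use) instead of a software budget, which also retires the old negative-index trick (i -= tx_ring->count). A runnable userspace model of that loop shape; the ring and descriptor types, and the done flag standing in for the hardware's DD writeback bit, are hypothetical:

	#include <stdbool.h>
	#include <stdio.h>

	#define RING_SIZE 8

	struct desc { bool done; };

	struct ring {
		struct desc desc[RING_SIZE];
		unsigned int count;          /* number of descriptors */
		unsigned int next_to_clean;  /* first slot not yet cleaned */
		unsigned int next_to_use;    /* first slot not yet posted */
	};

	static unsigned int clean_ring(struct ring *r)
	{
		unsigned int ntc = r->next_to_clean, ntu = r->next_to_use;
		unsigned int cleaned = 0;

		while (ntc != ntu) {
			if (!r->desc[ntc].done)
				break;              /* hardware not done yet */
			r->desc[ntc].done = false;  /* recycle the slot */
			cleaned++;
			if (++ntc == r->count)      /* wrap like the HW ring */
				ntc = 0;
		}
		r->next_to_clean = ntc;
		return cleaned;
	}

	int main(void)
	{
		struct ring r = { .count = RING_SIZE,
				  .next_to_clean = 6, .next_to_use = 2 };

		/* pretend hardware completed slots 6, 7 and 0 */
		r.desc[6].done = r.desc[7].done = r.desc[0].done = true;
		printf("cleaned %u, ntc now %u\n", clean_ring(&r),
		       r.next_to_clean);	/* cleaned 3, ntc now 1 */
		return 0;
	}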
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 75e93ce2ed99..076f2da36f27 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -30,6 +30,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/atomic.h>
+#include <net/xfrm.h>
 
 #include "ixgbevf.h"
 
@@ -4167,7 +4168,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
 	first->protocol = vlan_get_protocol(skb);
 
 #ifdef CONFIG_IXGBEVF_IPSEC
-	if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+	if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
 		goto out_drop;
 #endif
 	tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ef3f3d06ff1e..fce9b3a24347 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2240,7 +2240,7 @@ static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
 	for (i = 1; i <= dev->caps.num_ports; i++) {
 		if (mlx4_dev_port(dev, i, &port_cap)) {
 			mlx4_err(dev,
-				 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
+				 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
 		} else if ((dev->caps.dmfs_high_steer_mode !=
 			    MLX4_STEERING_DMFS_A0_DEFAULT) &&
 			   (port_cap.dmfs_optimized_state ==
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index d0a01e8f000a..b339125b2f09 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -232,9 +232,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
 
 	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
 	if (!laddr) {
-		printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
-		dev_kfree_skb(skb);
-		return NETDEV_TX_BUSY;
+		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
 	}
 
 	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
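
The sonic hunk above fixes a classic ndo_start_xmit error path: NETDEV_TX_BUSY asks the core to requeue and retransmit the same skb, so returning it after dev_kfree_skb() meant a later retry with freed memory. Once a driver decides to drop, it must consume the skb and return NETDEV_TX_OK; dev_kfree_skb_any() is the variant safe in the atomic contexts xmit runs in, and the ratelimited print keeps a mapping-failure storm out of the log. A condensed sketch of the corrected shape (dma_mapping_error() shown as the idiomatic check, where this driver tests the returned address directly):

	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (dma_mapping_error(lp->device, laddr)) {
		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n",
				   dev->name);
		dev_kfree_skb_any(skb);	/* we own the skb: consume it */
		return NETDEV_TX_OK;	/* dropped; do not ask for a retry */
	}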
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index d5bbe3d6048b..05981b54eaab 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 
 	type = cmsg_hdr->type;
 	switch (type) {
-	case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
-		nfp_flower_cmsg_portreify_rx(app, skb);
-		break;
 	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
 		nfp_flower_cmsg_portmod_rx(app, skb);
 		break;
@@ -328,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
 	struct nfp_flower_priv *priv = app->priv;
 	struct sk_buff_head *skb_head;
 
-	if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
-	    type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
+	if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
 		skb_head = &priv->cmsg_skbs_high;
 	else
 		skb_head = &priv->cmsg_skbs_low;
@@ -368,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
 	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
 		/* Acks from the NFP that the route is added - ignore. */
 		dev_consume_skb_any(skb);
+	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
+		/* Handle REIFY acks outside wq to prevent RTNL conflict. */
+		nfp_flower_cmsg_portreify_rx(app, skb);
+		dev_consume_skb_any(skb);
 	} else {
 		nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
 	}
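
The three nfp hunks are one logical move: PORT_REIFY acks leave the deferred control-message workqueue and are handled directly in the receive path, because (per the new comment) a thread holding RTNL may be waiting on such an ack while the workqueue is itself blocked needing RTNL. A small runnable model of that dispatch split, with hypothetical message types and handlers:

	#include <stdio.h>

	enum msg_type { MSG_PORT_REIFY, MSG_PORT_MOD, MSG_FLOW_STATS };

	/* Inline path: runs in the receiver, so an ack that another thread
	 * is blocked on (e.g. under a global lock) arrives immediately. */
	static void handle_inline(enum msg_type t, int id)
	{
		printf("msg %d/#%d handled inline\n", t, id);
	}

	/* Deferred path: a worker that may itself need the lock the waiter
	 * holds -- acceptable for everything that nobody waits on. */
	static void queue_for_worker(enum msg_type t, int id)
	{
		printf("msg %d/#%d queued for worker\n", t, id);
	}

	static void rx_dispatch(enum msg_type t, int id)
	{
		if (t == MSG_PORT_REIFY)
			handle_inline(t, id);
		else
			queue_for_worker(t, id);
	}

	int main(void)
	{
		rx_dispatch(MSG_PORT_REIFY, 1);
		rx_dispatch(MSG_FLOW_STATS, 2);
		return 0;
	}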
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ecca794c55e2..05d2b478c99b 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -713,6 +713,21 @@ struct nv_skb_map {
 	struct nv_skb_map *next_tx_ctx;
 };
 
+struct nv_txrx_stats {
+	u64 stat_rx_packets;
+	u64 stat_rx_bytes; /* not always available in HW */
+	u64 stat_rx_missed_errors;
+	u64 stat_rx_dropped;
+	u64 stat_tx_packets; /* not always available in HW */
+	u64 stat_tx_bytes;
+	u64 stat_tx_dropped;
+};
+
+#define nv_txrx_stats_inc(member) \
+		__this_cpu_inc(np->txrx_stats->member)
+#define nv_txrx_stats_add(member, count) \
+		__this_cpu_add(np->txrx_stats->member, (count))
+
 /*
  * SMP locking:
  * All hardware access under netdev_priv(dev)->lock, except the performance
@@ -797,10 +812,7 @@ struct fe_priv {
 
 	/* RX software stats */
 	struct u64_stats_sync swstats_rx_syncp;
-	u64 stat_rx_packets;
-	u64 stat_rx_bytes; /* not always available in HW */
-	u64 stat_rx_missed_errors;
-	u64 stat_rx_dropped;
+	struct nv_txrx_stats __percpu *txrx_stats;
 
 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -826,9 +838,6 @@ struct fe_priv {
 
 	/* TX software stats */
 	struct u64_stats_sync swstats_tx_syncp;
-	u64 stat_tx_packets; /* not always available in HW */
-	u64 stat_tx_bytes;
-	u64 stat_tx_dropped;
 
 	/* msi/msi-x fields */
 	u32 msi_flags;
@@ -1721,6 +1730,39 @@ static void nv_update_stats(struct net_device *dev)
 	}
 }
 
+static void nv_get_stats(int cpu, struct fe_priv *np,
+			 struct rtnl_link_stats64 *storage)
+{
+	struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
+	unsigned int syncp_start;
+	u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
+	u64 tx_packets, tx_bytes, tx_dropped;
+
+	do {
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
+		rx_packets = src->stat_rx_packets;
+		rx_bytes = src->stat_rx_bytes;
+		rx_dropped = src->stat_rx_dropped;
+		rx_missed_errors = src->stat_rx_missed_errors;
+	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
+
+	storage->rx_packets += rx_packets;
+	storage->rx_bytes += rx_bytes;
+	storage->rx_dropped += rx_dropped;
+	storage->rx_missed_errors += rx_missed_errors;
+
+	do {
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
+		tx_packets = src->stat_tx_packets;
+		tx_bytes = src->stat_tx_bytes;
+		tx_dropped = src->stat_tx_dropped;
+	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+
+	storage->tx_packets += tx_packets;
+	storage->tx_bytes += tx_bytes;
+	storage->tx_dropped += tx_dropped;
+}
+
 /*
  * nv_get_stats64: dev->ndo_get_stats64 function
  * Get latest stats value from the nic.
@@ -1733,7 +1775,7 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 	__releases(&netdev_priv(dev)->hwstats_lock)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	unsigned int syncp_start;
+	int cpu;
 
 	/*
 	 * Note: because HW stats are not always available and for
@@ -1746,20 +1788,8 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 	 */
 
 	/* software stats */
-	do {
-		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
-		storage->rx_packets = np->stat_rx_packets;
-		storage->rx_bytes = np->stat_rx_bytes;
-		storage->rx_dropped = np->stat_rx_dropped;
-		storage->rx_missed_errors = np->stat_rx_missed_errors;
-	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
-
-	do {
-		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
-		storage->tx_packets = np->stat_tx_packets;
-		storage->tx_bytes = np->stat_tx_bytes;
-		storage->tx_dropped = np->stat_tx_dropped;
-	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+	for_each_online_cpu(cpu)
+		nv_get_stats(cpu, np, storage);
 
 	/* If the nic supports hw counters then retrieve latest values */
 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -1827,7 +1857,7 @@ static int nv_alloc_rx(struct net_device *dev)
 	} else {
 packet_dropped:
 		u64_stats_update_begin(&np->swstats_rx_syncp);
-		np->stat_rx_dropped++;
+		nv_txrx_stats_inc(stat_rx_dropped);
 		u64_stats_update_end(&np->swstats_rx_syncp);
 		return 1;
 	}
@@ -1869,7 +1899,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 	} else {
 packet_dropped:
 		u64_stats_update_begin(&np->swstats_rx_syncp);
-		np->stat_rx_dropped++;
+		nv_txrx_stats_inc(stat_rx_dropped);
 		u64_stats_update_end(&np->swstats_rx_syncp);
 		return 1;
 	}
@@ -2013,7 +2043,7 @@ static void nv_drain_tx(struct net_device *dev)
 		}
 		if (nv_release_txskb(np, &np->tx_skb[i])) {
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		np->tx_skb[i].dma = 0;
@@ -2227,7 +2257,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* on DMA mapping error - drop the packet */
 		dev_kfree_skb_any(skb);
 		u64_stats_update_begin(&np->swstats_tx_syncp);
-		np->stat_tx_dropped++;
+		nv_txrx_stats_inc(stat_tx_dropped);
 		u64_stats_update_end(&np->swstats_tx_syncp);
 		return NETDEV_TX_OK;
 	}
@@ -2273,7 +2303,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			dev_kfree_skb_any(skb);
 			np->put_tx_ctx = start_tx_ctx;
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 			return NETDEV_TX_OK;
 		}
@@ -2384,7 +2414,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		/* on DMA mapping error - drop the packet */
 		dev_kfree_skb_any(skb);
 		u64_stats_update_begin(&np->swstats_tx_syncp);
-		np->stat_tx_dropped++;
+		nv_txrx_stats_inc(stat_tx_dropped);
 		u64_stats_update_end(&np->swstats_tx_syncp);
 		return NETDEV_TX_OK;
 	}
@@ -2431,7 +2461,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			dev_kfree_skb_any(skb);
 			np->put_tx_ctx = start_tx_ctx;
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 			return NETDEV_TX_OK;
 		}
@@ -2560,9 +2590,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
 			   && !(flags & NV_TX_RETRYCOUNT_MASK))
 				nv_legacybackoff_reseed(dev);
 		} else {
+			unsigned int len;
+
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_packets++;
-			np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+			nv_txrx_stats_inc(stat_tx_packets);
+			len = np->get_tx_ctx->skb->len;
+			nv_txrx_stats_add(stat_tx_bytes, len);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		bytes_compl += np->get_tx_ctx->skb->len;
@@ -2577,9 +2610,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
 			   && !(flags & NV_TX2_RETRYCOUNT_MASK))
 				nv_legacybackoff_reseed(dev);
 		} else {
+			unsigned int len;
+
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_packets++;
-			np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+			nv_txrx_stats_inc(stat_tx_packets);
+			len = np->get_tx_ctx->skb->len;
+			nv_txrx_stats_add(stat_tx_bytes, len);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		bytes_compl += np->get_tx_ctx->skb->len;
@@ -2627,9 +2663,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 				nv_legacybackoff_reseed(dev);
 			}
 		} else {
+			unsigned int len;
+
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_packets++;
-			np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+			nv_txrx_stats_inc(stat_tx_packets);
+			len = np->get_tx_ctx->skb->len;
+			nv_txrx_stats_add(stat_tx_bytes, len);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 
@@ -2806,6 +2845,15 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	}
 }
 
+static void rx_missing_handler(u32 flags, struct fe_priv *np)
+{
+	if (flags & NV_RX_MISSEDFRAME) {
+		u64_stats_update_begin(&np->swstats_rx_syncp);
+		nv_txrx_stats_inc(stat_rx_missed_errors);
+		u64_stats_update_end(&np->swstats_rx_syncp);
+	}
+}
+
 static int nv_rx_process(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2848,11 +2896,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
 			}
 			/* the rest are hard errors */
 			else {
-				if (flags & NV_RX_MISSEDFRAME) {
-					u64_stats_update_begin(&np->swstats_rx_syncp);
-					np->stat_rx_missed_errors++;
-					u64_stats_update_end(&np->swstats_rx_syncp);
-				}
+				rx_missing_handler(flags, np);
 				dev_kfree_skb(skb);
 				goto next_pkt;
 			}
@@ -2896,8 +2940,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&np->napi, skb);
 		u64_stats_update_begin(&np->swstats_rx_syncp);
-		np->stat_rx_packets++;
-		np->stat_rx_bytes += len;
+		nv_txrx_stats_inc(stat_rx_packets);
+		nv_txrx_stats_add(stat_rx_bytes, len);
 		u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
@@ -2982,8 +3026,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 			}
 			napi_gro_receive(&np->napi, skb);
 			u64_stats_update_begin(&np->swstats_rx_syncp);
-			np->stat_rx_packets++;
-			np->stat_rx_bytes += len;
+			nv_txrx_stats_inc(stat_rx_packets);
+			nv_txrx_stats_add(stat_rx_bytes, len);
 			u64_stats_update_end(&np->swstats_rx_syncp);
 		} else {
 			dev_kfree_skb(skb);
@@ -5651,6 +5695,12 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	SET_NETDEV_DEV(dev, &pci_dev->dev);
 	u64_stats_init(&np->swstats_rx_syncp);
 	u64_stats_init(&np->swstats_tx_syncp);
+	np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
+	if (!np->txrx_stats) {
+		pr_err("np->txrx_stats, alloc memory error.\n");
+		err = -ENOMEM;
+		goto out_alloc_percpu;
+	}
 
 	timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
 	timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
@@ -6060,6 +6110,8 @@ out_relreg:
 out_disable:
 	pci_disable_device(pci_dev);
 out_free:
+	free_percpu(np->txrx_stats);
+out_alloc_percpu:
 	free_netdev(dev);
 out:
 	return err;
@@ -6105,6 +6157,9 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
 static void nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
+	struct fe_priv *np = netdev_priv(dev);
+
+	free_percpu(np->txrx_stats);
 
 	unregister_netdev(dev);
 
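
The forcedeth conversion above replaces single shared u64 counters with per-CPU copies: hot paths bump only their own CPU's counters through __this_cpu_inc()/__this_cpu_add(), avoiding cross-CPU cache-line traffic, and the ndo_get_stats64 reader folds every online CPU's copy into the totals. A condensed kernel-style sketch of the lifecycle using the same APIs as the diff; the fragments assume surrounding driver context (len, storage), and the u64_stats begin/retry loops shown in nv_get_stats() are omitted:

	struct nv_txrx_stats __percpu *stats;
	int cpu;

	/* probe: one private copy of the counters per possible CPU */
	stats = alloc_percpu(struct nv_txrx_stats);
	if (!stats)
		return -ENOMEM;

	/* hot path: touch only this CPU's copy -- no atomics needed */
	__this_cpu_inc(stats->stat_rx_packets);
	__this_cpu_add(stats->stat_rx_bytes, len);

	/* reader: fold each online CPU's copy into the running totals */
	for_each_online_cpu(cpu) {
		struct nv_txrx_stats *src = per_cpu_ptr(stats, cpu);

		storage->rx_packets += src->stat_rx_packets;
		storage->rx_bytes += src->stat_rx_bytes;
	}

	/* teardown: release all per-CPU copies at once */
	free_percpu(stats);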
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4083019c547a..f97a4096f8fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -873,7 +873,12 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 	int ret;
 	u32 reg, val;
 
-	regmap_field_read(gmac->regmap_field, &val);
+	ret = regmap_field_read(gmac->regmap_field, &val);
+	if (ret) {
+		dev_err(priv->device, "Fail to read from regmap field.\n");
+		return ret;
+	}
+
 	reg = gmac->variant->default_syscon_value;
 	if (reg != val)
 		dev_warn(priv->device,