diff options
author | David Decotigny <david.decotigny@google.com> | 2011-11-16 07:15:13 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-11-16 23:14:02 -0500 |
commit | f5d827aece36300d0fe2135d7c2232c77ee07994 (patch) | |
tree | f7b1b74813c33fd0be6c148bb8e2986a74288798 /drivers/net/ethernet | |
parent | 1ec4f2d38bed30af4ef1ec1bde38e471df8f8ede (diff) |
forcedeth: implement ndo_get_stats64() API
This commit implements the ndo_get_stats64() API for forcedeth. Since
hardware stats are being updated from different contexts (process and
timer), this commit adds synchronization. For software stats, it
relies on the u64_stats_sync.h API.
Tested:
- 16-way SMP x86_64:
  RX bytes:7244556582 (7.2 GB) TX bytes:181904254 (181.9 MB)
- pktgen + loopback: identical rx_bytes/tx_bytes and rx_packets/tx_packets
Signed-off-by: David Decotigny <david.decotigny@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- | drivers/net/ethernet/nvidia/forcedeth.c | 197 |
1 file changed, 146 insertions, 51 deletions
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 0edc5a634021..5d94c337dea2 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c | |||
@@ -65,7 +65,8 @@ | |||
65 | #include <linux/slab.h> | 65 | #include <linux/slab.h> |
66 | #include <linux/uaccess.h> | 66 | #include <linux/uaccess.h> |
67 | #include <linux/prefetch.h> | 67 | #include <linux/prefetch.h> |
68 | #include <linux/io.h> | 68 | #include <linux/u64_stats_sync.h> |
69 | #include <linux/io.h> | ||
69 | 70 | ||
70 | #include <asm/irq.h> | 71 | #include <asm/irq.h> |
71 | #include <asm/system.h> | 72 | #include <asm/system.h> |
@@ -736,6 +737,16 @@ struct nv_skb_map { | |||
736 | * - tx setup is lockless: it relies on netif_tx_lock. Actual submission | 737 | * - tx setup is lockless: it relies on netif_tx_lock. Actual submission |
737 | * needs netdev_priv(dev)->lock :-( | 738 | * needs netdev_priv(dev)->lock :-( |
738 | * - set_multicast_list: preparation lockless, relies on netif_tx_lock. | 739 | * - set_multicast_list: preparation lockless, relies on netif_tx_lock. |
740 | * | ||
741 | * Hardware stats updates are protected by hwstats_lock: | ||
742 | * - updated by nv_do_stats_poll (timer). This is meant to avoid | ||
743 | * integer wraparound in the NIC stats registers, at low frequency | ||
744 | * (0.1 Hz) | ||
745 | * - updated by nv_get_ethtool_stats + nv_get_stats64 | ||
746 | * | ||
747 | * Software stats are accessed only through 64b synchronization points | ||
748 | * and are not subject to other synchronization techniques (single | ||
749 | * update thread on the TX or RX paths). | ||
739 | */ | 750 | */ |
740 | 751 | ||
741 | /* in dev: base, irq */ | 752 | /* in dev: base, irq */ |
@@ -745,9 +756,10 @@ struct fe_priv { | |||
745 | struct net_device *dev; | 756 | struct net_device *dev; |
746 | struct napi_struct napi; | 757 | struct napi_struct napi; |
747 | 758 | ||
748 | /* General data: | 759 | /* hardware stats are updated in syscall and timer */ |
749 | * Locking: spin_lock(&np->lock); */ | 760 | spinlock_t hwstats_lock; |
750 | struct nv_ethtool_stats estats; | 761 | struct nv_ethtool_stats estats; |
762 | |||
751 | int in_shutdown; | 763 | int in_shutdown; |
752 | u32 linkspeed; | 764 | u32 linkspeed; |
753 | int duplex; | 765 | int duplex; |
@@ -798,6 +810,12 @@ struct fe_priv { | |||
798 | u32 nic_poll_irq; | 810 | u32 nic_poll_irq; |
799 | int rx_ring_size; | 811 | int rx_ring_size; |
800 | 812 | ||
813 | /* RX software stats */ | ||
814 | struct u64_stats_sync swstats_rx_syncp; | ||
815 | u64 stat_rx_packets; | ||
816 | u64 stat_rx_bytes; /* not always available in HW */ | ||
817 | u64 stat_rx_missed_errors; | ||
818 | |||
801 | /* media detection workaround. | 819 | /* media detection workaround. |
802 | * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); | 820 | * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); |
803 | */ | 821 | */ |
@@ -820,6 +838,12 @@ struct fe_priv { | |||
820 | struct nv_skb_map *tx_end_flip; | 838 | struct nv_skb_map *tx_end_flip; |
821 | int tx_stop; | 839 | int tx_stop; |
822 | 840 | ||
841 | /* TX software stats */ | ||
842 | struct u64_stats_sync swstats_tx_syncp; | ||
843 | u64 stat_tx_packets; /* not always available in HW */ | ||
844 | u64 stat_tx_bytes; | ||
845 | u64 stat_tx_dropped; | ||
846 | |||
823 | /* msi/msi-x fields */ | 847 | /* msi/msi-x fields */ |
824 | u32 msi_flags; | 848 | u32 msi_flags; |
825 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; | 849 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; |
@@ -1635,11 +1659,19 @@ static void nv_mac_reset(struct net_device *dev) | |||
1635 | pci_push(base); | 1659 | pci_push(base); |
1636 | } | 1660 | } |
1637 | 1661 | ||
1638 | static void nv_get_hw_stats(struct net_device *dev) | 1662 | /* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */ |
1663 | static void nv_update_stats(struct net_device *dev) | ||
1639 | { | 1664 | { |
1640 | struct fe_priv *np = netdev_priv(dev); | 1665 | struct fe_priv *np = netdev_priv(dev); |
1641 | u8 __iomem *base = get_hwbase(dev); | 1666 | u8 __iomem *base = get_hwbase(dev); |
1642 | 1667 | ||
1668 | /* If it happens that this is run in top-half context, then | ||
1669 | * replace the spin_lock of hwstats_lock with | ||
1670 | * spin_lock_irqsave() in calling functions. */ | ||
1671 | WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half"); | ||
1672 | assert_spin_locked(&np->hwstats_lock); | ||
1673 | |||
1674 | /* query hardware */ | ||
1643 | np->estats.tx_bytes += readl(base + NvRegTxCnt); | 1675 | np->estats.tx_bytes += readl(base + NvRegTxCnt); |
1644 | np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); | 1676 | np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); |
1645 | np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); | 1677 | np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); |
@@ -1698,40 +1730,72 @@ static void nv_get_hw_stats(struct net_device *dev) | |||
1698 | } | 1730 | } |
1699 | 1731 | ||
1700 | /* | 1732 | /* |
1701 | * nv_get_stats: dev->get_stats function | 1733 | * nv_get_stats64: dev->ndo_get_stats64 function |
1702 | * Get latest stats value from the nic. | 1734 | * Get latest stats value from the nic. |
1703 | * Called with read_lock(&dev_base_lock) held for read - | 1735 | * Called with read_lock(&dev_base_lock) held for read - |
1704 | * only synchronized against unregister_netdevice. | 1736 | * only synchronized against unregister_netdevice. |
1705 | */ | 1737 | */ |
1706 | static struct net_device_stats *nv_get_stats(struct net_device *dev) | 1738 | static struct rtnl_link_stats64* |
1739 | nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) | ||
1740 | __acquires(&netdev_priv(dev)->hwstats_lock) | ||
1741 | __releases(&netdev_priv(dev)->hwstats_lock) | ||
1707 | { | 1742 | { |
1708 | struct fe_priv *np = netdev_priv(dev); | 1743 | struct fe_priv *np = netdev_priv(dev); |
1744 | unsigned int syncp_start; | ||
1745 | |||
1746 | /* | ||
1747 | * Note: because HW stats are not always available and for | ||
1748 | * consistency reasons, the following ifconfig stats are | ||
1749 | * managed by software: rx_bytes, tx_bytes, rx_packets and | ||
1750 | * tx_packets. The related hardware stats reported by ethtool | ||
1751 | * should be equivalent to these ifconfig stats, with 4 | ||
1752 | * additional bytes per packet (Ethernet FCS CRC), except for | ||
1753 | * tx_packets when TSO kicks in. | ||
1754 | */ | ||
1755 | |||
1756 | /* software stats */ | ||
1757 | do { | ||
1758 | syncp_start = u64_stats_fetch_begin(&np->swstats_rx_syncp); | ||
1759 | storage->rx_packets = np->stat_rx_packets; | ||
1760 | storage->rx_bytes = np->stat_rx_bytes; | ||
1761 | storage->rx_missed_errors = np->stat_rx_missed_errors; | ||
1762 | } while (u64_stats_fetch_retry(&np->swstats_rx_syncp, syncp_start)); | ||
1763 | |||
1764 | do { | ||
1765 | syncp_start = u64_stats_fetch_begin(&np->swstats_tx_syncp); | ||
1766 | storage->tx_packets = np->stat_tx_packets; | ||
1767 | storage->tx_bytes = np->stat_tx_bytes; | ||
1768 | storage->tx_dropped = np->stat_tx_dropped; | ||
1769 | } while (u64_stats_fetch_retry(&np->swstats_tx_syncp, syncp_start)); | ||
1709 | 1770 | ||
1710 | /* If the nic supports hw counters then retrieve latest values */ | 1771 | /* If the nic supports hw counters then retrieve latest values */ |
1711 | if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { | 1772 | if (np->driver_data & DEV_HAS_STATISTICS_V123) { |
1712 | nv_get_hw_stats(dev); | 1773 | spin_lock_bh(&np->hwstats_lock); |
1713 | 1774 | ||
1714 | /* | 1775 | nv_update_stats(dev); |
1715 | * Note: because HW stats are not always available and | 1776 | |
1716 | * for consistency reasons, the following ifconfig | 1777 | /* generic stats */ |
1717 | * stats are managed by software: rx_bytes, tx_bytes, | 1778 | storage->rx_errors = np->estats.rx_errors_total; |
1718 | * rx_packets and tx_packets. The related hardware | 1779 | storage->tx_errors = np->estats.tx_errors_total; |
1719 | * stats reported by ethtool should be equivalent to | 1780 | |
1720 | * these ifconfig stats, with 4 additional bytes per | 1781 | /* meaningful only when NIC supports stats v3 */ |
1721 | * packet (Ethernet FCS CRC). | 1782 | storage->multicast = np->estats.rx_multicast; |
1722 | */ | 1783 | |
1784 | /* detailed rx_errors */ | ||
1785 | storage->rx_length_errors = np->estats.rx_length_error; | ||
1786 | storage->rx_over_errors = np->estats.rx_over_errors; | ||
1787 | storage->rx_crc_errors = np->estats.rx_crc_errors; | ||
1788 | storage->rx_frame_errors = np->estats.rx_frame_align_error; | ||
1789 | storage->rx_fifo_errors = np->estats.rx_drop_frame; | ||
1723 | 1790 | ||
1724 | /* copy to net_device stats */ | 1791 | /* detailed tx_errors */ |
1725 | dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; | 1792 | storage->tx_carrier_errors = np->estats.tx_carrier_errors; |
1726 | dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; | 1793 | storage->tx_fifo_errors = np->estats.tx_fifo_errors; |
1727 | dev->stats.rx_crc_errors = np->estats.rx_crc_errors; | 1794 | |
1728 | dev->stats.rx_over_errors = np->estats.rx_over_errors; | 1795 | spin_unlock_bh(&np->hwstats_lock); |
1729 | dev->stats.rx_fifo_errors = np->estats.rx_drop_frame; | ||
1730 | dev->stats.rx_errors = np->estats.rx_errors_total; | ||
1731 | dev->stats.tx_errors = np->estats.tx_errors_total; | ||
1732 | } | 1796 | } |
1733 | 1797 | ||
1734 | return &dev->stats; | 1798 | return storage; |
1735 | } | 1799 | } |
1736 | 1800 | ||
1737 | /* | 1801 | /* |
@@ -1932,8 +1996,11 @@ static void nv_drain_tx(struct net_device *dev) | |||
1932 | np->tx_ring.ex[i].bufhigh = 0; | 1996 | np->tx_ring.ex[i].bufhigh = 0; |
1933 | np->tx_ring.ex[i].buflow = 0; | 1997 | np->tx_ring.ex[i].buflow = 0; |
1934 | } | 1998 | } |
1935 | if (nv_release_txskb(np, &np->tx_skb[i])) | 1999 | if (nv_release_txskb(np, &np->tx_skb[i])) { |
1936 | dev->stats.tx_dropped++; | 2000 | u64_stats_update_begin(&np->swstats_tx_syncp); |
2001 | np->stat_tx_dropped++; | ||
2002 | u64_stats_update_end(&np->swstats_tx_syncp); | ||
2003 | } | ||
1937 | np->tx_skb[i].dma = 0; | 2004 | np->tx_skb[i].dma = 0; |
1938 | np->tx_skb[i].dma_len = 0; | 2005 | np->tx_skb[i].dma_len = 0; |
1939 | np->tx_skb[i].dma_single = 0; | 2006 | np->tx_skb[i].dma_single = 0; |
@@ -2390,11 +2457,14 @@ static int nv_tx_done(struct net_device *dev, int limit) | |||
2390 | if (np->desc_ver == DESC_VER_1) { | 2457 | if (np->desc_ver == DESC_VER_1) { |
2391 | if (flags & NV_TX_LASTPACKET) { | 2458 | if (flags & NV_TX_LASTPACKET) { |
2392 | if (flags & NV_TX_ERROR) { | 2459 | if (flags & NV_TX_ERROR) { |
2393 | if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) | 2460 | if ((flags & NV_TX_RETRYERROR) |
2461 | && !(flags & NV_TX_RETRYCOUNT_MASK)) | ||
2394 | nv_legacybackoff_reseed(dev); | 2462 | nv_legacybackoff_reseed(dev); |
2395 | } else { | 2463 | } else { |
2396 | dev->stats.tx_packets++; | 2464 | u64_stats_update_begin(&np->swstats_tx_syncp); |
2397 | dev->stats.tx_bytes += np->get_tx_ctx->skb->len; | 2465 | np->stat_tx_packets++; |
2466 | np->stat_tx_bytes += np->get_tx_ctx->skb->len; | ||
2467 | u64_stats_update_end(&np->swstats_tx_syncp); | ||
2398 | } | 2468 | } |
2399 | dev_kfree_skb_any(np->get_tx_ctx->skb); | 2469 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
2400 | np->get_tx_ctx->skb = NULL; | 2470 | np->get_tx_ctx->skb = NULL; |
@@ -2403,11 +2473,14 @@ static int nv_tx_done(struct net_device *dev, int limit) | |||
2403 | } else { | 2473 | } else { |
2404 | if (flags & NV_TX2_LASTPACKET) { | 2474 | if (flags & NV_TX2_LASTPACKET) { |
2405 | if (flags & NV_TX2_ERROR) { | 2475 | if (flags & NV_TX2_ERROR) { |
2406 | if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) | 2476 | if ((flags & NV_TX2_RETRYERROR) |
2477 | && !(flags & NV_TX2_RETRYCOUNT_MASK)) | ||
2407 | nv_legacybackoff_reseed(dev); | 2478 | nv_legacybackoff_reseed(dev); |
2408 | } else { | 2479 | } else { |
2409 | dev->stats.tx_packets++; | 2480 | u64_stats_update_begin(&np->swstats_tx_syncp); |
2410 | dev->stats.tx_bytes += np->get_tx_ctx->skb->len; | 2481 | np->stat_tx_packets++; |
2482 | np->stat_tx_bytes += np->get_tx_ctx->skb->len; | ||
2483 | u64_stats_update_end(&np->swstats_tx_syncp); | ||
2411 | } | 2484 | } |
2412 | dev_kfree_skb_any(np->get_tx_ctx->skb); | 2485 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
2413 | np->get_tx_ctx->skb = NULL; | 2486 | np->get_tx_ctx->skb = NULL; |
@@ -2441,15 +2514,18 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) | |||
2441 | 2514 | ||
2442 | if (flags & NV_TX2_LASTPACKET) { | 2515 | if (flags & NV_TX2_LASTPACKET) { |
2443 | if (flags & NV_TX2_ERROR) { | 2516 | if (flags & NV_TX2_ERROR) { |
2444 | if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { | 2517 | if ((flags & NV_TX2_RETRYERROR) |
2518 | && !(flags & NV_TX2_RETRYCOUNT_MASK)) { | ||
2445 | if (np->driver_data & DEV_HAS_GEAR_MODE) | 2519 | if (np->driver_data & DEV_HAS_GEAR_MODE) |
2446 | nv_gear_backoff_reseed(dev); | 2520 | nv_gear_backoff_reseed(dev); |
2447 | else | 2521 | else |
2448 | nv_legacybackoff_reseed(dev); | 2522 | nv_legacybackoff_reseed(dev); |
2449 | } | 2523 | } |
2450 | } else { | 2524 | } else { |
2451 | dev->stats.tx_packets++; | 2525 | u64_stats_update_begin(&np->swstats_tx_syncp); |
2452 | dev->stats.tx_bytes += np->get_tx_ctx->skb->len; | 2526 | np->stat_tx_packets++; |
2527 | np->stat_tx_bytes += np->get_tx_ctx->skb->len; | ||
2528 | u64_stats_update_end(&np->swstats_tx_syncp); | ||
2453 | } | 2529 | } |
2454 | 2530 | ||
2455 | dev_kfree_skb_any(np->get_tx_ctx->skb); | 2531 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
@@ -2662,8 +2738,11 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2662 | } | 2738 | } |
2663 | /* the rest are hard errors */ | 2739 | /* the rest are hard errors */ |
2664 | else { | 2740 | else { |
2665 | if (flags & NV_RX_MISSEDFRAME) | 2741 | if (flags & NV_RX_MISSEDFRAME) { |
2666 | dev->stats.rx_missed_errors++; | 2742 | u64_stats_update_begin(&np->swstats_rx_syncp); |
2743 | np->stat_rx_missed_errors++; | ||
2744 | u64_stats_update_end(&np->swstats_rx_syncp); | ||
2745 | } | ||
2667 | dev_kfree_skb(skb); | 2746 | dev_kfree_skb(skb); |
2668 | goto next_pkt; | 2747 | goto next_pkt; |
2669 | } | 2748 | } |
@@ -2706,8 +2785,10 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2706 | skb_put(skb, len); | 2785 | skb_put(skb, len); |
2707 | skb->protocol = eth_type_trans(skb, dev); | 2786 | skb->protocol = eth_type_trans(skb, dev); |
2708 | napi_gro_receive(&np->napi, skb); | 2787 | napi_gro_receive(&np->napi, skb); |
2709 | dev->stats.rx_packets++; | 2788 | u64_stats_update_begin(&np->swstats_rx_syncp); |
2710 | dev->stats.rx_bytes += len; | 2789 | np->stat_rx_packets++; |
2790 | np->stat_rx_bytes += len; | ||
2791 | u64_stats_update_end(&np->swstats_rx_syncp); | ||
2711 | next_pkt: | 2792 | next_pkt: |
2712 | if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) | 2793 | if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) |
2713 | np->get_rx.orig = np->first_rx.orig; | 2794 | np->get_rx.orig = np->first_rx.orig; |
@@ -2790,8 +2871,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) | |||
2790 | __vlan_hwaccel_put_tag(skb, vid); | 2871 | __vlan_hwaccel_put_tag(skb, vid); |
2791 | } | 2872 | } |
2792 | napi_gro_receive(&np->napi, skb); | 2873 | napi_gro_receive(&np->napi, skb); |
2793 | dev->stats.rx_packets++; | 2874 | u64_stats_update_begin(&np->swstats_rx_syncp); |
2794 | dev->stats.rx_bytes += len; | 2875 | np->stat_rx_packets++; |
2876 | np->stat_rx_bytes += len; | ||
2877 | u64_stats_update_end(&np->swstats_rx_syncp); | ||
2795 | } else { | 2878 | } else { |
2796 | dev_kfree_skb(skb); | 2879 | dev_kfree_skb(skb); |
2797 | } | 2880 | } |
@@ -4000,11 +4083,18 @@ static void nv_poll_controller(struct net_device *dev) | |||
4000 | #endif | 4083 | #endif |
4001 | 4084 | ||
4002 | static void nv_do_stats_poll(unsigned long data) | 4085 | static void nv_do_stats_poll(unsigned long data) |
4086 | __acquires(&netdev_priv(dev)->hwstats_lock) | ||
4087 | __releases(&netdev_priv(dev)->hwstats_lock) | ||
4003 | { | 4088 | { |
4004 | struct net_device *dev = (struct net_device *) data; | 4089 | struct net_device *dev = (struct net_device *) data; |
4005 | struct fe_priv *np = netdev_priv(dev); | 4090 | struct fe_priv *np = netdev_priv(dev); |
4006 | 4091 | ||
4007 | nv_get_hw_stats(dev); | 4092 | /* If lock is currently taken, the stats are being refreshed |
4093 | * and hence fresh enough */ | ||
4094 | if (spin_trylock(&np->hwstats_lock)) { | ||
4095 | nv_update_stats(dev); | ||
4096 | spin_unlock(&np->hwstats_lock); | ||
4097 | } | ||
4008 | 4098 | ||
4009 | if (!np->in_shutdown) | 4099 | if (!np->in_shutdown) |
4010 | mod_timer(&np->stats_poll, | 4100 | mod_timer(&np->stats_poll, |
@@ -4712,14 +4802,18 @@ static int nv_get_sset_count(struct net_device *dev, int sset) | |||
4712 | } | 4802 | } |
4713 | } | 4803 | } |
4714 | 4804 | ||
4715 | static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) | 4805 | static void nv_get_ethtool_stats(struct net_device *dev, |
4806 | struct ethtool_stats *estats, u64 *buffer) | ||
4807 | __acquires(&netdev_priv(dev)->hwstats_lock) | ||
4808 | __releases(&netdev_priv(dev)->hwstats_lock) | ||
4716 | { | 4809 | { |
4717 | struct fe_priv *np = netdev_priv(dev); | 4810 | struct fe_priv *np = netdev_priv(dev); |
4718 | 4811 | ||
4719 | /* update stats */ | 4812 | spin_lock_bh(&np->hwstats_lock); |
4720 | nv_get_hw_stats(dev); | 4813 | nv_update_stats(dev); |
4721 | 4814 | memcpy(buffer, &np->estats, | |
4722 | memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); | 4815 | nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); |
4816 | spin_unlock_bh(&np->hwstats_lock); | ||
4723 | } | 4817 | } |
4724 | 4818 | ||
4725 | static int nv_link_test(struct net_device *dev) | 4819 | static int nv_link_test(struct net_device *dev) |
@@ -5363,7 +5457,7 @@ static int nv_close(struct net_device *dev) | |||
5363 | static const struct net_device_ops nv_netdev_ops = { | 5457 | static const struct net_device_ops nv_netdev_ops = { |
5364 | .ndo_open = nv_open, | 5458 | .ndo_open = nv_open, |
5365 | .ndo_stop = nv_close, | 5459 | .ndo_stop = nv_close, |
5366 | .ndo_get_stats = nv_get_stats, | 5460 | .ndo_get_stats64 = nv_get_stats64, |
5367 | .ndo_start_xmit = nv_start_xmit, | 5461 | .ndo_start_xmit = nv_start_xmit, |
5368 | .ndo_tx_timeout = nv_tx_timeout, | 5462 | .ndo_tx_timeout = nv_tx_timeout, |
5369 | .ndo_change_mtu = nv_change_mtu, | 5463 | .ndo_change_mtu = nv_change_mtu, |
@@ -5380,7 +5474,7 @@ static const struct net_device_ops nv_netdev_ops = { | |||
5380 | static const struct net_device_ops nv_netdev_ops_optimized = { | 5474 | static const struct net_device_ops nv_netdev_ops_optimized = { |
5381 | .ndo_open = nv_open, | 5475 | .ndo_open = nv_open, |
5382 | .ndo_stop = nv_close, | 5476 | .ndo_stop = nv_close, |
5383 | .ndo_get_stats = nv_get_stats, | 5477 | .ndo_get_stats64 = nv_get_stats64, |
5384 | .ndo_start_xmit = nv_start_xmit_optimized, | 5478 | .ndo_start_xmit = nv_start_xmit_optimized, |
5385 | .ndo_tx_timeout = nv_tx_timeout, | 5479 | .ndo_tx_timeout = nv_tx_timeout, |
5386 | .ndo_change_mtu = nv_change_mtu, | 5480 | .ndo_change_mtu = nv_change_mtu, |
@@ -5419,6 +5513,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5419 | np->dev = dev; | 5513 | np->dev = dev; |
5420 | np->pci_dev = pci_dev; | 5514 | np->pci_dev = pci_dev; |
5421 | spin_lock_init(&np->lock); | 5515 | spin_lock_init(&np->lock); |
5516 | spin_lock_init(&np->hwstats_lock); | ||
5422 | SET_NETDEV_DEV(dev, &pci_dev->dev); | 5517 | SET_NETDEV_DEV(dev, &pci_dev->dev); |
5423 | 5518 | ||
5424 | init_timer(&np->oom_kick); | 5519 | init_timer(&np->oom_kick); |