diff options
author | Jamie Gloudon <jamie.gloudon@gmail.com> | 2013-01-23 13:05:04 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-01-28 18:43:02 -0500 |
commit | f7b5d1b9bd16e3ec71696abb204a8cfddd93aa62 (patch) | |
tree | 8a5789abdd06fbaf45c1b63f025b0fe510dcecc7 | |
parent | 7ab59dc15e2f42a4321ed016bcd6044a4d8de6d1 (diff) |
via-rhine: add 64bit statistics.
Switch to ndo_get_stats64 to provide 64-bit statistics.
Signed-off-by: Jamie Gloudon <jamie.gloudon@gmail.com>
Tested-by: Jamie Gloudon <jamie.gloudon@gmail.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ethernet/via/via-rhine.c | 47 |
1 files changed, 39 insertions, 8 deletions
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index eab63e1d5609..ec4a5e1c6fb2 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c | |||
@@ -417,6 +417,12 @@ enum chip_cmd_bits { | |||
417 | Cmd1NoTxPoll=0x08, Cmd1Reset=0x80, | 417 | Cmd1NoTxPoll=0x08, Cmd1Reset=0x80, |
418 | }; | 418 | }; |
419 | 419 | ||
420 | struct rhine_stats { | ||
421 | u64 packets; | ||
422 | u64 bytes; | ||
423 | struct u64_stats_sync syncp; | ||
424 | }; | ||
425 | |||
420 | struct rhine_private { | 426 | struct rhine_private { |
421 | /* Bit mask for configured VLAN ids */ | 427 | /* Bit mask for configured VLAN ids */ |
422 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | 428 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; |
@@ -458,6 +464,8 @@ struct rhine_private { | |||
458 | unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ | 464 | unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ |
459 | unsigned int cur_tx, dirty_tx; | 465 | unsigned int cur_tx, dirty_tx; |
460 | unsigned int rx_buf_sz; /* Based on MTU+slack. */ | 466 | unsigned int rx_buf_sz; /* Based on MTU+slack. */ |
467 | struct rhine_stats rx_stats; | ||
468 | struct rhine_stats tx_stats; | ||
461 | u8 wolopts; | 469 | u8 wolopts; |
462 | 470 | ||
463 | u8 tx_thresh, rx_thresh; | 471 | u8 tx_thresh, rx_thresh; |
@@ -495,7 +503,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance); | |||
495 | static void rhine_tx(struct net_device *dev); | 503 | static void rhine_tx(struct net_device *dev); |
496 | static int rhine_rx(struct net_device *dev, int limit); | 504 | static int rhine_rx(struct net_device *dev, int limit); |
497 | static void rhine_set_rx_mode(struct net_device *dev); | 505 | static void rhine_set_rx_mode(struct net_device *dev); |
498 | static struct net_device_stats *rhine_get_stats(struct net_device *dev); | 506 | static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev, |
507 | struct rtnl_link_stats64 *stats); | ||
499 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 508 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
500 | static const struct ethtool_ops netdev_ethtool_ops; | 509 | static const struct ethtool_ops netdev_ethtool_ops; |
501 | static int rhine_close(struct net_device *dev); | 510 | static int rhine_close(struct net_device *dev); |
@@ -842,7 +851,7 @@ static const struct net_device_ops rhine_netdev_ops = { | |||
842 | .ndo_open = rhine_open, | 851 | .ndo_open = rhine_open, |
843 | .ndo_stop = rhine_close, | 852 | .ndo_stop = rhine_close, |
844 | .ndo_start_xmit = rhine_start_tx, | 853 | .ndo_start_xmit = rhine_start_tx, |
845 | .ndo_get_stats = rhine_get_stats, | 854 | .ndo_get_stats64 = rhine_get_stats64, |
846 | .ndo_set_rx_mode = rhine_set_rx_mode, | 855 | .ndo_set_rx_mode = rhine_set_rx_mode, |
847 | .ndo_change_mtu = eth_change_mtu, | 856 | .ndo_change_mtu = eth_change_mtu, |
848 | .ndo_validate_addr = eth_validate_addr, | 857 | .ndo_validate_addr = eth_validate_addr, |
@@ -1790,8 +1799,11 @@ static void rhine_tx(struct net_device *dev) | |||
1790 | dev->stats.collisions += txstatus & 0x0F; | 1799 | dev->stats.collisions += txstatus & 0x0F; |
1791 | netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", | 1800 | netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", |
1792 | (txstatus >> 3) & 0xF, txstatus & 0xF); | 1801 | (txstatus >> 3) & 0xF, txstatus & 0xF); |
1793 | dev->stats.tx_bytes += rp->tx_skbuff[entry]->len; | 1802 | |
1794 | dev->stats.tx_packets++; | 1803 | u64_stats_update_begin(&rp->tx_stats.syncp); |
1804 | rp->tx_stats.bytes += rp->tx_skbuff[entry]->len; | ||
1805 | rp->tx_stats.packets++; | ||
1806 | u64_stats_update_end(&rp->tx_stats.syncp); | ||
1795 | } | 1807 | } |
1796 | /* Free the original skb. */ | 1808 | /* Free the original skb. */ |
1797 | if (rp->tx_skbuff_dma[entry]) { | 1809 | if (rp->tx_skbuff_dma[entry]) { |
@@ -1923,8 +1935,11 @@ static int rhine_rx(struct net_device *dev, int limit) | |||
1923 | if (unlikely(desc_length & DescTag)) | 1935 | if (unlikely(desc_length & DescTag)) |
1924 | __vlan_hwaccel_put_tag(skb, vlan_tci); | 1936 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
1925 | netif_receive_skb(skb); | 1937 | netif_receive_skb(skb); |
1926 | dev->stats.rx_bytes += pkt_len; | 1938 | |
1927 | dev->stats.rx_packets++; | 1939 | u64_stats_update_begin(&rp->rx_stats.syncp); |
1940 | rp->rx_stats.bytes += pkt_len; | ||
1941 | rp->rx_stats.packets++; | ||
1942 | u64_stats_update_end(&rp->rx_stats.syncp); | ||
1928 | } | 1943 | } |
1929 | entry = (++rp->cur_rx) % RX_RING_SIZE; | 1944 | entry = (++rp->cur_rx) % RX_RING_SIZE; |
1930 | rp->rx_head_desc = &rp->rx_ring[entry]; | 1945 | rp->rx_head_desc = &rp->rx_ring[entry]; |
@@ -2019,15 +2034,31 @@ out_unlock: | |||
2019 | mutex_unlock(&rp->task_lock); | 2034 | mutex_unlock(&rp->task_lock); |
2020 | } | 2035 | } |
2021 | 2036 | ||
2022 | static struct net_device_stats *rhine_get_stats(struct net_device *dev) | 2037 | static struct rtnl_link_stats64 * |
2038 | rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | ||
2023 | { | 2039 | { |
2024 | struct rhine_private *rp = netdev_priv(dev); | 2040 | struct rhine_private *rp = netdev_priv(dev); |
2041 | unsigned int start; | ||
2025 | 2042 | ||
2026 | spin_lock_bh(&rp->lock); | 2043 | spin_lock_bh(&rp->lock); |
2027 | rhine_update_rx_crc_and_missed_errord(rp); | 2044 | rhine_update_rx_crc_and_missed_errord(rp); |
2028 | spin_unlock_bh(&rp->lock); | 2045 | spin_unlock_bh(&rp->lock); |
2029 | 2046 | ||
2030 | return &dev->stats; | 2047 | netdev_stats_to_stats64(stats, &dev->stats); |
2048 | |||
2049 | do { | ||
2050 | start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp); | ||
2051 | stats->rx_packets = rp->rx_stats.packets; | ||
2052 | stats->rx_bytes = rp->rx_stats.bytes; | ||
2053 | } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start)); | ||
2054 | |||
2055 | do { | ||
2056 | start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp); | ||
2057 | stats->tx_packets = rp->tx_stats.packets; | ||
2058 | stats->tx_bytes = rp->tx_stats.bytes; | ||
2059 | } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start)); | ||
2060 | |||
2061 | return stats; | ||
2031 | } | 2062 | } |
2032 | 2063 | ||
2033 | static void rhine_set_rx_mode(struct net_device *dev) | 2064 | static void rhine_set_rx_mode(struct net_device *dev) |