author     Eric Dumazet <eric.dumazet@gmail.com>    2010-09-02 15:45:02 -0400
committer  David S. Miller <davem@davemloft.net>    2010-09-02 15:45:02 -0400
commit     250e061e1d3e26600236a3dd9172e7f5f5916c00
tree       4f52da41f1b254538b75e4ba84a8e4f1c2a23b2b
parent     deffd77759e3ceb936f0760cc54a213881577a83
bna: fix stats handling
The get_stats() method incorrectly clears a global array before folding the various stats; this can break SNMP applications.

Switch to the 64-bit flavor to work on a user-supplied buffer, and provide 64-bit counters even on 32-bit arches.

Also fix a bug in bnad_netdev_hwstats_fill(): rx_fifo_errors was missing a folding, so only the last counter was taken into account.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
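For context, the ndo_get_stats64 contract the patch switches to: the networking core zeroes a struct rtnl_link_stats64 and hands it to the driver, which only folds its counters into that caller-owned buffer, so there is no driver-global state to clear and no race with concurrent readers. A minimal sketch of that shape for a hypothetical "foo" driver (everything named foo_* is illustrative, not from this patch; the bnad version is in the diff below):

    #include <linux/kernel.h>
    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    /* Hypothetical driver-private state, for illustration only. */
    struct foo_priv {
        spinlock_t lock;        /* guards the counters below */
        u64 rx_packets;
        u64 frame_drops[4];     /* per-queue drop counters */
    };

    static struct rtnl_link_stats64 *
    foo_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
    {
        struct foo_priv *priv = netdev_priv(netdev);
        unsigned long flags;
        int i;

        spin_lock_irqsave(&priv->lock, flags);
        /* The core zeroed *stats before calling us: accumulate
         * rather than assign, and never touch a global buffer. */
        stats->rx_packets += priv->rx_packets;
        for (i = 0; i < ARRAY_SIZE(priv->frame_drops); i++)
            stats->rx_fifo_errors += priv->frame_drops[i];
        spin_unlock_irqrestore(&priv->lock, flags);

        return stats;
    }

Because every field of rtnl_link_stats64 is a u64, the same code reports full-width counters on 32-bit arches, which is the second half of this fix; wiring the hook up is the one-line net_device_ops change in the last hunk.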
Diffstat (limited to 'drivers/net/bna/bnad.c')
-rw-r--r--    drivers/net/bna/bnad.c | 52
1 file changed, 24 insertions(+), 28 deletions(-)
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index cbc1d563a0c..79c4c244144 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -1964,25 +1964,24 @@ bnad_enable_default_bcast(struct bnad *bnad)
 
 /* Statistics utilities */
 void
-bnad_netdev_qstats_fill(struct bnad *bnad)
+bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
 {
-    struct net_device_stats *net_stats = &bnad->net_stats;
     int i, j;
 
     for (i = 0; i < bnad->num_rx; i++) {
         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
             if (bnad->rx_info[i].rx_ctrl[j].ccb) {
-                net_stats->rx_packets += bnad->rx_info[i].
+                stats->rx_packets += bnad->rx_info[i].
                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
-                net_stats->rx_bytes += bnad->rx_info[i].
+                stats->rx_bytes += bnad->rx_info[i].
                     rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
                     bnad->rx_info[i].rx_ctrl[j].ccb->
                     rcb[1]->rxq) {
-                    net_stats->rx_packets +=
+                    stats->rx_packets +=
                         bnad->rx_info[i].rx_ctrl[j].
                         ccb->rcb[1]->rxq->rx_packets;
-                    net_stats->rx_bytes +=
+                    stats->rx_bytes +=
                         bnad->rx_info[i].rx_ctrl[j].
                         ccb->rcb[1]->rxq->rx_bytes;
                 }
@@ -1992,9 +1991,9 @@ bnad_netdev_qstats_fill(struct bnad *bnad)
     for (i = 0; i < bnad->num_tx; i++) {
         for (j = 0; j < bnad->num_txq_per_tx; j++) {
             if (bnad->tx_info[i].tcb[j]) {
-                net_stats->tx_packets +=
+                stats->tx_packets +=
                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
-                net_stats->tx_bytes +=
+                stats->tx_bytes +=
                     bnad->tx_info[i].tcb[j]->txq->tx_bytes;
             }
         }
@@ -2005,37 +2004,36 @@ bnad_netdev_qstats_fill(struct bnad *bnad)
  * Must be called with the bna_lock held.
  */
 void
-bnad_netdev_hwstats_fill(struct bnad *bnad)
+bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
 {
     struct bfi_ll_stats_mac *mac_stats;
-    struct net_device_stats *net_stats = &bnad->net_stats;
     u64 bmap;
     int i;
 
     mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
-    net_stats->rx_errors =
+    stats->rx_errors =
         mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
         mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
         mac_stats->rx_undersize;
-    net_stats->tx_errors = mac_stats->tx_fcs_error +
+    stats->tx_errors = mac_stats->tx_fcs_error +
         mac_stats->tx_undersize;
-    net_stats->rx_dropped = mac_stats->rx_drop;
-    net_stats->tx_dropped = mac_stats->tx_drop;
-    net_stats->multicast = mac_stats->rx_multicast;
-    net_stats->collisions = mac_stats->tx_total_collision;
+    stats->rx_dropped = mac_stats->rx_drop;
+    stats->tx_dropped = mac_stats->tx_drop;
+    stats->multicast = mac_stats->rx_multicast;
+    stats->collisions = mac_stats->tx_total_collision;
 
-    net_stats->rx_length_errors = mac_stats->rx_frame_length_error;
+    stats->rx_length_errors = mac_stats->rx_frame_length_error;
 
     /* receive ring buffer overflow ?? */
 
-    net_stats->rx_crc_errors = mac_stats->rx_fcs_error;
-    net_stats->rx_frame_errors = mac_stats->rx_alignment_error;
+    stats->rx_crc_errors = mac_stats->rx_fcs_error;
+    stats->rx_frame_errors = mac_stats->rx_alignment_error;
     /* recv'r fifo overrun */
     bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
         ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
     for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
         if (bmap & 1) {
-            net_stats->rx_fifo_errors =
+            stats->rx_fifo_errors +=
                 bnad->stats.bna_stats->
                 hw_stats->rxf_stats[i].frame_drops;
             break;
@@ -2638,22 +2636,20 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
  * Used spin_lock to synchronize reading of stats structures, which
  * is written by BNA under the same lock.
  */
-static struct net_device_stats *
-bnad_get_netdev_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *
+bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 {
     struct bnad *bnad = netdev_priv(netdev);
     unsigned long flags;
 
     spin_lock_irqsave(&bnad->bna_lock, flags);
 
-    memset(&bnad->net_stats, 0, sizeof(struct net_device_stats));
-
-    bnad_netdev_qstats_fill(bnad);
-    bnad_netdev_hwstats_fill(bnad);
+    bnad_netdev_qstats_fill(bnad, stats);
+    bnad_netdev_hwstats_fill(bnad, stats);
 
     spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-    return &bnad->net_stats;
+    return stats;
 }
 
 static void
@@ -2858,7 +2854,7 @@ static const struct net_device_ops bnad_netdev_ops = {
     .ndo_open               = bnad_open,
     .ndo_stop               = bnad_stop,
     .ndo_start_xmit         = bnad_start_xmit,
-    .ndo_get_stats          = bnad_get_netdev_stats,
+    .ndo_get_stats64        = bnad_get_stats64,
     .ndo_set_rx_mode        = bnad_set_rx_mode,
     .ndo_set_multicast_list = bnad_set_rx_mode,
     .ndo_validate_addr      = eth_validate_addr,
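On the rx_fifo_errors change in the third hunk: with plain assignment, each loop iteration overwrites the running total, so only one per-RxF counter survives; += folds every visited counter into the sum. A stand-alone illustration of the difference (hypothetical values; the real loop's early break is omitted so the folding over several set bits is visible):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RXF_ID_MAX 8

    int main(void)
    {
        /* Hypothetical per-RxF drop counters and an active-function bitmap. */
        uint64_t frame_drops[RXF_ID_MAX] = { 3, 0, 5, 0, 7, 0, 0, 0 };
        uint64_t bmap = 0x15;   /* functions 0, 2 and 4 active */
        uint64_t assigned = 0, folded = 0;

        for (int i = 0; bmap && i < RXF_ID_MAX; i++) {
            if (bmap & 1) {
                assigned = frame_drops[i];  /* old bug: overwrites */
                folded += frame_drops[i];   /* fixed: accumulates */
            }
            bmap >>= 1;
        }
        /* assigned == 7 (last active counter only), folded == 15 (all three) */
        printf("assigned=%" PRIu64 " folded=%" PRIu64 "\n", assigned, folded);
        return 0;
    }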