author		Ron Mercer <ron.mercer@qlogic.com>	2009-11-03 08:49:31 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-04 08:01:38 -0500
commit		885ee398de2354cf4526a3ecaf9e84e292393007 (patch)
tree		5012c783395b402d9e61facda407355dcbd3cfdb /drivers/net/qlge
parent		1e34e307d0ebe536feb39c957c849a485bc81486 (diff)
qlge: Clean up netdev->stats usage.

Don't access netdev->stats in the IO path. Save the counters in the
tx_ring/rx_ring structures and add them up when the get_stats API is
called.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
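The pattern is simple enough to show in isolation: each ring owns its own
counters, the hot path only touches the ring it is already working on, and a
summation pass builds the aggregate view when get_stats is called. The sketch
below is a minimal, self-contained illustration of that idea; the struct
layouts, ring counts, and function names are simplified stand-ins, not the
driver's real definitions.

/*
 * Minimal sketch of the per-ring counter pattern (not the driver's code).
 */
#include <stdio.h>
#include <stdint.h>

struct tx_ring_stats { uint64_t tx_packets, tx_bytes, tx_errors; };
struct rx_ring_stats { uint64_t rx_packets, rx_bytes, rx_dropped, rx_errors; };
struct dev_stats     { uint64_t rx_packets, rx_bytes, tx_packets, tx_bytes; };

#define NUM_RX_RINGS 4
#define NUM_TX_RINGS 4

/* Hot path: a completion handler bumps only its own ring's counters. */
static void rx_completion(struct rx_ring_stats *ring, unsigned int len)
{
	ring->rx_packets++;
	ring->rx_bytes += len;
}

/* get_stats time: sum the per-ring counters into one device-wide view. */
static void get_stats(const struct rx_ring_stats *rx,
		      const struct tx_ring_stats *tx, struct dev_stats *out)
{
	int i;

	out->rx_packets = out->rx_bytes = 0;
	out->tx_packets = out->tx_bytes = 0;
	for (i = 0; i < NUM_RX_RINGS; i++) {
		out->rx_packets += rx[i].rx_packets;
		out->rx_bytes += rx[i].rx_bytes;
	}
	for (i = 0; i < NUM_TX_RINGS; i++) {
		out->tx_packets += tx[i].tx_packets;
		out->tx_bytes += tx[i].tx_bytes;
	}
}

int main(void)
{
	struct rx_ring_stats rx[NUM_RX_RINGS] = {0};
	struct tx_ring_stats tx[NUM_TX_RINGS] = {0};
	struct dev_stats totals;

	rx_completion(&rx[0], 1500);	/* packet completed on ring 0 */
	rx_completion(&rx[2], 60);	/* packet completed on ring 2 */
	get_stats(rx, tx, &totals);
	printf("rx_packets=%llu rx_bytes=%llu\n",
	       (unsigned long long)totals.rx_packets,
	       (unsigned long long)totals.rx_bytes);
	return 0;
}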
Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--	drivers/net/qlge/qlge.h		 8
-rw-r--r--	drivers/net/qlge/qlge_main.c	46
2 files changed, 49 insertions(+), 5 deletions(-)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 5e4d3439043e..1f59f054452d 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1263,6 +1263,9 @@ struct tx_ring {
 	atomic_t queue_stopped;	/* Turns queue off when full. */
 	struct delayed_work tx_work;
 	struct ql_adapter *qdev;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 tx_errors;
 };
 
 /*
@@ -1329,6 +1332,11 @@ struct rx_ring {
 	struct napi_struct napi;
 	u8 reserved;
 	struct ql_adapter *qdev;
+	u64 rx_packets;
+	u64 rx_multicast;
+	u64 rx_bytes;
+	u64 rx_dropped;
+	u64 rx_errors;
 };
 
 /*
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 4a075484e151..0de596ad8a7e 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1661,6 +1661,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 	if (unlikely(!skb)) {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 				"No skb available, drop packet.\n");
+		rx_ring->rx_dropped++;
 		return;
 	}
 
@@ -1669,6 +1670,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
 					ib_mac_rsp->flags2);
 		dev_kfree_skb_any(skb);
+		rx_ring->rx_errors++;
 		return;
 	}
 
@@ -1677,6 +1679,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 	 */
 	if (skb->len > ndev->mtu + ETH_HLEN) {
 		dev_kfree_skb_any(skb);
+		rx_ring->rx_dropped++;
 		return;
 	}
 
@@ -1697,6 +1700,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
 			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+		rx_ring->rx_multicast++;
 	}
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
 		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
@@ -1728,8 +1732,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 		}
 	}
 
-	ndev->stats.rx_packets++;
-	ndev->stats.rx_bytes += skb->len;
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += skb->len;
 	skb_record_rx_queue(skb, rx_ring->cq_id);
 	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 		if (qdev->vlgrp &&
@@ -1753,7 +1757,6 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 				   struct ob_mac_iocb_rsp *mac_rsp)
 {
-	struct net_device *ndev = qdev->ndev;
 	struct tx_ring *tx_ring;
 	struct tx_ring_desc *tx_ring_desc;
 
@@ -1761,8 +1764,8 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
-	ndev->stats.tx_bytes += (tx_ring_desc->skb)->len;
-	ndev->stats.tx_packets++;
+	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
+	tx_ring->tx_packets++;
 	dev_kfree_skb(tx_ring_desc->skb);
 	tx_ring_desc->skb = NULL;
 
@@ -2205,6 +2208,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 			__func__, tx_ring_idx);
 		netif_stop_subqueue(ndev, tx_ring->wq_id);
 		atomic_inc(&tx_ring->queue_stopped);
+		tx_ring->tx_errors++;
 		return NETDEV_TX_BUSY;
 	}
 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
@@ -2239,6 +2243,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 			NETDEV_TX_OK) {
 		QPRINTK(qdev, TX_QUEUED, ERR,
 				"Could not map the segments.\n");
+		tx_ring->tx_errors++;
 		return NETDEV_TX_BUSY;
 	}
 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
@@ -3817,6 +3822,37 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
 static struct net_device_stats *qlge_get_stats(struct net_device
 					       *ndev)
 {
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	struct rx_ring *rx_ring = &qdev->rx_ring[0];
+	struct tx_ring *tx_ring = &qdev->tx_ring[0];
+	unsigned long pkts, mcast, dropped, errors, bytes;
+	int i;
+
+	/* Get RX stats. */
+	pkts = mcast = dropped = errors = bytes = 0;
+	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
+		pkts += rx_ring->rx_packets;
+		bytes += rx_ring->rx_bytes;
+		dropped += rx_ring->rx_dropped;
+		errors += rx_ring->rx_errors;
+		mcast += rx_ring->rx_multicast;
+	}
+	ndev->stats.rx_packets = pkts;
+	ndev->stats.rx_bytes = bytes;
+	ndev->stats.rx_dropped = dropped;
+	ndev->stats.rx_errors = errors;
+	ndev->stats.multicast = mcast;
+
+	/* Get TX stats. */
+	pkts = errors = bytes = 0;
+	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
+		pkts += tx_ring->tx_packets;
+		bytes += tx_ring->tx_bytes;
+		errors += tx_ring->tx_errors;
+	}
+	ndev->stats.tx_packets = pkts;
+	ndev->stats.tx_bytes = bytes;
+	ndev->stats.tx_errors = errors;
 	return &ndev->stats;
 }
 