about summary refs log tree commit diff stats
path: root/drivers/net/bmac.c
diff options
context:
space:
mode:
authorJeff Garzik <jeff@garzik.org>2007-10-03 20:41:50 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-10-10 19:51:16 -0400
commit09f75cd7bf13720738e6a196cc0107ce9a5bd5a0 (patch)
tree4c85b0b395abe7f88c87162fc22570e5de255cb1 /drivers/net/bmac.c
parentff8ac60948ba819b89e9c87083e8050fc2f89999 (diff)
[NET] drivers/net: statistics cleanup #1 -- save memory and shrink code
We now have struct net_device_stats embedded in struct net_device, and the default ->get_stats() hook does the obvious thing for us. Run through drivers/net/* and remove the driver-local storage of statistics, and driver-local ->get_stats() hook where applicable. This was just the low-hanging fruit in drivers/net; plenty more drivers remain to be updated. [ Resolved conflicts with napi_struct changes and fix sunqe build regression... -DaveM ] Signed-off-by: Jeff Garzik <jeff@garzik.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bmac.c')
-rw-r--r--drivers/net/bmac.c48
1 file changed, 19 insertions, 29 deletions
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index ee157f5a5dbc..2761441f6644 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -75,7 +75,6 @@ struct bmac_data {
75 int tx_fill; 75 int tx_fill;
76 int tx_empty; 76 int tx_empty;
77 unsigned char tx_fullup; 77 unsigned char tx_fullup;
78 struct net_device_stats stats;
79 struct timer_list tx_timeout; 78 struct timer_list tx_timeout;
80 int timeout_active; 79 int timeout_active;
81 int sleeping; 80 int sleeping;
@@ -145,7 +144,6 @@ static unsigned char *bmac_emergency_rxbuf;
145static int bmac_open(struct net_device *dev); 144static int bmac_open(struct net_device *dev);
146static int bmac_close(struct net_device *dev); 145static int bmac_close(struct net_device *dev);
147static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); 146static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
148static struct net_device_stats *bmac_stats(struct net_device *dev);
149static void bmac_set_multicast(struct net_device *dev); 147static void bmac_set_multicast(struct net_device *dev);
150static void bmac_reset_and_enable(struct net_device *dev); 148static void bmac_reset_and_enable(struct net_device *dev);
151static void bmac_start_chip(struct net_device *dev); 149static void bmac_start_chip(struct net_device *dev);
@@ -668,7 +666,7 @@ static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
668 bp->tx_bufs[bp->tx_fill] = skb; 666 bp->tx_bufs[bp->tx_fill] = skb;
669 bp->tx_fill = i; 667 bp->tx_fill = i;
670 668
671 bp->stats.tx_bytes += skb->len; 669 dev->stats.tx_bytes += skb->len;
672 670
673 dbdma_continue(td); 671 dbdma_continue(td);
674 672
@@ -707,8 +705,8 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
707 nb = RX_BUFLEN - residual - 2; 705 nb = RX_BUFLEN - residual - 2;
708 if (nb < (ETHERMINPACKET - ETHERCRC)) { 706 if (nb < (ETHERMINPACKET - ETHERCRC)) {
709 skb = NULL; 707 skb = NULL;
710 bp->stats.rx_length_errors++; 708 dev->stats.rx_length_errors++;
711 bp->stats.rx_errors++; 709 dev->stats.rx_errors++;
712 } else { 710 } else {
713 skb = bp->rx_bufs[i]; 711 skb = bp->rx_bufs[i];
714 bp->rx_bufs[i] = NULL; 712 bp->rx_bufs[i] = NULL;
@@ -719,10 +717,10 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
719 skb->protocol = eth_type_trans(skb, dev); 717 skb->protocol = eth_type_trans(skb, dev);
720 netif_rx(skb); 718 netif_rx(skb);
721 dev->last_rx = jiffies; 719 dev->last_rx = jiffies;
722 ++bp->stats.rx_packets; 720 ++dev->stats.rx_packets;
723 bp->stats.rx_bytes += nb; 721 dev->stats.rx_bytes += nb;
724 } else { 722 } else {
725 ++bp->stats.rx_dropped; 723 ++dev->stats.rx_dropped;
726 } 724 }
727 dev->last_rx = jiffies; 725 dev->last_rx = jiffies;
728 if ((skb = bp->rx_bufs[i]) == NULL) { 726 if ((skb = bp->rx_bufs[i]) == NULL) {
@@ -785,7 +783,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
785 } 783 }
786 784
787 if (bp->tx_bufs[bp->tx_empty]) { 785 if (bp->tx_bufs[bp->tx_empty]) {
788 ++bp->stats.tx_packets; 786 ++dev->stats.tx_packets;
789 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); 787 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
790 } 788 }
791 bp->tx_bufs[bp->tx_empty] = NULL; 789 bp->tx_bufs[bp->tx_empty] = NULL;
@@ -807,13 +805,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
807 return IRQ_HANDLED; 805 return IRQ_HANDLED;
808} 806}
809 807
810static struct net_device_stats *bmac_stats(struct net_device *dev)
811{
812 struct bmac_data *p = netdev_priv(dev);
813
814 return &p->stats;
815}
816
817#ifndef SUNHME_MULTICAST 808#ifndef SUNHME_MULTICAST
818/* Real fast bit-reversal algorithm, 6-bit values */ 809/* Real fast bit-reversal algorithm, 6-bit values */
819static int reverse6[64] = { 810static int reverse6[64] = {
@@ -1080,17 +1071,17 @@ static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
1080 } 1071 }
1081 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */ 1072 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
1082 /* bmac_txdma_intr_inner(irq, dev_id); */ 1073 /* bmac_txdma_intr_inner(irq, dev_id); */
1083 /* if (status & FrameReceived) bp->stats.rx_dropped++; */ 1074 /* if (status & FrameReceived) dev->stats.rx_dropped++; */
1084 if (status & RxErrorMask) bp->stats.rx_errors++; 1075 if (status & RxErrorMask) dev->stats.rx_errors++;
1085 if (status & RxCRCCntExp) bp->stats.rx_crc_errors++; 1076 if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
1086 if (status & RxLenCntExp) bp->stats.rx_length_errors++; 1077 if (status & RxLenCntExp) dev->stats.rx_length_errors++;
1087 if (status & RxOverFlow) bp->stats.rx_over_errors++; 1078 if (status & RxOverFlow) dev->stats.rx_over_errors++;
1088 if (status & RxAlignCntExp) bp->stats.rx_frame_errors++; 1079 if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
1089 1080
1090 /* if (status & FrameSent) bp->stats.tx_dropped++; */ 1081 /* if (status & FrameSent) dev->stats.tx_dropped++; */
1091 if (status & TxErrorMask) bp->stats.tx_errors++; 1082 if (status & TxErrorMask) dev->stats.tx_errors++;
1092 if (status & TxUnderrun) bp->stats.tx_fifo_errors++; 1083 if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
1093 if (status & TxNormalCollExp) bp->stats.collisions++; 1084 if (status & TxNormalCollExp) dev->stats.collisions++;
1094 return IRQ_HANDLED; 1085 return IRQ_HANDLED;
1095} 1086}
1096 1087
@@ -1324,7 +1315,6 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
1324 dev->stop = bmac_close; 1315 dev->stop = bmac_close;
1325 dev->ethtool_ops = &bmac_ethtool_ops; 1316 dev->ethtool_ops = &bmac_ethtool_ops;
1326 dev->hard_start_xmit = bmac_output; 1317 dev->hard_start_xmit = bmac_output;
1327 dev->get_stats = bmac_stats;
1328 dev->set_multicast_list = bmac_set_multicast; 1318 dev->set_multicast_list = bmac_set_multicast;
1329 dev->set_mac_address = bmac_set_address; 1319 dev->set_mac_address = bmac_set_address;
1330 1320
@@ -1542,7 +1532,7 @@ static void bmac_tx_timeout(unsigned long data)
1542 XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n", 1532 XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
1543 bp->tx_empty, bp->tx_fill, bp->tx_fullup)); 1533 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
1544 i = bp->tx_empty; 1534 i = bp->tx_empty;
1545 ++bp->stats.tx_errors; 1535 ++dev->stats.tx_errors;
1546 if (i != bp->tx_fill) { 1536 if (i != bp->tx_fill) {
1547 dev_kfree_skb(bp->tx_bufs[i]); 1537 dev_kfree_skb(bp->tx_bufs[i]);
1548 bp->tx_bufs[i] = NULL; 1538 bp->tx_bufs[i] = NULL;