aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ibmlana.c
diff options
context:
space:
mode:
authorJeff Garzik <jeff@garzik.org>2007-10-03 20:41:50 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-10-10 19:51:16 -0400
commit09f75cd7bf13720738e6a196cc0107ce9a5bd5a0 (patch)
tree4c85b0b395abe7f88c87162fc22570e5de255cb1 /drivers/net/ibmlana.c
parentff8ac60948ba819b89e9c87083e8050fc2f89999 (diff)
[NET] drivers/net: statistics cleanup #1 -- save memory and shrink code
We now have struct net_device_stats embedded in struct net_device, and the default ->get_stats() hook does the obvious thing for us. Run through drivers/net/* and remove the driver-local storage of statistics, and driver-local ->get_stats() hook where applicable. This was just the low-hanging fruit in drivers/net; plenty more drivers remain to be updated. [ Resolved conflicts with napi_struct changes and fix sunqe build regression... -DaveM ] Signed-off-by: Jeff Garzik <jeff@garzik.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ibmlana.c')
-rw-r--r--drivers/net/ibmlana.c37
1 file changed, 14 insertions, 23 deletions
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 67d82fa7659d..eebf39acf586 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -591,7 +591,7 @@ static void irqrx_handler(struct net_device *dev)
591 591
592 skb = dev_alloc_skb(rda.length + 2); 592 skb = dev_alloc_skb(rda.length + 2);
593 if (skb == NULL) 593 if (skb == NULL)
594 priv->stat.rx_dropped++; 594 dev->stats.rx_dropped++;
595 else { 595 else {
596 /* copy out data */ 596 /* copy out data */
597 597
@@ -606,8 +606,8 @@ static void irqrx_handler(struct net_device *dev)
606 606
607 /* bookkeeping */ 607 /* bookkeeping */
608 dev->last_rx = jiffies; 608 dev->last_rx = jiffies;
609 priv->stat.rx_packets++; 609 dev->stats.rx_packets++;
610 priv->stat.rx_bytes += rda.length; 610 dev->stats.rx_bytes += rda.length;
611 611
612 /* pass to the upper layers */ 612 /* pass to the upper layers */
613 netif_rx(skb); 613 netif_rx(skb);
@@ -617,11 +617,11 @@ static void irqrx_handler(struct net_device *dev)
617 /* otherwise check error status bits and increase statistics */ 617 /* otherwise check error status bits and increase statistics */
618 618
619 else { 619 else {
620 priv->stat.rx_errors++; 620 dev->stats.rx_errors++;
621 if (rda.status & RCREG_FAER) 621 if (rda.status & RCREG_FAER)
622 priv->stat.rx_frame_errors++; 622 dev->stats.rx_frame_errors++;
623 if (rda.status & RCREG_CRCR) 623 if (rda.status & RCREG_CRCR)
624 priv->stat.rx_crc_errors++; 624 dev->stats.rx_crc_errors++;
625 } 625 }
626 626
627 /* descriptor processed, will become new last descriptor in queue */ 627 /* descriptor processed, will become new last descriptor in queue */
@@ -656,8 +656,8 @@ static void irqtx_handler(struct net_device *dev)
656 memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); 656 memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
657 657
658 /* update statistics */ 658 /* update statistics */
659 priv->stat.tx_packets++; 659 dev->stats.tx_packets++;
660 priv->stat.tx_bytes += tda.length; 660 dev->stats.tx_bytes += tda.length;
661 661
662 /* update our pointers */ 662 /* update our pointers */
663 priv->txused[priv->currtxdescr] = 0; 663 priv->txused[priv->currtxdescr] = 0;
@@ -680,15 +680,15 @@ static void irqtxerr_handler(struct net_device *dev)
680 memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); 680 memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
681 681
682 /* update statistics */ 682 /* update statistics */
683 priv->stat.tx_errors++; 683 dev->stats.tx_errors++;
684 if (tda.status & (TCREG_NCRS | TCREG_CRSL)) 684 if (tda.status & (TCREG_NCRS | TCREG_CRSL))
685 priv->stat.tx_carrier_errors++; 685 dev->stats.tx_carrier_errors++;
686 if (tda.status & TCREG_EXC) 686 if (tda.status & TCREG_EXC)
687 priv->stat.tx_aborted_errors++; 687 dev->stats.tx_aborted_errors++;
688 if (tda.status & TCREG_OWC) 688 if (tda.status & TCREG_OWC)
689 priv->stat.tx_window_errors++; 689 dev->stats.tx_window_errors++;
690 if (tda.status & TCREG_FU) 690 if (tda.status & TCREG_FU)
691 priv->stat.tx_fifo_errors++; 691 dev->stats.tx_fifo_errors++;
692 692
693 /* update our pointers */ 693 /* update our pointers */
694 priv->txused[priv->currtxdescr] = 0; 694 priv->txused[priv->currtxdescr] = 0;
@@ -824,7 +824,7 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
824 824
825 if (priv->txusedcnt >= TXBUFCNT) { 825 if (priv->txusedcnt >= TXBUFCNT) {
826 retval = -EIO; 826 retval = -EIO;
827 priv->stat.tx_dropped++; 827 dev->stats.tx_dropped++;
828 goto tx_done; 828 goto tx_done;
829 } 829 }
830 830
@@ -876,14 +876,6 @@ tx_done:
876 return retval; 876 return retval;
877} 877}
878 878
879/* return pointer to Ethernet statistics */
880
881static struct net_device_stats *ibmlana_stats(struct net_device *dev)
882{
883 ibmlana_priv *priv = netdev_priv(dev);
884 return &priv->stat;
885}
886
887/* switch receiver mode. */ 879/* switch receiver mode. */
888 880
889static void ibmlana_set_multicast_list(struct net_device *dev) 881static void ibmlana_set_multicast_list(struct net_device *dev)
@@ -978,7 +970,6 @@ static int ibmlana_probe(struct net_device *dev)
978 dev->stop = ibmlana_close; 970 dev->stop = ibmlana_close;
979 dev->hard_start_xmit = ibmlana_tx; 971 dev->hard_start_xmit = ibmlana_tx;
980 dev->do_ioctl = NULL; 972 dev->do_ioctl = NULL;
981 dev->get_stats = ibmlana_stats;
982 dev->set_multicast_list = ibmlana_set_multicast_list; 973 dev->set_multicast_list = ibmlana_set_multicast_list;
983 dev->flags |= IFF_MULTICAST; 974 dev->flags |= IFF_MULTICAST;
984 975