path: root/drivers/net/sunlance.c
author     Jeff Garzik <jeff@garzik.org>                 2007-10-03 20:41:50 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-10-10 19:51:16 -0400
commit     09f75cd7bf13720738e6a196cc0107ce9a5bd5a0 (patch)
tree       4c85b0b395abe7f88c87162fc22570e5de255cb1 /drivers/net/sunlance.c
parent     ff8ac60948ba819b89e9c87083e8050fc2f89999 (diff)
[NET] drivers/net: statistics cleanup #1 -- save memory and shrink code
We now have struct net_device_stats embedded in struct net_device, and the default ->get_stats() hook does the obvious thing for us.

Run through drivers/net/* and remove the driver-local storage of statistics, and driver-local ->get_stats() hook where applicable.

This was just the low-hanging fruit in drivers/net; plenty more drivers remain to be updated.

[ Resolved conflicts with napi_struct changes and fix sunqe build regression... -DaveM ]

Signed-off-by: Jeff Garzik <jeff@garzik.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
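The pattern the series applies is easiest to see in a minimal sketch (driver names such as myeth_private and myeth_get_stats below are hypothetical, not taken from sunlance.c): the driver-private copy of struct net_device_stats and the boilerplate ->get_stats() hook go away, and counters are bumped directly on the stats member embedded in struct net_device, which the networking core returns by default when a driver supplies no ->get_stats() of its own.

#include <linux/netdevice.h>	/* struct net_device, struct net_device_stats, netdev_priv() */

/* Before: the driver kept its own copy of the counters plus a trivial
 * ->get_stats() hook that only returned that copy.
 */
struct myeth_private {			/* hypothetical driver private data */
	struct net_device_stats stats;	/* duplicate storage removed by this series */
	/* ... other driver state ... */
};

static struct net_device_stats *myeth_get_stats(struct net_device *dev)
{
	struct myeth_private *lp = netdev_priv(dev);

	return &lp->stats;		/* pure boilerplate */
}

/* After: increment the counters embedded in struct net_device directly and
 * drop both the private field and the dev->get_stats assignment; the core's
 * default hook already hands back &dev->stats.
 */
static void myeth_count_rx_error(struct net_device *dev)
{
	dev->stats.rx_errors++;
}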
Diffstat (limited to 'drivers/net/sunlance.c')
-rw-r--r--  drivers/net/sunlance.c  87
1 file changed, 39 insertions(+), 48 deletions(-)
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 17d66c1185cd..7bf5c90b7749 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -248,7 +248,6 @@ struct lance_private {
 	int rx_new, tx_new;
 	int rx_old, tx_old;
 
-	struct net_device_stats stats;
 	struct sbus_dma *ledma;	/* If set this points to ledma	*/
 	char tpe;		/* cable-selection is TPE	*/
 	char auto_select;	/* cable-selection by carrier	*/
@@ -519,17 +518,17 @@ static void lance_rx_dvma(struct net_device *dev)
 
 		/* We got an incomplete frame? */
 		if ((bits & LE_R1_POK) != LE_R1_POK) {
-			lp->stats.rx_over_errors++;
-			lp->stats.rx_errors++;
+			dev->stats.rx_over_errors++;
+			dev->stats.rx_errors++;
 		} else if (bits & LE_R1_ERR) {
 			/* Count only the end frame as a rx error,
 			 * not the beginning
 			 */
-			if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
-			if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
-			if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
-			if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
-			if (bits & LE_R1_EOP) lp->stats.rx_errors++;
+			if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+			if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+			if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+			if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+			if (bits & LE_R1_EOP) dev->stats.rx_errors++;
 		} else {
 			len = (rd->mblength & 0xfff) - 4;
 			skb = dev_alloc_skb(len + 2);
@@ -537,14 +536,14 @@ static void lance_rx_dvma(struct net_device *dev)
 			if (skb == NULL) {
 				printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
 				       dev->name);
-				lp->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				rd->mblength = 0;
 				rd->rmd1_bits = LE_R1_OWN;
 				lp->rx_new = RX_NEXT(entry);
 				return;
 			}
 
-			lp->stats.rx_bytes += len;
+			dev->stats.rx_bytes += len;
 
 			skb_reserve(skb, 2);		/* 16 byte align */
 			skb_put(skb, len);		/* make room */
@@ -554,7 +553,7 @@ static void lance_rx_dvma(struct net_device *dev)
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
 			dev->last_rx = jiffies;
-			lp->stats.rx_packets++;
+			dev->stats.rx_packets++;
 		}
 
 		/* Return the packet to the pool */
@@ -586,12 +585,12 @@ static void lance_tx_dvma(struct net_device *dev)
 		if (bits & LE_T1_ERR) {
 			u16 status = td->misc;
 
-			lp->stats.tx_errors++;
-			if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
-			if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
+			dev->stats.tx_errors++;
+			if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+			if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
 
 			if (status & LE_T3_CLOS) {
-				lp->stats.tx_carrier_errors++;
+				dev->stats.tx_carrier_errors++;
 				if (lp->auto_select) {
 					lp->tpe = 1 - lp->tpe;
 					printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
@@ -608,7 +607,7 @@ static void lance_tx_dvma(struct net_device *dev)
 			 * transmitter, restart the adapter.
 			 */
 			if (status & (LE_T3_BUF|LE_T3_UFL)) {
-				lp->stats.tx_fifo_errors++;
+				dev->stats.tx_fifo_errors++;
 
 				printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
 				       dev->name);
@@ -626,13 +625,13 @@ static void lance_tx_dvma(struct net_device *dev)
 
 			/* One collision before packet was sent. */
 			if (bits & LE_T1_EONE)
-				lp->stats.collisions++;
+				dev->stats.collisions++;
 
 			/* More than one collision, be optimistic. */
 			if (bits & LE_T1_EMORE)
-				lp->stats.collisions += 2;
+				dev->stats.collisions += 2;
 
-			lp->stats.tx_packets++;
+			dev->stats.tx_packets++;
 		}
 
 		j = TX_NEXT(j);
@@ -692,17 +691,17 @@ static void lance_rx_pio(struct net_device *dev)
 
 		/* We got an incomplete frame? */
 		if ((bits & LE_R1_POK) != LE_R1_POK) {
-			lp->stats.rx_over_errors++;
-			lp->stats.rx_errors++;
+			dev->stats.rx_over_errors++;
+			dev->stats.rx_errors++;
 		} else if (bits & LE_R1_ERR) {
 			/* Count only the end frame as a rx error,
 			 * not the beginning
 			 */
-			if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
-			if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
-			if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
-			if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
-			if (bits & LE_R1_EOP) lp->stats.rx_errors++;
+			if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+			if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+			if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+			if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+			if (bits & LE_R1_EOP) dev->stats.rx_errors++;
 		} else {
 			len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
 			skb = dev_alloc_skb(len + 2);
@@ -710,14 +709,14 @@ static void lance_rx_pio(struct net_device *dev)
 			if (skb == NULL) {
 				printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
 				       dev->name);
-				lp->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				sbus_writew(0, &rd->mblength);
 				sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
 				lp->rx_new = RX_NEXT(entry);
 				return;
 			}
 
-			lp->stats.rx_bytes += len;
+			dev->stats.rx_bytes += len;
 
 			skb_reserve (skb, 2);		/* 16 byte align */
 			skb_put(skb, len);		/* make room */
@@ -725,7 +724,7 @@ static void lance_rx_pio(struct net_device *dev)
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
 			dev->last_rx = jiffies;
-			lp->stats.rx_packets++;
+			dev->stats.rx_packets++;
 		}
 
 		/* Return the packet to the pool */
@@ -757,12 +756,12 @@ static void lance_tx_pio(struct net_device *dev)
 		if (bits & LE_T1_ERR) {
 			u16 status = sbus_readw(&td->misc);
 
-			lp->stats.tx_errors++;
-			if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
-			if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
+			dev->stats.tx_errors++;
+			if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+			if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
 
 			if (status & LE_T3_CLOS) {
-				lp->stats.tx_carrier_errors++;
+				dev->stats.tx_carrier_errors++;
 				if (lp->auto_select) {
 					lp->tpe = 1 - lp->tpe;
 					printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
@@ -779,7 +778,7 @@ static void lance_tx_pio(struct net_device *dev)
 			 * transmitter, restart the adapter.
 			 */
 			if (status & (LE_T3_BUF|LE_T3_UFL)) {
-				lp->stats.tx_fifo_errors++;
+				dev->stats.tx_fifo_errors++;
 
 				printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
 				       dev->name);
@@ -797,13 +796,13 @@ static void lance_tx_pio(struct net_device *dev)
 
 			/* One collision before packet was sent. */
 			if (bits & LE_T1_EONE)
-				lp->stats.collisions++;
+				dev->stats.collisions++;
 
 			/* More than one collision, be optimistic. */
 			if (bits & LE_T1_EMORE)
-				lp->stats.collisions += 2;
+				dev->stats.collisions += 2;
 
-			lp->stats.tx_packets++;
+			dev->stats.tx_packets++;
 		}
 
 		j = TX_NEXT(j);
@@ -844,10 +843,10 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id)
 		lp->tx(dev);
 
 	if (csr0 & LE_C0_BABL)
-		lp->stats.tx_errors++;
+		dev->stats.tx_errors++;
 
 	if (csr0 & LE_C0_MISS)
-		lp->stats.rx_errors++;
+		dev->stats.rx_errors++;
 
 	if (csr0 & LE_C0_MERR) {
 		if (lp->dregs) {
@@ -1127,7 +1126,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irq(&lp->lock);
 
-	lp->stats.tx_bytes += len;
+	dev->stats.tx_bytes += len;
 
 	entry = lp->tx_new & TX_RING_MOD_MASK;
 	if (lp->pio_buffer) {
@@ -1170,13 +1169,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
-static struct net_device_stats *lance_get_stats(struct net_device *dev)
-{
-	struct lance_private *lp = netdev_priv(dev);
-
-	return &lp->stats;
-}
-
 /* taken from the depca driver */
 static void lance_load_multicast(struct net_device *dev)
 {
@@ -1463,7 +1455,6 @@ no_link_test:
 	dev->hard_start_xmit = &lance_start_xmit;
 	dev->tx_timeout = &lance_tx_timeout;
 	dev->watchdog_timeo = 5*HZ;
-	dev->get_stats = &lance_get_stats;
 	dev->set_multicast_list = &lance_set_multicast;
 	dev->ethtool_ops = &sparc_lance_ethtool_ops;
 