83 files changed, 948 insertions, 1763 deletions
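The pattern applied across all 83 files is the same: each driver drops its private copy of struct net_device_stats (and the trivial get_stats handler that only returned it) and instead bumps the counters already embedded in struct net_device. A minimal sketch of the before/after shape follows; it is illustrative only, and the names example_get_stats and example_count_rx do not appear in any of the drivers touched here.

#include <linux/netdevice.h>

/* Before: the driver keeps its own counters and a trivial accessor. */
struct net_local {
	struct net_device_stats stats;
	/* ... other per-board state ... */
};

static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	return &lp->stats;
}

/* After: the private copy and the hook are gone; counters are bumped
 * directly in the net_device, so dev->get_stats need not be set. */
static void example_count_rx(struct net_device *dev, unsigned int len)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}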
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 98e0bc4628a2..be71868d1513 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -315,7 +315,6 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr)
 	dev->tx_timeout = &el_timeout;
 	dev->watchdog_timeo = HZ;
 	dev->stop = &el1_close;
-	dev->get_stats = &el1_get_stats;
 	dev->set_multicast_list = &set_multicast_list;
 	dev->ethtool_ops = &netdev_ethtool_ops;
 	return 0;
@@ -374,7 +373,7 @@ static void el_timeout(struct net_device *dev)
 	if (el_debug)
 		printk (KERN_DEBUG "%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
 			dev->name, inb(TX_STATUS), inb(AX_STATUS), inb(RX_STATUS));
-	lp->stats.tx_errors++;
+	dev->stats.tx_errors++;
 	outb(TX_NORM, TX_CMD);
 	outb(RX_NORM, RX_CMD);
 	outb(AX_OFF, AX_CMD);	/* Just trigger a false interrupt. */
@@ -441,7 +440,7 @@ static int el_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	lp->tx_pkt_start = gp_start;
 	lp->collisions = 0;
 
-	lp->stats.tx_bytes += skb->len;
+	dev->stats.tx_bytes += skb->len;
 
 	/*
 	 * Command mode with status cleared should [in theory]
@@ -588,7 +587,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
 				printk (KERN_DEBUG "%s: Transmit failed 16 times, Ethernet jammed?\n",dev->name);
 			outb(AX_SYS, AX_CMD);
 			lp->txing = 0;
-			lp->stats.tx_aborted_errors++;
+			dev->stats.tx_aborted_errors++;
 			netif_wake_queue(dev);
 		}
 		else if (txsr & TX_COLLISION)
@@ -606,7 +605,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
 			outb(AX_SYS, AX_CMD);
 			outw(lp->tx_pkt_start, GP_LOW);
 			outb(AX_XMIT, AX_CMD);
-			lp->stats.collisions++;
+			dev->stats.collisions++;
 			spin_unlock(&lp->lock);
 			goto out;
 		}
@@ -615,7 +614,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
 			/*
 			 * It worked.. we will now fall through and receive
 			 */
-			lp->stats.tx_packets++;
+			dev->stats.tx_packets++;
 			if (el_debug > 6)
 				printk(KERN_DEBUG " Tx succeeded %s\n",
 					(txsr & TX_RDY) ? "." : "but tx is busy!");
@@ -640,10 +639,10 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
 		 * Just reading rx_status fixes most errors.
 		 */
 		if (rxsr & RX_MISSED)
-			lp->stats.rx_missed_errors++;
+			dev->stats.rx_missed_errors++;
 		else if (rxsr & RX_RUNT)
 		{	/* Handled to avoid board lock-up. */
-			lp->stats.rx_length_errors++;
+			dev->stats.rx_length_errors++;
 			if (el_debug > 5)
 				printk(KERN_DEBUG " runt.\n");
 		}
@@ -694,7 +693,6 @@ out:
 
 static void el_receive(struct net_device *dev)
 {
-	struct net_local *lp = netdev_priv(dev);
 	int ioaddr = dev->base_addr;
 	int pkt_len;
 	struct sk_buff *skb;
@@ -708,7 +706,7 @@ static void el_receive(struct net_device *dev)
 	{
 		if (el_debug)
 			printk(KERN_DEBUG "%s: bogus packet, length=%d\n", dev->name, pkt_len);
-		lp->stats.rx_over_errors++;
+		dev->stats.rx_over_errors++;
 		return;
 	}
 
@@ -727,7 +725,7 @@ static void el_receive(struct net_device *dev)
 	if (skb == NULL)
 	{
 		printk(KERN_INFO "%s: Memory squeeze, dropping packet.\n", dev->name);
-		lp->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 		return;
 	}
 	else
@@ -742,8 +740,8 @@ static void el_receive(struct net_device *dev)
 		skb->protocol=eth_type_trans(skb,dev);
 		netif_rx(skb);
 		dev->last_rx = jiffies;
-		lp->stats.rx_packets++;
-		lp->stats.rx_bytes+=pkt_len;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes+=pkt_len;
 	}
 	return;
 }
@@ -811,23 +809,6 @@ static int el1_close(struct net_device *dev)
 }
 
 /**
- * el1_get_stats:
- * @dev: The card to get the statistics for
- *
- * In smarter devices this function is needed to pull statistics off the
- * board itself. The 3c501 has no hardware statistics. We maintain them all
- * so they are by definition always up to date.
- *
- * Returns the statistics for the card from the card private data
- */
-
-static struct net_device_stats *el1_get_stats(struct net_device *dev)
-{
-	struct net_local *lp = netdev_priv(dev);
-	return &lp->stats;
-}
-
-/**
  * set_multicast_list:
  * @dev: The device to adjust
  *
diff --git a/drivers/net/3c501.h b/drivers/net/3c501.h
index c56a2c62f7de..cfec64efff78 100644
--- a/drivers/net/3c501.h
+++ b/drivers/net/3c501.h
@@ -11,7 +11,6 @@ static irqreturn_t el_interrupt(int irq, void *dev_id);
 static void el_receive(struct net_device *dev);
 static void el_reset(struct net_device *dev);
 static int el1_close(struct net_device *dev);
-static struct net_device_stats *el1_get_stats(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
 static const struct ethtool_ops netdev_ethtool_ops;
 
@@ -29,7 +28,6 @@ static int el_debug = EL_DEBUG;
 
 struct net_local
 {
-	struct net_device_stats stats;
 	int tx_pkt_start;	/* The length of the current Tx packet. */
 	int collisions;		/* Tx collisions this packet */
 	int loading;		/* Spot buffer load collisions */
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index fac6edff2b8b..3d06271c3a8b 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -118,7 +118,6 @@ enum commands {
 
 /* Information that need to be kept for each board. */
 struct net_local {
-	struct net_device_stats stats;
 	int last_restart;
 	ushort rx_head;
 	ushort rx_tail;
@@ -289,7 +288,6 @@ static int el16_send_packet(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t el16_interrupt(int irq, void *dev_id);
 static void el16_rx(struct net_device *dev);
 static int el16_close(struct net_device *dev);
-static struct net_device_stats *el16_get_stats(struct net_device *dev);
 static void el16_tx_timeout (struct net_device *dev);
 
 static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad);
@@ -455,7 +453,6 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
 	dev->open = el16_open;
 	dev->stop = el16_close;
 	dev->hard_start_xmit = el16_send_packet;
-	dev->get_stats = el16_get_stats;
 	dev->tx_timeout = el16_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
 	dev->ethtool_ops = &netdev_ethtool_ops;
@@ -489,7 +486,7 @@ static void el16_tx_timeout (struct net_device *dev)
 		       readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" :
 		       "network cable problem");
 	/* Try to restart the adaptor. */
-	if (lp->last_restart == lp->stats.tx_packets) {
+	if (lp->last_restart == dev->stats.tx_packets) {
 		if (net_debug > 1)
 			printk ("Resetting board.\n");
 		/* Completely reset the adaptor. */
@@ -501,7 +498,7 @@ static void el16_tx_timeout (struct net_device *dev)
 			printk ("Kicking board.\n");
 		writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD);
 		outb (0, ioaddr + SIGNAL_CA);	/* Issue channel-attn. */
-		lp->last_restart = lp->stats.tx_packets;
+		lp->last_restart = dev->stats.tx_packets;
 	}
 	dev->trans_start = jiffies;
 	netif_wake_queue (dev);
@@ -520,7 +517,7 @@ static int el16_send_packet (struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave (&lp->lock, flags);
 
-	lp->stats.tx_bytes += length;
+	dev->stats.tx_bytes += length;
 	/* Disable the 82586's input to the interrupt line. */
 	outb (0x80, ioaddr + MISC_CTRL);
 
@@ -579,14 +576,14 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
 			}
 			/* Tx unsuccessful or some interesting status bit set. */
 			if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) {
-				lp->stats.tx_errors++;
-				if (tx_status & 0x0600) lp->stats.tx_carrier_errors++;
-				if (tx_status & 0x0100) lp->stats.tx_fifo_errors++;
-				if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++;
-				if (tx_status & 0x0020) lp->stats.tx_aborted_errors++;
-				lp->stats.collisions += tx_status & 0xf;
+				dev->stats.tx_errors++;
+				if (tx_status & 0x0600) dev->stats.tx_carrier_errors++;
+				if (tx_status & 0x0100) dev->stats.tx_fifo_errors++;
+				if (!(tx_status & 0x0040)) dev->stats.tx_heartbeat_errors++;
+				if (tx_status & 0x0020) dev->stats.tx_aborted_errors++;
+				dev->stats.collisions += tx_status & 0xf;
 			}
-			lp->stats.tx_packets++;
+			dev->stats.tx_packets++;
 			if (net_debug > 5)
 				printk("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
 			lp->tx_reap += TX_BUF_SIZE;
@@ -665,17 +662,6 @@ static int el16_close(struct net_device *dev)
 	return 0;
 }
 
-/* Get the current statistics. This may be called with the card open or
-   closed. */
-static struct net_device_stats *el16_get_stats(struct net_device *dev)
-{
-	struct net_local *lp = netdev_priv(dev);
-
-	/* ToDo: decide if there are any useful statistics from the SCB. */
-
-	return &lp->stats;
-}
-
 /* Initialize the Rx-block list. */
 static void init_rx_bufs(struct net_device *dev)
 {
@@ -852,12 +838,12 @@ static void el16_rx(struct net_device *dev)
 				   pkt_len);
 		} else if ((frame_status & 0x2000) == 0) {
 			/* Frame Rxed, but with error. */
-			lp->stats.rx_errors++;
-			if (frame_status & 0x0800) lp->stats.rx_crc_errors++;
-			if (frame_status & 0x0400) lp->stats.rx_frame_errors++;
-			if (frame_status & 0x0200) lp->stats.rx_fifo_errors++;
-			if (frame_status & 0x0100) lp->stats.rx_over_errors++;
-			if (frame_status & 0x0080) lp->stats.rx_length_errors++;
+			dev->stats.rx_errors++;
+			if (frame_status & 0x0800) dev->stats.rx_crc_errors++;
+			if (frame_status & 0x0400) dev->stats.rx_frame_errors++;
+			if (frame_status & 0x0200) dev->stats.rx_fifo_errors++;
+			if (frame_status & 0x0100) dev->stats.rx_over_errors++;
+			if (frame_status & 0x0080) dev->stats.rx_length_errors++;
 		} else {
 			/* Malloc up new buffer. */
 			struct sk_buff *skb;
@@ -866,7 +852,7 @@ static void el16_rx(struct net_device *dev)
 			skb = dev_alloc_skb(pkt_len+2);
 			if (skb == NULL) {
 				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
-				lp->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				break;
 			}
 
@@ -878,8 +864,8 @@ static void el16_rx(struct net_device *dev)
 			skb->protocol=eth_type_trans(skb,dev);
 			netif_rx(skb);
 			dev->last_rx = jiffies;
-			lp->stats.rx_packets++;
-			lp->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 		}
 
 		/* Clear the status word and set End-of-List on the rx frame. */
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index e89ace109a5d..224e0bff1ae0 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -305,18 +305,18 @@ static int lance_rx (struct net_device *dev)
 
 		/* We got an incomplete frame? */
 		if ((bits & LE_R1_POK) != LE_R1_POK) {
-			lp->stats.rx_over_errors++;
-			lp->stats.rx_errors++;
+			dev->stats.rx_over_errors++;
+			dev->stats.rx_errors++;
 			continue;
 		} else if (bits & LE_R1_ERR) {
 			/* Count only the end frame as a rx error,
 			 * not the beginning
 			 */
-			if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
-			if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
-			if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
-			if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
-			if (bits & LE_R1_EOP) lp->stats.rx_errors++;
+			if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+			if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+			if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+			if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+			if (bits & LE_R1_EOP) dev->stats.rx_errors++;
 		} else {
 			len = (rd->mblength & 0xfff) - 4;
 			skb = dev_alloc_skb (len+2);
@@ -324,7 +324,7 @@ static int lance_rx (struct net_device *dev)
 			if (skb == 0) {
 				printk ("%s: Memory squeeze, deferring packet.\n",
 					dev->name);
-				lp->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				rd->mblength = 0;
 				rd->rmd1_bits = LE_R1_OWN;
 				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
@@ -339,8 +339,8 @@ static int lance_rx (struct net_device *dev)
 			skb->protocol = eth_type_trans (skb, dev);
 			netif_rx (skb);
 			dev->last_rx = jiffies;
-			lp->stats.rx_packets++;
-			lp->stats.rx_bytes += len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += len;
 		}
 
 		/* Return the packet to the pool */
@@ -377,12 +377,12 @@ static int lance_tx (struct net_device *dev)
 		if (td->tmd1_bits & LE_T1_ERR) {
 			status = td->misc;
 
-			lp->stats.tx_errors++;
-			if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
-			if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
+			dev->stats.tx_errors++;
+			if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+			if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
 
 			if (status & LE_T3_CLOS) {
-				lp->stats.tx_carrier_errors++;
+				dev->stats.tx_carrier_errors++;
 				if (lp->auto_select) {
 					lp->tpe = 1 - lp->tpe;
 					printk("%s: Carrier Lost, trying %s\n",
@@ -400,7 +400,7 @@ static int lance_tx (struct net_device *dev)
 			/* buffer errors and underflows turn off the transmitter */
 			/* Restart the adapter */
 			if (status & (LE_T3_BUF|LE_T3_UFL)) {
-				lp->stats.tx_fifo_errors++;
+				dev->stats.tx_fifo_errors++;
 
 				printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
 					dev->name);
@@ -420,13 +420,13 @@ static int lance_tx (struct net_device *dev)
 
 			/* One collision before packet was sent. */
 			if (td->tmd1_bits & LE_T1_EONE)
-				lp->stats.collisions++;
+				dev->stats.collisions++;
 
 			/* More than one collision, be optimistic. */
 			if (td->tmd1_bits & LE_T1_EMORE)
-				lp->stats.collisions += 2;
+				dev->stats.collisions += 2;
 
-			lp->stats.tx_packets++;
+			dev->stats.tx_packets++;
 		}
 
 		j = (j + 1) & lp->tx_ring_mod_mask;
@@ -471,9 +471,9 @@ lance_interrupt (int irq, void *dev_id)
 
 	/* Log misc errors. */
 	if (csr0 & LE_C0_BABL)
-		lp->stats.tx_errors++;		/* Tx babble. */
+		dev->stats.tx_errors++;		/* Tx babble. */
 	if (csr0 & LE_C0_MISS)
-		lp->stats.rx_errors++;		/* Missed a Rx frame. */
+		dev->stats.rx_errors++;		/* Missed a Rx frame. */
 	if (csr0 & LE_C0_MERR) {
 		printk("%s: Bus master arbitration failure, status %4.4x.\n",
 		       dev->name, csr0);
@@ -589,13 +589,6 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
-struct net_device_stats *lance_get_stats (struct net_device *dev)
-{
-	struct lance_private *lp = netdev_priv(dev);
-
-	return &lp->stats;
-}
-
 /* taken from the depca driver via a2065.c */
 static void lance_load_multicast (struct net_device *dev)
 {
diff --git a/drivers/net/7990.h b/drivers/net/7990.h
index b1212b5ed92f..0a5837b96421 100644
--- a/drivers/net/7990.h
+++ b/drivers/net/7990.h
@@ -111,7 +111,6 @@ struct lance_private
 	int lance_log_rx_bufs, lance_log_tx_bufs;
 	int rx_ring_mod_mask, tx_ring_mod_mask;
 
-	struct net_device_stats stats;
 	int tpe;			/* TPE is selected */
 	int auto_select;		/* cable-selection is by carrier */
 	unsigned short busmaster_regval;
@@ -246,7 +245,6 @@ struct lance_private
 extern int lance_open(struct net_device *dev);
 extern int lance_close (struct net_device *dev);
 extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-extern struct net_device_stats *lance_get_stats (struct net_device *dev);
 extern void lance_set_multicast (struct net_device *dev);
 extern void lance_tx_timeout(struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 43dffdca708f..6b03416731de 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -326,7 +326,6 @@ struct i596_private {
 	struct i596_cmd *cmd_head;
 	int cmd_backlog;
 	unsigned long last_cmd;
-	struct net_device_stats stats;
 	struct i596_rfd rfds[RX_RING_SIZE];
 	struct i596_rbd rbds[RX_RING_SIZE];
 	struct tx_cmd tx_cmds[TX_RING_SIZE];
@@ -360,7 +359,6 @@ static int i596_open(struct net_device *dev);
 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t i596_interrupt(int irq, void *dev_id);
 static int i596_close(struct net_device *dev);
-static struct net_device_stats *i596_get_stats(struct net_device *dev);
 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
 static void i596_tx_timeout (struct net_device *dev);
 static void print_eth(unsigned char *buf, char *str);
@@ -828,7 +826,7 @@ memory_squeeze:
 			if (skb == NULL) {
 				/* XXX tulip.c can defer packets here!! */
 				printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
-				lp->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 			}
 			else {
 				if (!rx_in_place) {
@@ -844,28 +842,28 @@
 #endif
 				netif_rx(skb);
 				dev->last_rx = jiffies;
-				lp->stats.rx_packets++;
-				lp->stats.rx_bytes+=pkt_len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes+=pkt_len;
 			}
 		}
 		else {
 			DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
 					dev->name, rfd->stat));
-			lp->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			if ((rfd->stat) & 0x0001)
-				lp->stats.collisions++;
+				dev->stats.collisions++;
 			if ((rfd->stat) & 0x0080)
-				lp->stats.rx_length_errors++;
+				dev->stats.rx_length_errors++;
 			if ((rfd->stat) & 0x0100)
-				lp->stats.rx_over_errors++;
+				dev->stats.rx_over_errors++;
 			if ((rfd->stat) & 0x0200)
-				lp->stats.rx_fifo_errors++;
+				dev->stats.rx_fifo_errors++;
 			if ((rfd->stat) & 0x0400)
-				lp->stats.rx_frame_errors++;
+				dev->stats.rx_frame_errors++;
 			if ((rfd->stat) & 0x0800)
-				lp->stats.rx_crc_errors++;
+				dev->stats.rx_crc_errors++;
 			if ((rfd->stat) & 0x1000)
-				lp->stats.rx_length_errors++;
+				dev->stats.rx_length_errors++;
 		}
 
 		/* Clear the buffer descriptor count and EOF + F flags */
@@ -916,8 +914,8 @@ static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
 
 			dev_kfree_skb(skb);
 
-			lp->stats.tx_errors++;
-			lp->stats.tx_aborted_errors++;
+			dev->stats.tx_errors++;
+			dev->stats.tx_aborted_errors++;
 
 			ptr->v_next = ptr->b_next = I596_NULL;
 			tx_cmd->cmd.command = 0;	/* Mark as free */
@@ -1038,10 +1036,10 @@ static void i596_tx_timeout (struct net_device *dev)
 	DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
 			dev->name));
 
-	lp->stats.tx_errors++;
+	dev->stats.tx_errors++;
 
 	/* Try to restart the adaptor */
-	if (lp->last_restart == lp->stats.tx_packets) {
+	if (lp->last_restart == dev->stats.tx_packets) {
 		DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
 		/* Shutdown and restart */
 		i596_reset (dev, lp, ioaddr);
@@ -1050,7 +1048,7 @@ static void i596_tx_timeout (struct net_device *dev)
 		DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
 		lp->scb.command = CUC_START | RX_START;
 		CA (dev);
-		lp->last_restart = lp->stats.tx_packets;
+		lp->last_restart = dev->stats.tx_packets;
 	}
 
 	dev->trans_start = jiffies;
@@ -1082,7 +1080,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (tx_cmd->cmd.command) {
 		printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
 		       dev->name);
-		lp->stats.tx_dropped++;
+		dev->stats.tx_dropped++;
 
 		dev_kfree_skb(skb);
 	} else {
@@ -1107,8 +1105,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
 		i596_add_cmd(dev, &tx_cmd->cmd);
 
-		lp->stats.tx_packets++;
-		lp->stats.tx_bytes += length;
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += length;
 	}
 
 	netif_start_queue(dev);
@@ -1237,7 +1235,6 @@ struct net_device * __init i82596_probe(int unit)
 	dev->open = i596_open;
 	dev->stop = i596_close;
 	dev->hard_start_xmit = i596_start_xmit;
-	dev->get_stats = i596_get_stats;
 	dev->set_multicast_list = set_multicast_list;
 	dev->tx_timeout = i596_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
@@ -1343,17 +1340,17 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 			if ((ptr->status) & STAT_OK) {
 				DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
 			} else {
-				lp->stats.tx_errors++;
+				dev->stats.tx_errors++;
 				if ((ptr->status) & 0x0020)
-					lp->stats.collisions++;
+					dev->stats.collisions++;
 				if (!((ptr->status) & 0x0040))
-					lp->stats.tx_heartbeat_errors++;
+					dev->stats.tx_heartbeat_errors++;
 				if ((ptr->status) & 0x0400)
-					lp->stats.tx_carrier_errors++;
+					dev->stats.tx_carrier_errors++;
 				if ((ptr->status) & 0x0800)
-					lp->stats.collisions++;
+					dev->stats.collisions++;
 				if ((ptr->status) & 0x1000)
-					lp->stats.tx_aborted_errors++;
+					dev->stats.tx_aborted_errors++;
 			}
 
 			dev_kfree_skb_irq(skb);
@@ -1408,8 +1405,8 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 		if (netif_running(dev)) {
 			DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
 			ack_cmd |= RX_START;
-			lp->stats.rx_errors++;
-			lp->stats.rx_fifo_errors++;
+			dev->stats.rx_errors++;
+			dev->stats.rx_fifo_errors++;
 			rebuild_rx_bufs(dev);
 		}
 	}
@@ -1492,14 +1489,6 @@ static int i596_close(struct net_device *dev)
 	return 0;
 }
 
-static struct net_device_stats *
-i596_get_stats(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-
-	return &lp->stats;
-}
-
 /*
  * Set or clear the multicast filter for this adaptor.
  */
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index fa0c6cb3d798..77773ce52eff 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -119,7 +119,6 @@ struct lance_private {
 	int lance_log_rx_bufs, lance_log_tx_bufs;
 	int rx_ring_mod_mask, tx_ring_mod_mask;
 
-	struct net_device_stats stats;
 	int tpe;			/* cable-selection is TPE */
 	int auto_select;		/* cable-selection by carrier */
 	unsigned short busmaster_regval;
@@ -294,18 +293,18 @@ static int lance_rx (struct net_device *dev)
 
 		/* We got an incomplete frame? */
 		if ((bits & LE_R1_POK) != LE_R1_POK) {
-			lp->stats.rx_over_errors++;
-			lp->stats.rx_errors++;
+			dev->stats.rx_over_errors++;
+			dev->stats.rx_errors++;
 			continue;
 		} else if (bits & LE_R1_ERR) {
 			/* Count only the end frame as a rx error,
 			 * not the beginning
 			 */
-			if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
-			if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
-			if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
-			if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
-			if (bits & LE_R1_EOP) lp->stats.rx_errors++;
+			if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+			if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+			if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+			if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+			if (bits & LE_R1_EOP) dev->stats.rx_errors++;
 		} else {
 			len = (rd->mblength & 0xfff) - 4;
 			skb = dev_alloc_skb (len+2);
@@ -313,7 +312,7 @@ static int lance_rx (struct net_device *dev)
 			if (skb == 0) {
 				printk(KERN_WARNING "%s: Memory squeeze, "
 				       "deferring packet.\n", dev->name);
-				lp->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				rd->mblength = 0;
 				rd->rmd1_bits = LE_R1_OWN;
 				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
@@ -328,8 +327,8 @@ static int lance_rx (struct net_device *dev)
 			skb->protocol = eth_type_trans (skb, dev);
 			netif_rx (skb);
 			dev->last_rx = jiffies;
-			lp->stats.rx_packets++;
-			lp->stats.rx_bytes += len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += len;
 		}
 
 		/* Return the packet to the pool */
@@ -364,12 +363,12 @@ static int lance_tx (struct net_device *dev)
 		if (td->tmd1_bits & LE_T1_ERR) {
 			status = td->misc;
 
-			lp->stats.tx_errors++;
-			if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
-			if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
+			dev->stats.tx_errors++;
+			if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+			if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
 
 			if (status & LE_T3_CLOS) {
-				lp->stats.tx_carrier_errors++;
+				dev->stats.tx_carrier_errors++;
 				if (lp->auto_select) {
 					lp->tpe = 1 - lp->tpe;
 					printk(KERN_ERR "%s: Carrier Lost, "
@@ -388,7 +387,7 @@ static int lance_tx (struct net_device *dev)
 			/* buffer errors and underflows turn off the transmitter */
 			/* Restart the adapter */
 			if (status & (LE_T3_BUF|LE_T3_UFL)) {
-				lp->stats.tx_fifo_errors++;
+				dev->stats.tx_fifo_errors++;
 
 				printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, "
 				       "restarting\n", dev->name);
@@ -408,13 +407,13 @@ static int lance_tx (struct net_device *dev)
 
 			/* One collision before packet was sent. */
 			if (td->tmd1_bits & LE_T1_EONE)
-				lp->stats.collisions++;
+				dev->stats.collisions++;
 
 			/* More than one collision, be optimistic. */
 			if (td->tmd1_bits & LE_T1_EMORE)
-				lp->stats.collisions += 2;
+				dev->stats.collisions += 2;
 
-			lp->stats.tx_packets++;
+			dev->stats.tx_packets++;
 		}
 
 		j = (j + 1) & lp->tx_ring_mod_mask;
@@ -459,9 +458,9 @@ static irqreturn_t lance_interrupt (int irq, void *dev_id)
 
 	/* Log misc errors. */
 	if (csr0 & LE_C0_BABL)
-		lp->stats.tx_errors++;		/* Tx babble. */
+		dev->stats.tx_errors++;		/* Tx babble. */
 	if (csr0 & LE_C0_MISS)
-		lp->stats.rx_errors++;		/* Missed a Rx frame. */
+		dev->stats.rx_errors++;		/* Missed a Rx frame. */
 	if (csr0 & LE_C0_MERR) {
 		printk(KERN_ERR "%s: Bus master arbitration failure, status "
 		       "%4.4x.\n", dev->name, csr0);
@@ -606,7 +605,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
 	/* Now, give the packet to the lance */
 	ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
 	lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
-	lp->stats.tx_bytes += skblen;
+	dev->stats.tx_bytes += skblen;
 
 	if (TX_BUFFS_AVAIL <= 0)
 		netif_stop_queue(dev);
@@ -621,13 +620,6 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
 	return status;
 }
 
-static struct net_device_stats *lance_get_stats (struct net_device *dev)
-{
-	struct lance_private *lp = netdev_priv(dev);
-
-	return &lp->stats;
-}
-
 /* taken from the depca driver */
 static void lance_load_multicast (struct net_device *dev)
 {
@@ -782,7 +774,6 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
 	dev->hard_start_xmit = &lance_start_xmit;
 	dev->tx_timeout = &lance_tx_timeout;
 	dev->watchdog_timeo = 5*HZ;
-	dev->get_stats = &lance_get_stats;
 	dev->set_multicast_list = &lance_set_multicast;
 	dev->dma = 0;
 
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index d20148e69fa1..a124fdb2bce6 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -109,7 +109,6 @@ typedef unsigned char uchar;
 
 /* Information that need to be kept for each board. */
 struct net_local {
-	struct net_device_stats stats;
 	spinlock_t lock;
 	unsigned char mc_filter[8];
 	uint jumpered:1;		/* Set iff the board has jumper config. */
@@ -164,7 +163,6 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t net_interrupt(int irq, void *dev_id);
 static void net_rx(struct net_device *dev);
 static int net_close(struct net_device *dev);
-static struct net_device_stats *net_get_stats(struct net_device *dev);
 static void set_rx_mode(struct net_device *dev);
 static void net_tx_timeout (struct net_device *dev);
 
@@ -456,7 +454,6 @@ found:
 	dev->open = net_open;
 	dev->stop = net_close;
 	dev->hard_start_xmit = net_send_packet;
-	dev->get_stats = net_get_stats;
 	dev->set_multicast_list = &set_rx_mode;
 	dev->tx_timeout = net_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
@@ -571,7 +568,7 @@ static void net_tx_timeout (struct net_device *dev)
 		   dev->name, inw(ioaddr + TX_STATUS), inw(ioaddr + TX_INTR), inw(ioaddr + TX_MODE),
 		   inw(ioaddr + CONFIG_0), inw(ioaddr + DATAPORT), inw(ioaddr + TX_START),
 		   inw(ioaddr + MODE13 - 1), inw(ioaddr + RX_CTRL));
-	lp->stats.tx_errors++;
+	dev->stats.tx_errors++;
 	/* ToDo: We should try to restart the adaptor... */
 	outw(0xffff, ioaddr + MODE24);
 	outw (0xffff, ioaddr + TX_STATUS);
@@ -691,10 +688,10 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
 				printk("%s: 16 Collision occur during Txing.\n", dev->name);
 			/* Cancel sending a packet. */
 			outb(0x03, ioaddr + COL16CNTL);
-			lp->stats.collisions++;
+			dev->stats.collisions++;
 		}
 		if (status & 0x82) {
-			lp->stats.tx_packets++;
+			dev->stats.tx_packets++;
 			/* The Tx queue has any packets and is not being
 			   transferred a packet from the host, start
 			   transmitting. */
@@ -719,7 +716,6 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
 static void
 net_rx(struct net_device *dev)
 {
-	struct net_local *lp = netdev_priv(dev);
 	int ioaddr = dev->base_addr;
 	int boguscount = 5;
 
@@ -738,11 +734,11 @@ net_rx(struct net_device *dev)
 #endif
 
 		if ((status & 0xF0) != 0x20) {	/* There was an error. */
-			lp->stats.rx_errors++;
-			if (status & 0x08) lp->stats.rx_length_errors++;
-			if (status & 0x04) lp->stats.rx_frame_errors++;
-			if (status & 0x02) lp->stats.rx_crc_errors++;
-			if (status & 0x01) lp->stats.rx_over_errors++;
+			dev->stats.rx_errors++;
+			if (status & 0x08) dev->stats.rx_length_errors++;
+			if (status & 0x04) dev->stats.rx_frame_errors++;
+			if (status & 0x02) dev->stats.rx_crc_errors++;
+			if (status & 0x01) dev->stats.rx_over_errors++;
 		} else {
 			/* Malloc up new buffer. */
 			struct sk_buff *skb;
@@ -753,7 +749,7 @@ net_rx(struct net_device *dev)
 				/* Prime the FIFO and then flush the packet. */
 				inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
 				outb(0x05, ioaddr + RX_CTRL);
-				lp->stats.rx_errors++;
+				dev->stats.rx_errors++;
 				break;
 			}
 			skb = dev_alloc_skb(pkt_len+3);
@@ -763,7 +759,7 @@ net_rx(struct net_device *dev)
 				/* Prime the FIFO and then flush the packet. */
 				inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
 				outb(0x05, ioaddr + RX_CTRL);
-				lp->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				break;
 			}
 			skb_reserve(skb,2);
@@ -772,8 +768,8 @@ net_rx(struct net_device *dev)
 			skb->protocol=eth_type_trans(skb, dev);
 			netif_rx(skb);
 			dev->last_rx = jiffies;
-			lp->stats.rx_packets++;
-			lp->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 		}
 		if (--boguscount <= 0)
 			break;
@@ -822,17 +818,6 @@ static int net_close(struct net_device *dev)
 	return 0;
 }
 
-/* Get the current statistics.
-   This may be called with the card open or closed.
-   There are no on-chip counters, so this function is trivial.
-*/
-static struct net_device_stats *
-net_get_stats(struct net_device *dev)
-{
-	struct net_local *lp = netdev_priv(dev);
-	return &lp->stats;
-}
-
 /*
   Set the multicast/promiscuous mode for this adaptor.
 */
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 17b9dbf7bd68..8bf548e1cb4e 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -224,7 +224,6 @@ struct lance_private {
 	int		dirty_tx;	/* Ring entries to be freed. */
 	/* copy function */
 	void		*(*memcpy_f)( void *, const void *, size_t );
-	struct net_device_stats stats;
 	/* This must be long for set_bit() */
 	long		tx_full;
 	spinlock_t	devlock;
@@ -347,7 +346,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
 static irqreturn_t lance_interrupt( int irq, void *dev_id );
 static int lance_rx( struct net_device *dev );
 static int lance_close( struct net_device *dev );
-static struct net_device_stats *lance_get_stats( struct net_device *dev );
 static void set_multicast_list( struct net_device *dev );
 static int lance_set_mac_address( struct net_device *dev, void *addr );
 static void lance_tx_timeout (struct net_device *dev);
@@ -631,7 +629,6 @@ static unsigned long __init lance_probe1( struct net_device *dev,
 	dev->open = &lance_open;
 	dev->hard_start_xmit = &lance_start_xmit;
 	dev->stop = &lance_close;
-	dev->get_stats = &lance_get_stats;
 	dev->set_multicast_list = &set_multicast_list;
 	dev->set_mac_address = &lance_set_mac_address;
 
@@ -639,13 +636,6 @@ static unsigned long __init lance_probe1( struct net_device *dev,
 	dev->tx_timeout = lance_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
-
-#if 0
-	dev->start = 0;
-#endif
-
-	memset( &lp->stats, 0, sizeof(lp->stats) );
-
 	return( 1 );
 }
 
@@ -753,7 +743,7 @@ static void lance_tx_timeout (struct net_device *dev)
 	 * little endian mode.
 	 */
 	REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
-	lp->stats.tx_errors++;
+	dev->stats.tx_errors++;
 #ifndef final_version
 	{	int i;
 		DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n",
@@ -841,7 +831,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
 	head->misc = 0;
 	lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
 	head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
-	lp->stats.tx_bytes += skb->len;
+	dev->stats.tx_bytes += skb->len;
 	dev_kfree_skb( skb );
 	lp->cur_tx++;
 	while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
@@ -912,13 +902,13 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
 				if (status & TMD1_ERR) {
 					/* There was an major error, log it. */
 					int err_status = MEM->tx_head[entry].misc;
-					lp->stats.tx_errors++;
-					if (err_status & TMD3_RTRY) lp->stats.tx_aborted_errors++;
-					if (err_status & TMD3_LCAR) lp->stats.tx_carrier_errors++;
-					if (err_status & TMD3_LCOL) lp->stats.tx_window_errors++;
+					dev->stats.tx_errors++;
+					if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
+					if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
+					if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++;
 					if (err_status & TMD3_UFLO) {
 						/* Ackk! On FIFO errors the Tx unit is turned off! */
-						lp->stats.tx_fifo_errors++;
+						dev->stats.tx_fifo_errors++;
 						/* Remove this verbosity later! */
 						DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n",
 									  dev->name, csr0 ));
@@ -927,8 +917,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
 					}
 				} else {
 					if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF))
-						lp->stats.collisions++;
-					lp->stats.tx_packets++;
+						dev->stats.collisions++;
+					dev->stats.tx_packets++;
 				}
 
 				/* XXX MSch: free skb?? */
@@ -955,8 +945,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
 		}
 
 		/* Log misc errors. */
-		if (csr0 & CSR0_BABL) lp->stats.tx_errors++; /* Tx babble. */
-		if (csr0 & CSR0_MISS) lp->stats.rx_errors++; /* Missed a Rx frame. */
+		if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
+		if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
 		if (csr0 & CSR0_MERR) {
 			DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
 						  "status %04x.\n", dev->name, csr0 ));
@@ -997,11 +987,11 @@ static int lance_rx( struct net_device *dev )
 			   buffers it's possible for a jabber packet to use two
 			   buffers, with only the last correctly noting the error. */
 			if (status & RMD1_ENP)	/* Only count a general error at the */
-				lp->stats.rx_errors++; /* end of a packet.*/
-			if (status & RMD1_FRAM) lp->stats.rx_frame_errors++;
-			if (status & RMD1_OFLO) lp->stats.rx_over_errors++;
-			if (status & RMD1_CRC) lp->stats.rx_crc_errors++;
-			if (status & RMD1_BUFF) lp->stats.rx_fifo_errors++;
+				dev->stats.rx_errors++; /* end of a packet.*/
+			if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
+			if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
+			if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
+			if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
 			head->flag &= (RMD1_ENP|RMD1_STP);
 		} else {
 			/* Malloc up new buffer, compatible with net-3. */
@@ -1010,7 +1000,7 @@ static int lance_rx( struct net_device *dev )
 
 			if (pkt_len < 60) {
 				printk( "%s: Runt packet!\n", dev->name );
-				lp->stats.rx_errors++;
+				dev->stats.rx_errors++;
 			}
 			else {
 				skb = dev_alloc_skb( pkt_len+2 );
@@ -1023,7 +1013,7 @@ static int lance_rx( struct net_device *dev )
 						break;
 
 				if (i > RX_RING_SIZE - 2) {
-					lp->stats.rx_dropped++;
+					dev->stats.rx_dropped++;
 					head->flag |= RMD1_OWN_CHIP;
 					lp->cur_rx++;
 				}
@@ -1052,8 +1042,8 @@ static int lance_rx( struct net_device *dev )
 				skb->protocol = eth_type_trans( skb, dev );
 				netif_rx( skb );
 				dev->last_rx = jiffies;
-				lp->stats.rx_packets++;
-				lp->stats.rx_bytes += pkt_len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += pkt_len;
 			}
 		}
 
@@ -1090,14 +1080,6 @@ static int lance_close( struct net_device *dev ) | |||
1090 | } | 1080 | } |
1091 | 1081 | ||
1092 | 1082 | ||
1093 | static struct net_device_stats *lance_get_stats( struct net_device *dev ) | ||
1094 | |||
1095 | { struct lance_private *lp = (struct lance_private *)dev->priv; | ||
1096 | |||
1097 | return &lp->stats; | ||
1098 | } | ||
1099 | |||
1100 | |||
1101 | /* Set or clear the multicast filter for this adaptor. | 1083 | /* Set or clear the multicast filter for this adaptor. |
1102 | num_addrs == -1 Promiscuous mode, receive all packets | 1084 | num_addrs == -1 Promiscuous mode, receive all packets |
1103 | num_addrs == 0 Normal mode, clear multicast list | 1085 | num_addrs == 0 Normal mode, clear multicast list |
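
The hunks above and below all apply the same conversion, so a short sketch may help when reading the rest of the series. This is a hypothetical driver "foo", not any file in this diff: the private copy of the counters and the trivial get_stats accessor are dropped, and the counters already embedded in struct net_device are bumped directly. As I understand the core of this era, when no dev->get_stats hook is assigned it simply hands out &dev->stats, so no accessor is needed at all.

/*
 * Hedged sketch of the cleanup pattern, on an invented driver "foo".
 */
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_private {
	spinlock_t lock;
	/* struct net_device_stats stats;  -- removed; dev->stats is used instead */
};

/*
 * Before the cleanup each driver carried boilerplate like this and hooked
 * it up via dev->get_stats:
 *
 *	static struct net_device_stats *foo_get_stats(struct net_device *dev)
 *	{
 *		struct foo_private *fp = netdev_priv(dev);
 *		return &fp->stats;
 *	}
 *
 * Afterwards no hook is assigned and hot paths update dev->stats directly:
 */
static void foo_count_rx(struct net_device *dev, unsigned int len)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}
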
diff --git a/drivers/net/atp.c b/drivers/net/atp.c index 6020d5ec38b9..cec2e3672cd0 100644 --- a/drivers/net/atp.c +++ b/drivers/net/atp.c | |||
@@ -171,7 +171,6 @@ static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,}; | |||
171 | struct net_local { | 171 | struct net_local { |
172 | spinlock_t lock; | 172 | spinlock_t lock; |
173 | struct net_device *next_module; | 173 | struct net_device *next_module; |
174 | struct net_device_stats stats; | ||
175 | struct timer_list timer; /* Media selection timer. */ | 174 | struct timer_list timer; /* Media selection timer. */ |
176 | long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ | 175 | long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ |
177 | int saved_tx_size; | 176 | int saved_tx_size; |
@@ -205,7 +204,6 @@ static irqreturn_t atp_interrupt(int irq, void *dev_id); | |||
205 | static void net_rx(struct net_device *dev); | 204 | static void net_rx(struct net_device *dev); |
206 | static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode); | 205 | static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode); |
207 | static int net_close(struct net_device *dev); | 206 | static int net_close(struct net_device *dev); |
208 | static struct net_device_stats *net_get_stats(struct net_device *dev); | ||
209 | static void set_rx_mode_8002(struct net_device *dev); | 207 | static void set_rx_mode_8002(struct net_device *dev); |
210 | static void set_rx_mode_8012(struct net_device *dev); | 208 | static void set_rx_mode_8012(struct net_device *dev); |
211 | static void tx_timeout(struct net_device *dev); | 209 | static void tx_timeout(struct net_device *dev); |
@@ -348,7 +346,6 @@ static int __init atp_probe1(long ioaddr) | |||
348 | dev->open = net_open; | 346 | dev->open = net_open; |
349 | dev->stop = net_close; | 347 | dev->stop = net_close; |
350 | dev->hard_start_xmit = atp_send_packet; | 348 | dev->hard_start_xmit = atp_send_packet; |
351 | dev->get_stats = net_get_stats; | ||
352 | dev->set_multicast_list = | 349 | dev->set_multicast_list = |
353 | lp->chip_type == RTL8002 ? &set_rx_mode_8002 : &set_rx_mode_8012; | 350 | lp->chip_type == RTL8002 ? &set_rx_mode_8002 : &set_rx_mode_8012; |
354 | dev->tx_timeout = tx_timeout; | 351 | dev->tx_timeout = tx_timeout; |
@@ -538,18 +535,17 @@ static void write_packet(long ioaddr, int length, unsigned char *packet, int pad | |||
538 | 535 | ||
539 | static void tx_timeout(struct net_device *dev) | 536 | static void tx_timeout(struct net_device *dev) |
540 | { | 537 | { |
541 | struct net_local *np = netdev_priv(dev); | ||
542 | long ioaddr = dev->base_addr; | 538 | long ioaddr = dev->base_addr; |
543 | 539 | ||
544 | printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name, | 540 | printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name, |
545 | inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem" | 541 | inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem" |
546 | : "IRQ conflict"); | 542 | : "IRQ conflict"); |
547 | np->stats.tx_errors++; | 543 | dev->stats.tx_errors++; |
548 | /* Try to restart the adapter. */ | 544 | /* Try to restart the adapter. */ |
549 | hardware_init(dev); | 545 | hardware_init(dev); |
550 | dev->trans_start = jiffies; | 546 | dev->trans_start = jiffies; |
551 | netif_wake_queue(dev); | 547 | netif_wake_queue(dev); |
552 | np->stats.tx_errors++; | 548 | dev->stats.tx_errors++; |
553 | } | 549 | } |
554 | 550 | ||
555 | static int atp_send_packet(struct sk_buff *skb, struct net_device *dev) | 551 | static int atp_send_packet(struct sk_buff *skb, struct net_device *dev) |
@@ -629,7 +625,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) | |||
629 | /* We acknowledged the normal Rx interrupt, so if the interrupt | 625 | /* We acknowledged the normal Rx interrupt, so if the interrupt |
630 | is still outstanding we must have a Rx error. */ | 626 | is still outstanding we must have a Rx error. */ |
631 | if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */ | 627 | if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */ |
632 | lp->stats.rx_over_errors++; | 628 | dev->stats.rx_over_errors++; |
633 | /* Set to no-accept mode long enough to remove a packet. */ | 629 | /* Set to no-accept mode long enough to remove a packet. */ |
634 | write_reg_high(ioaddr, CMR2, CMR2h_OFF); | 630 | write_reg_high(ioaddr, CMR2, CMR2h_OFF); |
635 | net_rx(dev); | 631 | net_rx(dev); |
@@ -649,9 +645,9 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) | |||
649 | and reinitialize the adapter. */ | 645 | and reinitialize the adapter. */ |
650 | write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK); | 646 | write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK); |
651 | if (status & (ISR_TxErr<<3)) { | 647 | if (status & (ISR_TxErr<<3)) { |
652 | lp->stats.collisions++; | 648 | dev->stats.collisions++; |
653 | if (++lp->re_tx > 15) { | 649 | if (++lp->re_tx > 15) { |
654 | lp->stats.tx_aborted_errors++; | 650 | dev->stats.tx_aborted_errors++; |
655 | hardware_init(dev); | 651 | hardware_init(dev); |
656 | break; | 652 | break; |
657 | } | 653 | } |
@@ -660,7 +656,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) | |||
660 | write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit); | 656 | write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit); |
661 | } else { | 657 | } else { |
662 | /* Finish up the transmit. */ | 658 | /* Finish up the transmit. */ |
663 | lp->stats.tx_packets++; | 659 | dev->stats.tx_packets++; |
664 | lp->pac_cnt_in_tx_buf--; | 660 | lp->pac_cnt_in_tx_buf--; |
665 | if ( lp->saved_tx_size) { | 661 | if ( lp->saved_tx_size) { |
666 | trigger_send(ioaddr, lp->saved_tx_size); | 662 | trigger_send(ioaddr, lp->saved_tx_size); |
@@ -678,7 +674,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) | |||
678 | "%ld jiffies status %02x CMR1 %02x.\n", dev->name, | 674 | "%ld jiffies status %02x CMR1 %02x.\n", dev->name, |
679 | num_tx_since_rx, jiffies - dev->last_rx, status, | 675 | num_tx_since_rx, jiffies - dev->last_rx, status, |
680 | (read_nibble(ioaddr, CMR1) >> 3) & 15); | 676 | (read_nibble(ioaddr, CMR1) >> 3) & 15); |
681 | lp->stats.rx_missed_errors++; | 677 | dev->stats.rx_missed_errors++; |
682 | hardware_init(dev); | 678 | hardware_init(dev); |
683 | num_tx_since_rx = 0; | 679 | num_tx_since_rx = 0; |
684 | break; | 680 | break; |
@@ -735,13 +731,13 @@ static void atp_timed_checker(unsigned long data) | |||
735 | struct net_local *lp = netdev_priv(atp_timed_dev); | 731 | struct net_local *lp = netdev_priv(atp_timed_dev); |
736 | write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]); | 732 | write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]); |
737 | if (i == 2) | 733 | if (i == 2) |
738 | lp->stats.tx_errors++; | 734 | dev->stats.tx_errors++; |
739 | else if (i == 3) | 735 | else if (i == 3) |
740 | lp->stats.tx_dropped++; | 736 | dev->stats.tx_dropped++; |
741 | else if (i == 4) | 737 | else if (i == 4) |
742 | lp->stats.collisions++; | 738 | dev->stats.collisions++; |
743 | else | 739 | else |
744 | lp->stats.rx_errors++; | 740 | dev->stats.rx_errors++; |
745 | } | 741 | } |
746 | #endif | 742 | #endif |
747 | } | 743 | } |
@@ -765,14 +761,14 @@ static void net_rx(struct net_device *dev) | |||
765 | printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad, | 761 | printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad, |
766 | rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr); | 762 | rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr); |
767 | if ((rx_head.rx_status & 0x77) != 0x01) { | 763 | if ((rx_head.rx_status & 0x77) != 0x01) { |
768 | lp->stats.rx_errors++; | 764 | dev->stats.rx_errors++; |
769 | if (rx_head.rx_status & 0x0004) lp->stats.rx_frame_errors++; | 765 | if (rx_head.rx_status & 0x0004) dev->stats.rx_frame_errors++; |
770 | else if (rx_head.rx_status & 0x0002) lp->stats.rx_crc_errors++; | 766 | else if (rx_head.rx_status & 0x0002) dev->stats.rx_crc_errors++; |
771 | if (net_debug > 3) | 767 | if (net_debug > 3) |
772 | printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n", | 768 | printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n", |
773 | dev->name, rx_head.rx_status); | 769 | dev->name, rx_head.rx_status); |
774 | if (rx_head.rx_status & 0x0020) { | 770 | if (rx_head.rx_status & 0x0020) { |
775 | lp->stats.rx_fifo_errors++; | 771 | dev->stats.rx_fifo_errors++; |
776 | write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE); | 772 | write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE); |
777 | write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); | 773 | write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); |
778 | } else if (rx_head.rx_status & 0x0050) | 774 | } else if (rx_head.rx_status & 0x0050) |
@@ -787,7 +783,7 @@ static void net_rx(struct net_device *dev) | |||
787 | if (skb == NULL) { | 783 | if (skb == NULL) { |
788 | printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", | 784 | printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", |
789 | dev->name); | 785 | dev->name); |
790 | lp->stats.rx_dropped++; | 786 | dev->stats.rx_dropped++; |
791 | goto done; | 787 | goto done; |
792 | } | 788 | } |
793 | 789 | ||
@@ -796,8 +792,8 @@ static void net_rx(struct net_device *dev) | |||
796 | skb->protocol = eth_type_trans(skb, dev); | 792 | skb->protocol = eth_type_trans(skb, dev); |
797 | netif_rx(skb); | 793 | netif_rx(skb); |
798 | dev->last_rx = jiffies; | 794 | dev->last_rx = jiffies; |
799 | lp->stats.rx_packets++; | 795 | dev->stats.rx_packets++; |
800 | lp->stats.rx_bytes += pkt_len; | 796 | dev->stats.rx_bytes += pkt_len; |
801 | } | 797 | } |
802 | done: | 798 | done: |
803 | write_reg(ioaddr, CMR1, CMR1_NextPkt); | 799 | write_reg(ioaddr, CMR1, CMR1_NextPkt); |
@@ -849,15 +845,6 @@ net_close(struct net_device *dev) | |||
849 | return 0; | 845 | return 0; |
850 | } | 846 | } |
851 | 847 | ||
852 | /* Get the current statistics. This may be called with the card open or | ||
853 | closed. */ | ||
854 | static struct net_device_stats * | ||
855 | net_get_stats(struct net_device *dev) | ||
856 | { | ||
857 | struct net_local *lp = netdev_priv(dev); | ||
858 | return &lp->stats; | ||
859 | } | ||
860 | |||
861 | /* | 848 | /* |
862 | * Set or clear the multicast filter for this adapter. | 849 | * Set or clear the multicast filter for this adapter. |
863 | */ | 850 | */ |
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index e86b3691765b..b46c5d8a77bd 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
@@ -90,7 +90,6 @@ static int au1000_rx(struct net_device *); | |||
90 | static irqreturn_t au1000_interrupt(int, void *); | 90 | static irqreturn_t au1000_interrupt(int, void *); |
91 | static void au1000_tx_timeout(struct net_device *); | 91 | static void au1000_tx_timeout(struct net_device *); |
92 | static void set_rx_mode(struct net_device *); | 92 | static void set_rx_mode(struct net_device *); |
93 | static struct net_device_stats *au1000_get_stats(struct net_device *); | ||
94 | static int au1000_ioctl(struct net_device *, struct ifreq *, int); | 93 | static int au1000_ioctl(struct net_device *, struct ifreq *, int); |
95 | static int mdio_read(struct net_device *, int, int); | 94 | static int mdio_read(struct net_device *, int, int); |
96 | static void mdio_write(struct net_device *, int, int, u16); | 95 | static void mdio_write(struct net_device *, int, int, u16); |
@@ -772,7 +771,6 @@ static struct net_device * au1000_probe(int port_num) | |||
772 | dev->open = au1000_open; | 771 | dev->open = au1000_open; |
773 | dev->hard_start_xmit = au1000_tx; | 772 | dev->hard_start_xmit = au1000_tx; |
774 | dev->stop = au1000_close; | 773 | dev->stop = au1000_close; |
775 | dev->get_stats = au1000_get_stats; | ||
776 | dev->set_multicast_list = &set_rx_mode; | 774 | dev->set_multicast_list = &set_rx_mode; |
777 | dev->do_ioctl = &au1000_ioctl; | 775 | dev->do_ioctl = &au1000_ioctl; |
778 | SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); | 776 | SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); |
@@ -1038,7 +1036,7 @@ static void __exit au1000_cleanup_module(void) | |||
1038 | static void update_tx_stats(struct net_device *dev, u32 status) | 1036 | static void update_tx_stats(struct net_device *dev, u32 status) |
1039 | { | 1037 | { |
1040 | struct au1000_private *aup = (struct au1000_private *) dev->priv; | 1038 | struct au1000_private *aup = (struct au1000_private *) dev->priv; |
1041 | struct net_device_stats *ps = &aup->stats; | 1039 | struct net_device_stats *ps = &dev->stats; |
1042 | 1040 | ||
1043 | if (status & TX_FRAME_ABORTED) { | 1041 | if (status & TX_FRAME_ABORTED) { |
1044 | if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { | 1042 | if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { |
@@ -1094,7 +1092,7 @@ static void au1000_tx_ack(struct net_device *dev) | |||
1094 | static int au1000_tx(struct sk_buff *skb, struct net_device *dev) | 1092 | static int au1000_tx(struct sk_buff *skb, struct net_device *dev) |
1095 | { | 1093 | { |
1096 | struct au1000_private *aup = (struct au1000_private *) dev->priv; | 1094 | struct au1000_private *aup = (struct au1000_private *) dev->priv; |
1097 | struct net_device_stats *ps = &aup->stats; | 1095 | struct net_device_stats *ps = &dev->stats; |
1098 | volatile tx_dma_t *ptxd; | 1096 | volatile tx_dma_t *ptxd; |
1099 | u32 buff_stat; | 1097 | u32 buff_stat; |
1100 | db_dest_t *pDB; | 1098 | db_dest_t *pDB; |
@@ -1148,7 +1146,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) | |||
1148 | static inline void update_rx_stats(struct net_device *dev, u32 status) | 1146 | static inline void update_rx_stats(struct net_device *dev, u32 status) |
1149 | { | 1147 | { |
1150 | struct au1000_private *aup = (struct au1000_private *) dev->priv; | 1148 | struct au1000_private *aup = (struct au1000_private *) dev->priv; |
1151 | struct net_device_stats *ps = &aup->stats; | 1149 | struct net_device_stats *ps = &dev->stats; |
1152 | 1150 | ||
1153 | ps->rx_packets++; | 1151 | ps->rx_packets++; |
1154 | if (status & RX_MCAST_FRAME) | 1152 | if (status & RX_MCAST_FRAME) |
@@ -1201,7 +1199,7 @@ static int au1000_rx(struct net_device *dev) | |||
1201 | printk(KERN_ERR | 1199 | printk(KERN_ERR |
1202 | "%s: Memory squeeze, dropping packet.\n", | 1200 | "%s: Memory squeeze, dropping packet.\n", |
1203 | dev->name); | 1201 | dev->name); |
1204 | aup->stats.rx_dropped++; | 1202 | dev->stats.rx_dropped++; |
1205 | continue; | 1203 | continue; |
1206 | } | 1204 | } |
1207 | skb_reserve(skb, 2); /* 16 byte IP header align */ | 1205 | skb_reserve(skb, 2); /* 16 byte IP header align */ |
@@ -1324,18 +1322,5 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
1324 | return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); | 1322 | return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); |
1325 | } | 1323 | } |
1326 | 1324 | ||
1327 | static struct net_device_stats *au1000_get_stats(struct net_device *dev) | ||
1328 | { | ||
1329 | struct au1000_private *aup = (struct au1000_private *) dev->priv; | ||
1330 | |||
1331 | if (au1000_debug > 4) | ||
1332 | printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev); | ||
1333 | |||
1334 | if (netif_device_present(dev)) { | ||
1335 | return &aup->stats; | ||
1336 | } | ||
1337 | return 0; | ||
1338 | } | ||
1339 | |||
1340 | module_init(au1000_init_module); | 1325 | module_init(au1000_init_module); |
1341 | module_exit(au1000_cleanup_module); | 1326 | module_exit(au1000_cleanup_module); |
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h index 52fe00dd6d24..f3baeaa12854 100644 --- a/drivers/net/au1000_eth.h +++ b/drivers/net/au1000_eth.h | |||
@@ -115,6 +115,5 @@ struct au1000_private { | |||
115 | u32 vaddr; /* virtual address of rx/tx buffers */ | 115 | u32 vaddr; /* virtual address of rx/tx buffers */ |
116 | dma_addr_t dma_addr; /* dma address of rx/tx buffers */ | 116 | dma_addr_t dma_addr; /* dma address of rx/tx buffers */ |
117 | 117 | ||
118 | struct net_device_stats stats; | ||
119 | spinlock_t lock; /* Serialise access to device */ | 118 | spinlock_t lock; /* Serialise access to device */ |
120 | }; | 119 | }; |
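
The au1000 hunks above keep a local pointer rather than repeating dev->stats on every line. A minimal sketch of that style, using an invented helper and an invented status bit (neither is from the patch), is:

/*
 * Illustrative only: alias &dev->stats locally when a routine touches
 * several counters, as update_tx_stats()/update_rx_stats() do above.
 */
#include <linux/netdevice.h>
#include <linux/types.h>

#define BAR_TX_ABORTED	0x1	/* placeholder status bit, not a real register flag */

static void bar_update_tx_stats(struct net_device *dev, u32 status)
{
	struct net_device_stats *ps = &dev->stats;	/* alias, not a private copy */

	ps->tx_packets++;
	if (status & BAR_TX_ABORTED)
		ps->tx_errors++;
}
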
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index e5bbcbe8de5f..cebe55440e13 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -579,8 +579,8 @@ out: | |||
579 | adjust_tx_list(); | 579 | adjust_tx_list(); |
580 | current_tx_ptr = current_tx_ptr->next; | 580 | current_tx_ptr = current_tx_ptr->next; |
581 | dev->trans_start = jiffies; | 581 | dev->trans_start = jiffies; |
582 | lp->stats.tx_packets++; | 582 | dev->stats.tx_packets++; |
583 | lp->stats.tx_bytes += (skb->len); | 583 | dev->stats.tx_bytes += (skb->len); |
584 | return 0; | 584 | return 0; |
585 | } | 585 | } |
586 | 586 | ||
@@ -596,7 +596,7 @@ static void bf537mac_rx(struct net_device *dev) | |||
596 | if (!new_skb) { | 596 | if (!new_skb) { |
597 | printk(KERN_NOTICE DRV_NAME | 597 | printk(KERN_NOTICE DRV_NAME |
598 | ": rx: low on mem - packet dropped\n"); | 598 | ": rx: low on mem - packet dropped\n"); |
599 | lp->stats.rx_dropped++; | 599 | dev->stats.rx_dropped++; |
600 | goto out; | 600 | goto out; |
601 | } | 601 | } |
602 | /* reserve 2 bytes for RXDWA padding */ | 602 | /* reserve 2 bytes for RXDWA padding */ |
@@ -618,8 +618,8 @@ static void bf537mac_rx(struct net_device *dev) | |||
618 | #endif | 618 | #endif |
619 | 619 | ||
620 | netif_rx(skb); | 620 | netif_rx(skb); |
621 | lp->stats.rx_packets++; | 621 | dev->stats.rx_packets++; |
622 | lp->stats.rx_bytes += len; | 622 | dev->stats.rx_bytes += len; |
623 | current_rx_ptr->status.status_word = 0x00000000; | 623 | current_rx_ptr->status.status_word = 0x00000000; |
624 | current_rx_ptr = current_rx_ptr->next; | 624 | current_rx_ptr = current_rx_ptr->next; |
625 | 625 | ||
@@ -733,20 +733,6 @@ static void bf537mac_timeout(struct net_device *dev) | |||
733 | } | 733 | } |
734 | 734 | ||
735 | /* | 735 | /* |
736 | * Get the current statistics. | ||
737 | * This may be called with the card open or closed. | ||
738 | */ | ||
739 | static struct net_device_stats *bf537mac_query_statistics(struct net_device | ||
740 | *dev) | ||
741 | { | ||
742 | struct bf537mac_local *lp = netdev_priv(dev); | ||
743 | |||
744 | pr_debug("%s: %s\n", dev->name, __FUNCTION__); | ||
745 | |||
746 | return &lp->stats; | ||
747 | } | ||
748 | |||
749 | /* | ||
750 | * This routine will, depending on the values passed to it, | 736 | * This routine will, depending on the values passed to it, |
751 | * either make it accept multicast packets, go into | 737 | * either make it accept multicast packets, go into |
752 | * promiscuous mode (for TCPDUMP and cousins) or accept | 738 | * promiscuous mode (for TCPDUMP and cousins) or accept |
@@ -891,7 +877,6 @@ static int __init bf537mac_probe(struct net_device *dev) | |||
891 | dev->stop = bf537mac_close; | 877 | dev->stop = bf537mac_close; |
892 | dev->hard_start_xmit = bf537mac_hard_start_xmit; | 878 | dev->hard_start_xmit = bf537mac_hard_start_xmit; |
893 | dev->tx_timeout = bf537mac_timeout; | 879 | dev->tx_timeout = bf537mac_timeout; |
894 | dev->get_stats = bf537mac_query_statistics; | ||
895 | dev->set_multicast_list = bf537mac_set_multicast_list; | 880 | dev->set_multicast_list = bf537mac_set_multicast_list; |
896 | #ifdef CONFIG_NET_POLL_CONTROLLER | 881 | #ifdef CONFIG_NET_POLL_CONTROLLER |
897 | dev->poll_controller = bf537mac_poll; | 882 | dev->poll_controller = bf537mac_poll; |
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h index af87189b85fa..b82724692283 100644 --- a/drivers/net/bfin_mac.h +++ b/drivers/net/bfin_mac.h | |||
@@ -104,8 +104,6 @@ struct bf537mac_local { | |||
104 | * can find out semi-useless statistics of how well the card is | 104 | * can find out semi-useless statistics of how well the card is |
105 | * performing | 105 | * performing |
106 | */ | 106 | */ |
107 | struct net_device_stats stats; | ||
108 | |||
109 | int version; | 107 | int version; |
110 | 108 | ||
111 | int FlowEnabled; /* record if data flow is active */ | 109 | int FlowEnabled; /* record if data flow is active */ |
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index ee157f5a5dbc..2761441f6644 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c | |||
@@ -75,7 +75,6 @@ struct bmac_data { | |||
75 | int tx_fill; | 75 | int tx_fill; |
76 | int tx_empty; | 76 | int tx_empty; |
77 | unsigned char tx_fullup; | 77 | unsigned char tx_fullup; |
78 | struct net_device_stats stats; | ||
79 | struct timer_list tx_timeout; | 78 | struct timer_list tx_timeout; |
80 | int timeout_active; | 79 | int timeout_active; |
81 | int sleeping; | 80 | int sleeping; |
@@ -145,7 +144,6 @@ static unsigned char *bmac_emergency_rxbuf; | |||
145 | static int bmac_open(struct net_device *dev); | 144 | static int bmac_open(struct net_device *dev); |
146 | static int bmac_close(struct net_device *dev); | 145 | static int bmac_close(struct net_device *dev); |
147 | static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); | 146 | static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); |
148 | static struct net_device_stats *bmac_stats(struct net_device *dev); | ||
149 | static void bmac_set_multicast(struct net_device *dev); | 147 | static void bmac_set_multicast(struct net_device *dev); |
150 | static void bmac_reset_and_enable(struct net_device *dev); | 148 | static void bmac_reset_and_enable(struct net_device *dev); |
151 | static void bmac_start_chip(struct net_device *dev); | 149 | static void bmac_start_chip(struct net_device *dev); |
@@ -668,7 +666,7 @@ static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev) | |||
668 | bp->tx_bufs[bp->tx_fill] = skb; | 666 | bp->tx_bufs[bp->tx_fill] = skb; |
669 | bp->tx_fill = i; | 667 | bp->tx_fill = i; |
670 | 668 | ||
671 | bp->stats.tx_bytes += skb->len; | 669 | dev->stats.tx_bytes += skb->len; |
672 | 670 | ||
673 | dbdma_continue(td); | 671 | dbdma_continue(td); |
674 | 672 | ||
@@ -707,8 +705,8 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) | |||
707 | nb = RX_BUFLEN - residual - 2; | 705 | nb = RX_BUFLEN - residual - 2; |
708 | if (nb < (ETHERMINPACKET - ETHERCRC)) { | 706 | if (nb < (ETHERMINPACKET - ETHERCRC)) { |
709 | skb = NULL; | 707 | skb = NULL; |
710 | bp->stats.rx_length_errors++; | 708 | dev->stats.rx_length_errors++; |
711 | bp->stats.rx_errors++; | 709 | dev->stats.rx_errors++; |
712 | } else { | 710 | } else { |
713 | skb = bp->rx_bufs[i]; | 711 | skb = bp->rx_bufs[i]; |
714 | bp->rx_bufs[i] = NULL; | 712 | bp->rx_bufs[i] = NULL; |
@@ -719,10 +717,10 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) | |||
719 | skb->protocol = eth_type_trans(skb, dev); | 717 | skb->protocol = eth_type_trans(skb, dev); |
720 | netif_rx(skb); | 718 | netif_rx(skb); |
721 | dev->last_rx = jiffies; | 719 | dev->last_rx = jiffies; |
722 | ++bp->stats.rx_packets; | 720 | ++dev->stats.rx_packets; |
723 | bp->stats.rx_bytes += nb; | 721 | dev->stats.rx_bytes += nb; |
724 | } else { | 722 | } else { |
725 | ++bp->stats.rx_dropped; | 723 | ++dev->stats.rx_dropped; |
726 | } | 724 | } |
727 | dev->last_rx = jiffies; | 725 | dev->last_rx = jiffies; |
728 | if ((skb = bp->rx_bufs[i]) == NULL) { | 726 | if ((skb = bp->rx_bufs[i]) == NULL) { |
@@ -785,7 +783,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) | |||
785 | } | 783 | } |
786 | 784 | ||
787 | if (bp->tx_bufs[bp->tx_empty]) { | 785 | if (bp->tx_bufs[bp->tx_empty]) { |
788 | ++bp->stats.tx_packets; | 786 | ++dev->stats.tx_packets; |
789 | dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); | 787 | dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); |
790 | } | 788 | } |
791 | bp->tx_bufs[bp->tx_empty] = NULL; | 789 | bp->tx_bufs[bp->tx_empty] = NULL; |
@@ -807,13 +805,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) | |||
807 | return IRQ_HANDLED; | 805 | return IRQ_HANDLED; |
808 | } | 806 | } |
809 | 807 | ||
810 | static struct net_device_stats *bmac_stats(struct net_device *dev) | ||
811 | { | ||
812 | struct bmac_data *p = netdev_priv(dev); | ||
813 | |||
814 | return &p->stats; | ||
815 | } | ||
816 | |||
817 | #ifndef SUNHME_MULTICAST | 808 | #ifndef SUNHME_MULTICAST |
818 | /* Real fast bit-reversal algorithm, 6-bit values */ | 809 | /* Real fast bit-reversal algorithm, 6-bit values */ |
819 | static int reverse6[64] = { | 810 | static int reverse6[64] = { |
@@ -1080,17 +1071,17 @@ static irqreturn_t bmac_misc_intr(int irq, void *dev_id) | |||
1080 | } | 1071 | } |
1081 | /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */ | 1072 | /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */ |
1082 | /* bmac_txdma_intr_inner(irq, dev_id); */ | 1073 | /* bmac_txdma_intr_inner(irq, dev_id); */ |
1083 | /* if (status & FrameReceived) bp->stats.rx_dropped++; */ | 1074 | /* if (status & FrameReceived) dev->stats.rx_dropped++; */ |
1084 | if (status & RxErrorMask) bp->stats.rx_errors++; | 1075 | if (status & RxErrorMask) dev->stats.rx_errors++; |
1085 | if (status & RxCRCCntExp) bp->stats.rx_crc_errors++; | 1076 | if (status & RxCRCCntExp) dev->stats.rx_crc_errors++; |
1086 | if (status & RxLenCntExp) bp->stats.rx_length_errors++; | 1077 | if (status & RxLenCntExp) dev->stats.rx_length_errors++; |
1087 | if (status & RxOverFlow) bp->stats.rx_over_errors++; | 1078 | if (status & RxOverFlow) dev->stats.rx_over_errors++; |
1088 | if (status & RxAlignCntExp) bp->stats.rx_frame_errors++; | 1079 | if (status & RxAlignCntExp) dev->stats.rx_frame_errors++; |
1089 | 1080 | ||
1090 | /* if (status & FrameSent) bp->stats.tx_dropped++; */ | 1081 | /* if (status & FrameSent) dev->stats.tx_dropped++; */ |
1091 | if (status & TxErrorMask) bp->stats.tx_errors++; | 1082 | if (status & TxErrorMask) dev->stats.tx_errors++; |
1092 | if (status & TxUnderrun) bp->stats.tx_fifo_errors++; | 1083 | if (status & TxUnderrun) dev->stats.tx_fifo_errors++; |
1093 | if (status & TxNormalCollExp) bp->stats.collisions++; | 1084 | if (status & TxNormalCollExp) dev->stats.collisions++; |
1094 | return IRQ_HANDLED; | 1085 | return IRQ_HANDLED; |
1095 | } | 1086 | } |
1096 | 1087 | ||
@@ -1324,7 +1315,6 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i | |||
1324 | dev->stop = bmac_close; | 1315 | dev->stop = bmac_close; |
1325 | dev->ethtool_ops = &bmac_ethtool_ops; | 1316 | dev->ethtool_ops = &bmac_ethtool_ops; |
1326 | dev->hard_start_xmit = bmac_output; | 1317 | dev->hard_start_xmit = bmac_output; |
1327 | dev->get_stats = bmac_stats; | ||
1328 | dev->set_multicast_list = bmac_set_multicast; | 1318 | dev->set_multicast_list = bmac_set_multicast; |
1329 | dev->set_mac_address = bmac_set_address; | 1319 | dev->set_mac_address = bmac_set_address; |
1330 | 1320 | ||
@@ -1542,7 +1532,7 @@ static void bmac_tx_timeout(unsigned long data) | |||
1542 | XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n", | 1532 | XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n", |
1543 | bp->tx_empty, bp->tx_fill, bp->tx_fullup)); | 1533 | bp->tx_empty, bp->tx_fill, bp->tx_fullup)); |
1544 | i = bp->tx_empty; | 1534 | i = bp->tx_empty; |
1545 | ++bp->stats.tx_errors; | 1535 | ++dev->stats.tx_errors; |
1546 | if (i != bp->tx_fill) { | 1536 | if (i != bp->tx_fill) { |
1547 | dev_kfree_skb(bp->tx_bufs[i]); | 1537 | dev_kfree_skb(bp->tx_bufs[i]); |
1548 | bp->tx_bufs[i] = NULL; | 1538 | bp->tx_bufs[i] = NULL; |
diff --git a/drivers/net/de600.c b/drivers/net/de600.c index 5dd0d9c0eac9..421c2ca49711 100644 --- a/drivers/net/de600.c +++ b/drivers/net/de600.c | |||
@@ -154,11 +154,6 @@ static int de600_close(struct net_device *dev) | |||
154 | return 0; | 154 | return 0; |
155 | } | 155 | } |
156 | 156 | ||
157 | static struct net_device_stats *get_stats(struct net_device *dev) | ||
158 | { | ||
159 | return (struct net_device_stats *)(dev->priv); | ||
160 | } | ||
161 | |||
162 | static inline void trigger_interrupt(struct net_device *dev) | 157 | static inline void trigger_interrupt(struct net_device *dev) |
163 | { | 158 | { |
164 | de600_put_command(FLIP_IRQ); | 159 | de600_put_command(FLIP_IRQ); |
@@ -308,7 +303,7 @@ static int de600_tx_intr(struct net_device *dev, int irq_status) | |||
308 | if (!(irq_status & TX_FAILED16)) { | 303 | if (!(irq_status & TX_FAILED16)) { |
309 | tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES; | 304 | tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES; |
310 | ++free_tx_pages; | 305 | ++free_tx_pages; |
311 | ((struct net_device_stats *)(dev->priv))->tx_packets++; | 306 | dev->stats.tx_packets++; |
312 | netif_wake_queue(dev); | 307 | netif_wake_queue(dev); |
313 | } | 308 | } |
314 | 309 | ||
@@ -375,8 +370,8 @@ static void de600_rx_intr(struct net_device *dev) | |||
375 | 370 | ||
376 | /* update stats */ | 371 | /* update stats */ |
377 | dev->last_rx = jiffies; | 372 | dev->last_rx = jiffies; |
378 | ((struct net_device_stats *)(dev->priv))->rx_packets++; /* count all receives */ | 373 | dev->stats.rx_packets++; /* count all receives */ |
379 | ((struct net_device_stats *)(dev->priv))->rx_bytes += size; /* count all received bytes */ | 374 | dev->stats.rx_bytes += size; /* count all received bytes */ |
380 | 375 | ||
381 | /* | 376 | /* |
382 | * If any worth-while packets have been received, netif_rx() | 377 | * If any worth-while packets have been received, netif_rx() |
@@ -390,7 +385,7 @@ static struct net_device * __init de600_probe(void) | |||
390 | struct net_device *dev; | 385 | struct net_device *dev; |
391 | int err; | 386 | int err; |
392 | 387 | ||
393 | dev = alloc_etherdev(sizeof(struct net_device_stats)); | 388 | dev = alloc_etherdev(0); |
394 | if (!dev) | 389 | if (!dev) |
395 | return ERR_PTR(-ENOMEM); | 390 | return ERR_PTR(-ENOMEM); |
396 | 391 | ||
@@ -448,8 +443,6 @@ static struct net_device * __init de600_probe(void) | |||
448 | printk(":%02X",dev->dev_addr[i]); | 443 | printk(":%02X",dev->dev_addr[i]); |
449 | printk("\n"); | 444 | printk("\n"); |
450 | 445 | ||
451 | dev->get_stats = get_stats; | ||
452 | |||
453 | dev->open = de600_open; | 446 | dev->open = de600_open; |
454 | dev->stop = de600_close; | 447 | dev->stop = de600_close; |
455 | dev->hard_start_xmit = &de600_start_xmit; | 448 | dev->hard_start_xmit = &de600_start_xmit; |
diff --git a/drivers/net/de600.h b/drivers/net/de600.h index 1288e48ba704..e80ecbabcf4e 100644 --- a/drivers/net/de600.h +++ b/drivers/net/de600.h | |||
@@ -121,7 +121,6 @@ static u8 de600_read_byte(unsigned char type, struct net_device *dev); | |||
121 | /* Put in the device structure. */ | 121 | /* Put in the device structure. */ |
122 | static int de600_open(struct net_device *dev); | 122 | static int de600_open(struct net_device *dev); |
123 | static int de600_close(struct net_device *dev); | 123 | static int de600_close(struct net_device *dev); |
124 | static struct net_device_stats *get_stats(struct net_device *dev); | ||
125 | static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev); | 124 | static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev); |
126 | 125 | ||
127 | /* Dispatch from interrupts. */ | 126 | /* Dispatch from interrupts. */ |
diff --git a/drivers/net/de620.c b/drivers/net/de620.c index a92c207b8839..4b93902906ba 100644 --- a/drivers/net/de620.c +++ b/drivers/net/de620.c | |||
@@ -216,7 +216,6 @@ MODULE_PARM_DESC(de620_debug, "DE-620 debug level (0-2)"); | |||
216 | /* Put in the device structure. */ | 216 | /* Put in the device structure. */ |
217 | static int de620_open(struct net_device *); | 217 | static int de620_open(struct net_device *); |
218 | static int de620_close(struct net_device *); | 218 | static int de620_close(struct net_device *); |
219 | static struct net_device_stats *get_stats(struct net_device *); | ||
220 | static void de620_set_multicast_list(struct net_device *); | 219 | static void de620_set_multicast_list(struct net_device *); |
221 | static int de620_start_xmit(struct sk_buff *, struct net_device *); | 220 | static int de620_start_xmit(struct sk_buff *, struct net_device *); |
222 | 221 | ||
@@ -480,16 +479,6 @@ static int de620_close(struct net_device *dev) | |||
480 | 479 | ||
481 | /********************************************* | 480 | /********************************************* |
482 | * | 481 | * |
483 | * Return current statistics | ||
484 | * | ||
485 | */ | ||
486 | static struct net_device_stats *get_stats(struct net_device *dev) | ||
487 | { | ||
488 | return (struct net_device_stats *)(dev->priv); | ||
489 | } | ||
490 | |||
491 | /********************************************* | ||
492 | * | ||
493 | * Set or clear the multicast filter for this adaptor. | 482 | * Set or clear the multicast filter for this adaptor. |
494 | * (no real multicast implemented for the DE-620, but she can be promiscuous...) | 483 | * (no real multicast implemented for the DE-620, but she can be promiscuous...) |
495 | * | 484 | * |
@@ -579,7 +568,7 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
579 | if(!(using_txbuf == (TXBF0 | TXBF1))) | 568 | if(!(using_txbuf == (TXBF0 | TXBF1))) |
580 | netif_wake_queue(dev); | 569 | netif_wake_queue(dev); |
581 | 570 | ||
582 | ((struct net_device_stats *)(dev->priv))->tx_packets++; | 571 | dev->stats.tx_packets++; |
583 | spin_unlock_irqrestore(&de620_lock, flags); | 572 | spin_unlock_irqrestore(&de620_lock, flags); |
584 | dev_kfree_skb (skb); | 573 | dev_kfree_skb (skb); |
585 | return 0; | 574 | return 0; |
@@ -660,7 +649,7 @@ static int de620_rx_intr(struct net_device *dev) | |||
660 | /* You win some, you lose some. And sometimes plenty... */ | 649 | /* You win some, you lose some. And sometimes plenty... */ |
661 | adapter_init(dev); | 650 | adapter_init(dev); |
662 | netif_wake_queue(dev); | 651 | netif_wake_queue(dev); |
663 | ((struct net_device_stats *)(dev->priv))->rx_over_errors++; | 652 | dev->stats.rx_over_errors++; |
664 | return 0; | 653 | return 0; |
665 | } | 654 | } |
666 | 655 | ||
@@ -680,7 +669,7 @@ static int de620_rx_intr(struct net_device *dev) | |||
680 | next_rx_page = header_buf.Rx_NextPage; /* at least a try... */ | 669 | next_rx_page = header_buf.Rx_NextPage; /* at least a try... */ |
681 | de620_send_command(dev, W_DUMMY); | 670 | de620_send_command(dev, W_DUMMY); |
682 | de620_set_register(dev, W_NPRF, next_rx_page); | 671 | de620_set_register(dev, W_NPRF, next_rx_page); |
683 | ((struct net_device_stats *)(dev->priv))->rx_over_errors++; | 672 | dev->stats.rx_over_errors++; |
684 | return 0; | 673 | return 0; |
685 | } | 674 | } |
686 | next_rx_page = pagelink; | 675 | next_rx_page = pagelink; |
@@ -693,7 +682,7 @@ static int de620_rx_intr(struct net_device *dev) | |||
693 | skb = dev_alloc_skb(size+2); | 682 | skb = dev_alloc_skb(size+2); |
694 | if (skb == NULL) { /* Yeah, but no place to put it... */ | 683 | if (skb == NULL) { /* Yeah, but no place to put it... */ |
695 | printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size); | 684 | printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size); |
696 | ((struct net_device_stats *)(dev->priv))->rx_dropped++; | 685 | dev->stats.rx_dropped++; |
697 | } | 686 | } |
698 | else { /* Yep! Go get it! */ | 687 | else { /* Yep! Go get it! */ |
699 | skb_reserve(skb,2); /* Align */ | 688 | skb_reserve(skb,2); /* Align */ |
@@ -706,8 +695,8 @@ static int de620_rx_intr(struct net_device *dev) | |||
706 | netif_rx(skb); /* deliver it "upstairs" */ | 695 | netif_rx(skb); /* deliver it "upstairs" */ |
707 | dev->last_rx = jiffies; | 696 | dev->last_rx = jiffies; |
708 | /* count all receives */ | 697 | /* count all receives */ |
709 | ((struct net_device_stats *)(dev->priv))->rx_packets++; | 698 | dev->stats.rx_packets++; |
710 | ((struct net_device_stats *)(dev->priv))->rx_bytes += size; | 699 | dev->stats.rx_bytes += size; |
711 | } | 700 | } |
712 | } | 701 | } |
713 | 702 | ||
@@ -819,7 +808,7 @@ struct net_device * __init de620_probe(int unit) | |||
819 | int err = -ENOMEM; | 808 | int err = -ENOMEM; |
820 | int i; | 809 | int i; |
821 | 810 | ||
822 | dev = alloc_etherdev(sizeof(struct net_device_stats)); | 811 | dev = alloc_etherdev(0); |
823 | if (!dev) | 812 | if (!dev) |
824 | goto out; | 813 | goto out; |
825 | 814 | ||
@@ -879,7 +868,6 @@ struct net_device * __init de620_probe(int unit) | |||
879 | else | 868 | else |
880 | printk(" UTP)\n"); | 869 | printk(" UTP)\n"); |
881 | 870 | ||
882 | dev->get_stats = get_stats; | ||
883 | dev->open = de620_open; | 871 | dev->open = de620_open; |
884 | dev->stop = de620_close; | 872 | dev->stop = de620_close; |
885 | dev->hard_start_xmit = de620_start_xmit; | 873 | dev->hard_start_xmit = de620_start_xmit; |
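
A side effect visible in the de600/de620 hunks above: those drivers used their private area only to hold the stats copy, so once the counters live in dev->stats the priv size requested from alloc_etherdev() drops to zero. A sketch with an invented probe name, assuming nothing else ever moves into the private area:

/*
 * Hedged sketch of the alloc_etherdev() size change, illustrative names only.
 */
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static struct net_device *baz_probe(void)
{
	/* was: alloc_etherdev(sizeof(struct net_device_stats)) */
	struct net_device *dev = alloc_etherdev(0);

	if (!dev)
		return ERR_PTR(-ENOMEM);

	/* ... hardware probing, dev->open/stop/hard_start_xmit setup and
	 * register_netdev() would follow here, unchanged ... */
	return dev;
}
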
diff --git a/drivers/net/declance.c b/drivers/net/declance.c index b2577f40124e..7e7ac3330e60 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c | |||
@@ -258,8 +258,6 @@ struct lance_private { | |||
258 | int rx_new, tx_new; | 258 | int rx_new, tx_new; |
259 | int rx_old, tx_old; | 259 | int rx_old, tx_old; |
260 | 260 | ||
261 | struct net_device_stats stats; | ||
262 | |||
263 | unsigned short busmaster_regval; | 261 | unsigned short busmaster_regval; |
264 | 262 | ||
265 | struct timer_list multicast_timer; | 263 | struct timer_list multicast_timer; |
@@ -583,22 +581,22 @@ static int lance_rx(struct net_device *dev) | |||
583 | 581 | ||
584 | /* We got an incomplete frame? */ | 582 | /* We got an incomplete frame? */ |
585 | if ((bits & LE_R1_POK) != LE_R1_POK) { | 583 | if ((bits & LE_R1_POK) != LE_R1_POK) { |
586 | lp->stats.rx_over_errors++; | 584 | dev->stats.rx_over_errors++; |
587 | lp->stats.rx_errors++; | 585 | dev->stats.rx_errors++; |
588 | } else if (bits & LE_R1_ERR) { | 586 | } else if (bits & LE_R1_ERR) { |
589 | /* Count only the end frame as a rx error, | 587 | /* Count only the end frame as a rx error, |
590 | * not the beginning | 588 | * not the beginning |
591 | */ | 589 | */ |
592 | if (bits & LE_R1_BUF) | 590 | if (bits & LE_R1_BUF) |
593 | lp->stats.rx_fifo_errors++; | 591 | dev->stats.rx_fifo_errors++; |
594 | if (bits & LE_R1_CRC) | 592 | if (bits & LE_R1_CRC) |
595 | lp->stats.rx_crc_errors++; | 593 | dev->stats.rx_crc_errors++; |
596 | if (bits & LE_R1_OFL) | 594 | if (bits & LE_R1_OFL) |
597 | lp->stats.rx_over_errors++; | 595 | dev->stats.rx_over_errors++; |
598 | if (bits & LE_R1_FRA) | 596 | if (bits & LE_R1_FRA) |
599 | lp->stats.rx_frame_errors++; | 597 | dev->stats.rx_frame_errors++; |
600 | if (bits & LE_R1_EOP) | 598 | if (bits & LE_R1_EOP) |
601 | lp->stats.rx_errors++; | 599 | dev->stats.rx_errors++; |
602 | } else { | 600 | } else { |
603 | len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4; | 601 | len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4; |
604 | skb = dev_alloc_skb(len + 2); | 602 | skb = dev_alloc_skb(len + 2); |
@@ -606,7 +604,7 @@ static int lance_rx(struct net_device *dev) | |||
606 | if (skb == 0) { | 604 | if (skb == 0) { |
607 | printk("%s: Memory squeeze, deferring packet.\n", | 605 | printk("%s: Memory squeeze, deferring packet.\n", |
608 | dev->name); | 606 | dev->name); |
609 | lp->stats.rx_dropped++; | 607 | dev->stats.rx_dropped++; |
610 | *rds_ptr(rd, mblength, lp->type) = 0; | 608 | *rds_ptr(rd, mblength, lp->type) = 0; |
611 | *rds_ptr(rd, rmd1, lp->type) = | 609 | *rds_ptr(rd, rmd1, lp->type) = |
612 | ((lp->rx_buf_ptr_lnc[entry] >> 16) & | 610 | ((lp->rx_buf_ptr_lnc[entry] >> 16) & |
@@ -614,7 +612,7 @@ static int lance_rx(struct net_device *dev) | |||
614 | lp->rx_new = (entry + 1) & RX_RING_MOD_MASK; | 612 | lp->rx_new = (entry + 1) & RX_RING_MOD_MASK; |
615 | return 0; | 613 | return 0; |
616 | } | 614 | } |
617 | lp->stats.rx_bytes += len; | 615 | dev->stats.rx_bytes += len; |
618 | 616 | ||
619 | skb_reserve(skb, 2); /* 16 byte align */ | 617 | skb_reserve(skb, 2); /* 16 byte align */ |
620 | skb_put(skb, len); /* make room */ | 618 | skb_put(skb, len); /* make room */ |
@@ -625,7 +623,7 @@ static int lance_rx(struct net_device *dev) | |||
625 | skb->protocol = eth_type_trans(skb, dev); | 623 | skb->protocol = eth_type_trans(skb, dev); |
626 | netif_rx(skb); | 624 | netif_rx(skb); |
627 | dev->last_rx = jiffies; | 625 | dev->last_rx = jiffies; |
628 | lp->stats.rx_packets++; | 626 | dev->stats.rx_packets++; |
629 | } | 627 | } |
630 | 628 | ||
631 | /* Return the packet to the pool */ | 629 | /* Return the packet to the pool */ |
@@ -660,14 +658,14 @@ static void lance_tx(struct net_device *dev) | |||
660 | if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) { | 658 | if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) { |
661 | status = *tds_ptr(td, misc, lp->type); | 659 | status = *tds_ptr(td, misc, lp->type); |
662 | 660 | ||
663 | lp->stats.tx_errors++; | 661 | dev->stats.tx_errors++; |
664 | if (status & LE_T3_RTY) | 662 | if (status & LE_T3_RTY) |
665 | lp->stats.tx_aborted_errors++; | 663 | dev->stats.tx_aborted_errors++; |
666 | if (status & LE_T3_LCOL) | 664 | if (status & LE_T3_LCOL) |
667 | lp->stats.tx_window_errors++; | 665 | dev->stats.tx_window_errors++; |
668 | 666 | ||
669 | if (status & LE_T3_CLOS) { | 667 | if (status & LE_T3_CLOS) { |
670 | lp->stats.tx_carrier_errors++; | 668 | dev->stats.tx_carrier_errors++; |
671 | printk("%s: Carrier Lost\n", dev->name); | 669 | printk("%s: Carrier Lost\n", dev->name); |
672 | /* Stop the lance */ | 670 | /* Stop the lance */ |
673 | writereg(&ll->rap, LE_CSR0); | 671 | writereg(&ll->rap, LE_CSR0); |
@@ -681,7 +679,7 @@ static void lance_tx(struct net_device *dev) | |||
681 | * transmitter, restart the adapter. | 679 | * transmitter, restart the adapter. |
682 | */ | 680 | */ |
683 | if (status & (LE_T3_BUF | LE_T3_UFL)) { | 681 | if (status & (LE_T3_BUF | LE_T3_UFL)) { |
684 | lp->stats.tx_fifo_errors++; | 682 | dev->stats.tx_fifo_errors++; |
685 | 683 | ||
686 | printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", | 684 | printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", |
687 | dev->name); | 685 | dev->name); |
@@ -702,13 +700,13 @@ static void lance_tx(struct net_device *dev) | |||
702 | 700 | ||
703 | /* One collision before packet was sent. */ | 701 | /* One collision before packet was sent. */ |
704 | if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE) | 702 | if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE) |
705 | lp->stats.collisions++; | 703 | dev->stats.collisions++; |
706 | 704 | ||
707 | /* More than one collision, be optimistic. */ | 705 | /* More than one collision, be optimistic. */ |
708 | if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE) | 706 | if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE) |
709 | lp->stats.collisions += 2; | 707 | dev->stats.collisions += 2; |
710 | 708 | ||
711 | lp->stats.tx_packets++; | 709 | dev->stats.tx_packets++; |
712 | } | 710 | } |
713 | j = (j + 1) & TX_RING_MOD_MASK; | 711 | j = (j + 1) & TX_RING_MOD_MASK; |
714 | } | 712 | } |
@@ -754,10 +752,10 @@ static irqreturn_t lance_interrupt(const int irq, void *dev_id) | |||
754 | lance_tx(dev); | 752 | lance_tx(dev); |
755 | 753 | ||
756 | if (csr0 & LE_C0_BABL) | 754 | if (csr0 & LE_C0_BABL) |
757 | lp->stats.tx_errors++; | 755 | dev->stats.tx_errors++; |
758 | 756 | ||
759 | if (csr0 & LE_C0_MISS) | 757 | if (csr0 & LE_C0_MISS) |
760 | lp->stats.rx_errors++; | 758 | dev->stats.rx_errors++; |
761 | 759 | ||
762 | if (csr0 & LE_C0_MERR) { | 760 | if (csr0 & LE_C0_MERR) { |
763 | printk("%s: Memory error, status %04x\n", dev->name, csr0); | 761 | printk("%s: Memory error, status %04x\n", dev->name, csr0); |
@@ -912,7 +910,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
912 | len = ETH_ZLEN; | 910 | len = ETH_ZLEN; |
913 | } | 911 | } |
914 | 912 | ||
915 | lp->stats.tx_bytes += len; | 913 | dev->stats.tx_bytes += len; |
916 | 914 | ||
917 | entry = lp->tx_new; | 915 | entry = lp->tx_new; |
918 | *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); | 916 | *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); |
@@ -938,13 +936,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
938 | return 0; | 936 | return 0; |
939 | } | 937 | } |
940 | 938 | ||
941 | static struct net_device_stats *lance_get_stats(struct net_device *dev) | ||
942 | { | ||
943 | struct lance_private *lp = netdev_priv(dev); | ||
944 | |||
945 | return &lp->stats; | ||
946 | } | ||
947 | |||
948 | static void lance_load_multicast(struct net_device *dev) | 939 | static void lance_load_multicast(struct net_device *dev) |
949 | { | 940 | { |
950 | struct lance_private *lp = netdev_priv(dev); | 941 | struct lance_private *lp = netdev_priv(dev); |
@@ -1244,7 +1235,6 @@ static int __init dec_lance_probe(struct device *bdev, const int type) | |||
1244 | dev->hard_start_xmit = &lance_start_xmit; | 1235 | dev->hard_start_xmit = &lance_start_xmit; |
1245 | dev->tx_timeout = &lance_tx_timeout; | 1236 | dev->tx_timeout = &lance_tx_timeout; |
1246 | dev->watchdog_timeo = 5*HZ; | 1237 | dev->watchdog_timeo = 5*HZ; |
1247 | dev->get_stats = &lance_get_stats; | ||
1248 | dev->set_multicast_list = &lance_set_multicast; | 1238 | dev->set_multicast_list = &lance_set_multicast; |
1249 | 1239 | ||
1250 | /* lp->ll is the location of the registers for lance card */ | 1240 | /* lp->ll is the location of the registers for lance card */ |
diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 183497020bfc..28fa2bdc8c79 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c | |||
@@ -485,7 +485,6 @@ struct depca_private { | |||
485 | /* Kernel-only (not device) fields */ | 485 | /* Kernel-only (not device) fields */ |
486 | int rx_new, tx_new; /* The next free ring entry */ | 486 | int rx_new, tx_new; /* The next free ring entry */ |
487 | int rx_old, tx_old; /* The ring entries to be free()ed. */ | 487 | int rx_old, tx_old; /* The ring entries to be free()ed. */ |
488 | struct net_device_stats stats; | ||
489 | spinlock_t lock; | 488 | spinlock_t lock; |
490 | struct { /* Private stats counters */ | 489 | struct { /* Private stats counters */ |
491 | u32 bins[DEPCA_PKT_STAT_SZ]; | 490 | u32 bins[DEPCA_PKT_STAT_SZ]; |
@@ -522,7 +521,6 @@ static irqreturn_t depca_interrupt(int irq, void *dev_id); | |||
522 | static int depca_close(struct net_device *dev); | 521 | static int depca_close(struct net_device *dev); |
523 | static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 522 | static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
524 | static void depca_tx_timeout(struct net_device *dev); | 523 | static void depca_tx_timeout(struct net_device *dev); |
525 | static struct net_device_stats *depca_get_stats(struct net_device *dev); | ||
526 | static void set_multicast_list(struct net_device *dev); | 524 | static void set_multicast_list(struct net_device *dev); |
527 | 525 | ||
528 | /* | 526 | /* |
@@ -801,7 +799,6 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) | |||
801 | dev->open = &depca_open; | 799 | dev->open = &depca_open; |
802 | dev->hard_start_xmit = &depca_start_xmit; | 800 | dev->hard_start_xmit = &depca_start_xmit; |
803 | dev->stop = &depca_close; | 801 | dev->stop = &depca_close; |
804 | dev->get_stats = &depca_get_stats; | ||
805 | dev->set_multicast_list = &set_multicast_list; | 802 | dev->set_multicast_list = &set_multicast_list; |
806 | dev->do_ioctl = &depca_ioctl; | 803 | dev->do_ioctl = &depca_ioctl; |
807 | dev->tx_timeout = depca_tx_timeout; | 804 | dev->tx_timeout = depca_tx_timeout; |
@@ -1026,15 +1023,15 @@ static int depca_rx(struct net_device *dev) | |||
1026 | } | 1023 | } |
1027 | if (status & R_ENP) { /* Valid frame status */ | 1024 | if (status & R_ENP) { /* Valid frame status */ |
1028 | if (status & R_ERR) { /* There was an error. */ | 1025 | if (status & R_ERR) { /* There was an error. */ |
1029 | lp->stats.rx_errors++; /* Update the error stats. */ | 1026 | dev->stats.rx_errors++; /* Update the error stats. */ |
1030 | if (status & R_FRAM) | 1027 | if (status & R_FRAM) |
1031 | lp->stats.rx_frame_errors++; | 1028 | dev->stats.rx_frame_errors++; |
1032 | if (status & R_OFLO) | 1029 | if (status & R_OFLO) |
1033 | lp->stats.rx_over_errors++; | 1030 | dev->stats.rx_over_errors++; |
1034 | if (status & R_CRC) | 1031 | if (status & R_CRC) |
1035 | lp->stats.rx_crc_errors++; | 1032 | dev->stats.rx_crc_errors++; |
1036 | if (status & R_BUFF) | 1033 | if (status & R_BUFF) |
1037 | lp->stats.rx_fifo_errors++; | 1034 | dev->stats.rx_fifo_errors++; |
1038 | } else { | 1035 | } else { |
1039 | short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4; | 1036 | short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4; |
1040 | struct sk_buff *skb; | 1037 | struct sk_buff *skb; |
@@ -1063,8 +1060,8 @@ static int depca_rx(struct net_device *dev) | |||
1063 | ** Update stats | 1060 | ** Update stats |
1064 | */ | 1061 | */ |
1065 | dev->last_rx = jiffies; | 1062 | dev->last_rx = jiffies; |
1066 | lp->stats.rx_packets++; | 1063 | dev->stats.rx_packets++; |
1067 | lp->stats.rx_bytes += pkt_len; | 1064 | dev->stats.rx_bytes += pkt_len; |
1068 | for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) { | 1065 | for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) { |
1069 | if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) { | 1066 | if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) { |
1070 | lp->pktStats.bins[i]++; | 1067 | lp->pktStats.bins[i]++; |
@@ -1087,7 +1084,7 @@ static int depca_rx(struct net_device *dev) | |||
1087 | } | 1084 | } |
1088 | } else { | 1085 | } else { |
1089 | printk("%s: Memory squeeze, deferring packet.\n", dev->name); | 1086 | printk("%s: Memory squeeze, deferring packet.\n", dev->name); |
1090 | lp->stats.rx_dropped++; /* Really, deferred. */ | 1087 | dev->stats.rx_dropped++; /* Really, deferred. */ |
1091 | break; | 1088 | break; |
1092 | } | 1089 | } |
1093 | } | 1090 | } |
@@ -1125,24 +1122,24 @@ static int depca_tx(struct net_device *dev) | |||
1125 | break; | 1122 | break; |
1126 | } else if (status & T_ERR) { /* An error occurred. */ | 1123 | } else if (status & T_ERR) { /* An error occurred. */ |
1127 | status = readl(&lp->tx_ring[entry].misc); | 1124 | status = readl(&lp->tx_ring[entry].misc); |
1128 | lp->stats.tx_errors++; | 1125 | dev->stats.tx_errors++; |
1129 | if (status & TMD3_RTRY) | 1126 | if (status & TMD3_RTRY) |
1130 | lp->stats.tx_aborted_errors++; | 1127 | dev->stats.tx_aborted_errors++; |
1131 | if (status & TMD3_LCAR) | 1128 | if (status & TMD3_LCAR) |
1132 | lp->stats.tx_carrier_errors++; | 1129 | dev->stats.tx_carrier_errors++; |
1133 | if (status & TMD3_LCOL) | 1130 | if (status & TMD3_LCOL) |
1134 | lp->stats.tx_window_errors++; | 1131 | dev->stats.tx_window_errors++; |
1135 | if (status & TMD3_UFLO) | 1132 | if (status & TMD3_UFLO) |
1136 | lp->stats.tx_fifo_errors++; | 1133 | dev->stats.tx_fifo_errors++; |
1137 | if (status & (TMD3_BUFF | TMD3_UFLO)) { | 1134 | if (status & (TMD3_BUFF | TMD3_UFLO)) { |
1138 | /* Trigger an immediate send demand. */ | 1135 | /* Trigger an immediate send demand. */ |
1139 | outw(CSR0, DEPCA_ADDR); | 1136 | outw(CSR0, DEPCA_ADDR); |
1140 | outw(INEA | TDMD, DEPCA_DATA); | 1137 | outw(INEA | TDMD, DEPCA_DATA); |
1141 | } | 1138 | } |
1142 | } else if (status & (T_MORE | T_ONE)) { | 1139 | } else if (status & (T_MORE | T_ONE)) { |
1143 | lp->stats.collisions++; | 1140 | dev->stats.collisions++; |
1144 | } else { | 1141 | } else { |
1145 | lp->stats.tx_packets++; | 1142 | dev->stats.tx_packets++; |
1146 | } | 1143 | } |
1147 | 1144 | ||
1148 | /* Update all the pointers */ | 1145 | /* Update all the pointers */ |
@@ -1234,15 +1231,6 @@ static int InitRestartDepca(struct net_device *dev) | |||
1234 | return status; | 1231 | return status; |
1235 | } | 1232 | } |
1236 | 1233 | ||
1237 | static struct net_device_stats *depca_get_stats(struct net_device *dev) | ||
1238 | { | ||
1239 | struct depca_private *lp = (struct depca_private *) dev->priv; | ||
1240 | |||
1241 | /* Null body since there is no framing error counter */ | ||
1242 | |||
1243 | return &lp->stats; | ||
1244 | } | ||
1245 | |||
1246 | /* | 1234 | /* |
1247 | ** Set or clear the multicast filter for this adaptor. | 1235 | ** Set or clear the multicast filter for this adaptor. |
1248 | */ | 1236 | */ |
diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c index ddedb76303d5..a9ef79da3dc7 100644 --- a/drivers/net/dgrs.c +++ b/drivers/net/dgrs.c | |||
@@ -194,11 +194,6 @@ static int dgrs_nicmode; | |||
194 | typedef struct | 194 | typedef struct |
195 | { | 195 | { |
196 | /* | 196 | /* |
197 | * Stuff for generic ethercard I/F | ||
198 | */ | ||
199 | struct net_device_stats stats; | ||
200 | |||
201 | /* | ||
202 | * DGRS specific data | 197 | * DGRS specific data |
203 | */ | 198 | */ |
204 | char *vmem; | 199 | char *vmem; |
@@ -499,7 +494,7 @@ dgrs_rcv_frame( | |||
499 | if ((skb = dev_alloc_skb(len+5)) == NULL) | 494 | if ((skb = dev_alloc_skb(len+5)) == NULL) |
500 | { | 495 | { |
501 | printk("%s: dev_alloc_skb failed for rcv buffer\n", devN->name); | 496 | printk("%s: dev_alloc_skb failed for rcv buffer\n", devN->name); |
502 | ++privN->stats.rx_dropped; | 497 | ++dev0->stats.rx_dropped; |
503 | /* discarding the frame */ | 498 | /* discarding the frame */ |
504 | goto out; | 499 | goto out; |
505 | } | 500 | } |
@@ -667,8 +662,8 @@ again: | |||
667 | skb->protocol = eth_type_trans(skb, devN); | 662 | skb->protocol = eth_type_trans(skb, devN); |
668 | netif_rx(skb); | 663 | netif_rx(skb); |
669 | devN->last_rx = jiffies; | 664 | devN->last_rx = jiffies; |
670 | ++privN->stats.rx_packets; | 665 | ++devN->stats.rx_packets; |
671 | privN->stats.rx_bytes += len; | 666 | devN->stats.rx_bytes += len; |
672 | 667 | ||
673 | out: | 668 | out: |
674 | cbp->xmit.status = I596_CB_STATUS_C | I596_CB_STATUS_OK; | 669 | cbp->xmit.status = I596_CB_STATUS_C | I596_CB_STATUS_OK; |
@@ -776,7 +771,7 @@ frame_done: | |||
776 | priv0->rfdp->status = I596_RFD_C | I596_RFD_OK; | 771 | priv0->rfdp->status = I596_RFD_C | I596_RFD_OK; |
777 | priv0->rfdp = (I596_RFD *) S2H(priv0->rfdp->next); | 772 | priv0->rfdp = (I596_RFD *) S2H(priv0->rfdp->next); |
778 | 773 | ||
779 | ++privN->stats.tx_packets; | 774 | ++devN->stats.tx_packets; |
780 | 775 | ||
781 | dev_kfree_skb (skb); | 776 | dev_kfree_skb (skb); |
782 | return (0); | 777 | return (0); |
@@ -806,16 +801,6 @@ static int dgrs_close( struct net_device *dev ) | |||
806 | } | 801 | } |
807 | 802 | ||
808 | /* | 803 | /* |
809 | * Get statistics | ||
810 | */ | ||
811 | static struct net_device_stats *dgrs_get_stats( struct net_device *dev ) | ||
812 | { | ||
813 | DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; | ||
814 | |||
815 | return (&priv->stats); | ||
816 | } | ||
817 | |||
818 | /* | ||
819 | * Set multicast list and/or promiscuous mode | 804 | * Set multicast list and/or promiscuous mode |
820 | */ | 805 | */ |
821 | 806 | ||
@@ -1213,7 +1198,6 @@ dgrs_probe1(struct net_device *dev) | |||
1213 | */ | 1198 | */ |
1214 | dev->open = &dgrs_open; | 1199 | dev->open = &dgrs_open; |
1215 | dev->stop = &dgrs_close; | 1200 | dev->stop = &dgrs_close; |
1216 | dev->get_stats = &dgrs_get_stats; | ||
1217 | dev->hard_start_xmit = &dgrs_start_xmit; | 1201 | dev->hard_start_xmit = &dgrs_start_xmit; |
1218 | dev->set_multicast_list = &dgrs_set_multicast_list; | 1202 | dev->set_multicast_list = &dgrs_set_multicast_list; |
1219 | dev->do_ioctl = &dgrs_ioctl; | 1203 | dev->do_ioctl = &dgrs_ioctl; |
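The dgrs change is the plainest form of the conversion: the per-board net_device_stats member disappears from DGRS_PRIV, dgrs_get_stats() and its dev->get_stats assignment go away, and each counter bump lands on the net_device that owns the traffic (note that the allocation-failure drop above is charged to dev0, the board's first port, while good frames are counted on devN). In shorthand, with both forms taken from the hunks:

	/* before: bounce through the driver's private struct */
	DGRS_PRIV *privN = (DGRS_PRIV *) devN->priv;
	++privN->stats.rx_packets;

	/* after: count directly on the net_device */
	++devN->stats.rx_packets;
	devN->stats.rx_bytes += len;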
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 857eb366bb11..f691ef61b2d3 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -148,7 +148,6 @@ typedef struct board_info { | |||
148 | struct resource *irq_res; | 148 | struct resource *irq_res; |
149 | 149 | ||
150 | struct timer_list timer; | 150 | struct timer_list timer; |
151 | struct net_device_stats stats; | ||
152 | unsigned char srom[128]; | 151 | unsigned char srom[128]; |
153 | spinlock_t lock; | 152 | spinlock_t lock; |
154 | 153 | ||
@@ -166,8 +165,6 @@ static int dm9000_stop(struct net_device *); | |||
166 | static void dm9000_timer(unsigned long); | 165 | static void dm9000_timer(unsigned long); |
167 | static void dm9000_init_dm9000(struct net_device *); | 166 | static void dm9000_init_dm9000(struct net_device *); |
168 | 167 | ||
169 | static struct net_device_stats *dm9000_get_stats(struct net_device *); | ||
170 | |||
171 | static irqreturn_t dm9000_interrupt(int, void *); | 168 | static irqreturn_t dm9000_interrupt(int, void *); |
172 | 169 | ||
173 | static int dm9000_phy_read(struct net_device *dev, int phyaddr_unsused, int reg); | 170 | static int dm9000_phy_read(struct net_device *dev, int phyaddr_unsused, int reg); |
@@ -558,7 +555,6 @@ dm9000_probe(struct platform_device *pdev) | |||
558 | ndev->tx_timeout = &dm9000_timeout; | 555 | ndev->tx_timeout = &dm9000_timeout; |
559 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); | 556 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); |
560 | ndev->stop = &dm9000_stop; | 557 | ndev->stop = &dm9000_stop; |
561 | ndev->get_stats = &dm9000_get_stats; | ||
562 | ndev->set_multicast_list = &dm9000_hash_table; | 558 | ndev->set_multicast_list = &dm9000_hash_table; |
563 | #ifdef CONFIG_NET_POLL_CONTROLLER | 559 | #ifdef CONFIG_NET_POLL_CONTROLLER |
564 | ndev->poll_controller = &dm9000_poll_controller; | 560 | ndev->poll_controller = &dm9000_poll_controller; |
@@ -713,7 +709,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
713 | writeb(DM9000_MWCMD, db->io_addr); | 709 | writeb(DM9000_MWCMD, db->io_addr); |
714 | 710 | ||
715 | (db->outblk)(db->io_data, skb->data, skb->len); | 711 | (db->outblk)(db->io_data, skb->data, skb->len); |
716 | db->stats.tx_bytes += skb->len; | 712 | dev->stats.tx_bytes += skb->len; |
717 | 713 | ||
718 | db->tx_pkt_cnt++; | 714 | db->tx_pkt_cnt++; |
719 | /* TX control: First packet immediately send, second packet queue */ | 715 | /* TX control: First packet immediately send, second packet queue */ |
@@ -790,7 +786,7 @@ dm9000_tx_done(struct net_device *dev, board_info_t * db) | |||
790 | if (tx_status & (NSR_TX2END | NSR_TX1END)) { | 786 | if (tx_status & (NSR_TX2END | NSR_TX1END)) { |
791 | /* One packet sent complete */ | 787 | /* One packet sent complete */ |
792 | db->tx_pkt_cnt--; | 788 | db->tx_pkt_cnt--; |
793 | db->stats.tx_packets++; | 789 | dev->stats.tx_packets++; |
794 | 790 | ||
795 | /* Queue packet check & send */ | 791 | /* Queue packet check & send */ |
796 | if (db->tx_pkt_cnt > 0) { | 792 | if (db->tx_pkt_cnt > 0) { |
@@ -852,17 +848,6 @@ dm9000_interrupt(int irq, void *dev_id) | |||
852 | } | 848 | } |
853 | 849 | ||
854 | /* | 850 | /* |
855 | * Get statistics from driver. | ||
856 | */ | ||
857 | static struct net_device_stats * | ||
858 | dm9000_get_stats(struct net_device *dev) | ||
859 | { | ||
860 | board_info_t *db = (board_info_t *) dev->priv; | ||
861 | return &db->stats; | ||
862 | } | ||
863 | |||
864 | |||
865 | /* | ||
866 | * A periodic timer routine | 851 | * A periodic timer routine |
867 | * Dynamic media sense, allocated Rx buffer... | 852 | * Dynamic media sense, allocated Rx buffer... |
868 | */ | 853 | */ |
@@ -939,15 +924,15 @@ dm9000_rx(struct net_device *dev) | |||
939 | GoodPacket = false; | 924 | GoodPacket = false; |
940 | if (rxhdr.RxStatus & 0x100) { | 925 | if (rxhdr.RxStatus & 0x100) { |
941 | PRINTK1("fifo error\n"); | 926 | PRINTK1("fifo error\n"); |
942 | db->stats.rx_fifo_errors++; | 927 | dev->stats.rx_fifo_errors++; |
943 | } | 928 | } |
944 | if (rxhdr.RxStatus & 0x200) { | 929 | if (rxhdr.RxStatus & 0x200) { |
945 | PRINTK1("crc error\n"); | 930 | PRINTK1("crc error\n"); |
946 | db->stats.rx_crc_errors++; | 931 | dev->stats.rx_crc_errors++; |
947 | } | 932 | } |
948 | if (rxhdr.RxStatus & 0x8000) { | 933 | if (rxhdr.RxStatus & 0x8000) { |
949 | PRINTK1("length error\n"); | 934 | PRINTK1("length error\n"); |
950 | db->stats.rx_length_errors++; | 935 | dev->stats.rx_length_errors++; |
951 | } | 936 | } |
952 | } | 937 | } |
953 | 938 | ||
@@ -960,12 +945,12 @@ dm9000_rx(struct net_device *dev) | |||
960 | /* Read received packet from RX SRAM */ | 945 | /* Read received packet from RX SRAM */ |
961 | 946 | ||
962 | (db->inblk)(db->io_data, rdptr, RxLen); | 947 | (db->inblk)(db->io_data, rdptr, RxLen); |
963 | db->stats.rx_bytes += RxLen; | 948 | dev->stats.rx_bytes += RxLen; |
964 | 949 | ||
965 | /* Pass to upper layer */ | 950 | /* Pass to upper layer */ |
966 | skb->protocol = eth_type_trans(skb, dev); | 951 | skb->protocol = eth_type_trans(skb, dev); |
967 | netif_rx(skb); | 952 | netif_rx(skb); |
968 | db->stats.rx_packets++; | 953 | dev->stats.rx_packets++; |
969 | 954 | ||
970 | } else { | 955 | } else { |
971 | /* need to dump the packet's data */ | 956 | /* need to dump the packet's data */ |
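dm9000 drops the stats member from board_info, the forward declaration, the dm9000_get_stats() body and the ndev->get_stats assignment in one go; the RX/TX paths then update dev->stats in place, so reading the counters no longer needs a driver hook at all. A hypothetical consumer, assuming the core's usual fallback to the embedded structure in this kernel series (this helper is not part of the patch):

	/* hypothetical debugging helper, not part of the patch */
	static void dm9000_dump_counters(struct net_device *dev)
	{
		struct net_device_stats *ns = &dev->stats;

		printk(KERN_DEBUG "%s: rx %lu (%lu bytes), tx %lu (%lu bytes)\n",
		       dev->name, ns->rx_packets, ns->rx_bytes,
		       ns->tx_packets, ns->tx_bytes);
	}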
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index f9aa13e04ada..99126564f1a0 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -558,7 +558,6 @@ struct nic { | |||
558 | enum mac mac; | 558 | enum mac mac; |
559 | enum phy phy; | 559 | enum phy phy; |
560 | struct params params; | 560 | struct params params; |
561 | struct net_device_stats net_stats; | ||
562 | struct timer_list watchdog; | 561 | struct timer_list watchdog; |
563 | struct timer_list blink_timer; | 562 | struct timer_list blink_timer; |
564 | struct mii_if_info mii; | 563 | struct mii_if_info mii; |
@@ -1483,7 +1482,8 @@ static void e100_set_multicast_list(struct net_device *netdev) | |||
1483 | 1482 | ||
1484 | static void e100_update_stats(struct nic *nic) | 1483 | static void e100_update_stats(struct nic *nic) |
1485 | { | 1484 | { |
1486 | struct net_device_stats *ns = &nic->net_stats; | 1485 | struct net_device *dev = nic->netdev; |
1486 | struct net_device_stats *ns = &dev->stats; | ||
1487 | struct stats *s = &nic->mem->stats; | 1487 | struct stats *s = &nic->mem->stats; |
1488 | u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : | 1488 | u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : |
1489 | (nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames : | 1489 | (nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames : |
@@ -1661,6 +1661,7 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1661 | 1661 | ||
1662 | static int e100_tx_clean(struct nic *nic) | 1662 | static int e100_tx_clean(struct nic *nic) |
1663 | { | 1663 | { |
1664 | struct net_device *dev = nic->netdev; | ||
1664 | struct cb *cb; | 1665 | struct cb *cb; |
1665 | int tx_cleaned = 0; | 1666 | int tx_cleaned = 0; |
1666 | 1667 | ||
@@ -1675,8 +1676,8 @@ static int e100_tx_clean(struct nic *nic) | |||
1675 | cb->status); | 1676 | cb->status); |
1676 | 1677 | ||
1677 | if(likely(cb->skb != NULL)) { | 1678 | if(likely(cb->skb != NULL)) { |
1678 | nic->net_stats.tx_packets++; | 1679 | dev->stats.tx_packets++; |
1679 | nic->net_stats.tx_bytes += cb->skb->len; | 1680 | dev->stats.tx_bytes += cb->skb->len; |
1680 | 1681 | ||
1681 | pci_unmap_single(nic->pdev, | 1682 | pci_unmap_single(nic->pdev, |
1682 | le32_to_cpu(cb->u.tcb.tbd.buf_addr), | 1683 | le32_to_cpu(cb->u.tcb.tbd.buf_addr), |
@@ -1807,6 +1808,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) | |||
1807 | static int e100_rx_indicate(struct nic *nic, struct rx *rx, | 1808 | static int e100_rx_indicate(struct nic *nic, struct rx *rx, |
1808 | unsigned int *work_done, unsigned int work_to_do) | 1809 | unsigned int *work_done, unsigned int work_to_do) |
1809 | { | 1810 | { |
1811 | struct net_device *dev = nic->netdev; | ||
1810 | struct sk_buff *skb = rx->skb; | 1812 | struct sk_buff *skb = rx->skb; |
1811 | struct rfd *rfd = (struct rfd *)skb->data; | 1813 | struct rfd *rfd = (struct rfd *)skb->data; |
1812 | u16 rfd_status, actual_size; | 1814 | u16 rfd_status, actual_size; |
@@ -1851,8 +1853,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, | |||
1851 | nic->rx_over_length_errors++; | 1853 | nic->rx_over_length_errors++; |
1852 | dev_kfree_skb_any(skb); | 1854 | dev_kfree_skb_any(skb); |
1853 | } else { | 1855 | } else { |
1854 | nic->net_stats.rx_packets++; | 1856 | dev->stats.rx_packets++; |
1855 | nic->net_stats.rx_bytes += actual_size; | 1857 | dev->stats.rx_bytes += actual_size; |
1856 | nic->netdev->last_rx = jiffies; | 1858 | nic->netdev->last_rx = jiffies; |
1857 | netif_receive_skb(skb); | 1859 | netif_receive_skb(skb); |
1858 | if(work_done) | 1860 | if(work_done) |
@@ -2015,12 +2017,6 @@ static void e100_netpoll(struct net_device *netdev) | |||
2015 | } | 2017 | } |
2016 | #endif | 2018 | #endif |
2017 | 2019 | ||
2018 | static struct net_device_stats *e100_get_stats(struct net_device *netdev) | ||
2019 | { | ||
2020 | struct nic *nic = netdev_priv(netdev); | ||
2021 | return &nic->net_stats; | ||
2022 | } | ||
2023 | |||
2024 | static int e100_set_mac_address(struct net_device *netdev, void *p) | 2020 | static int e100_set_mac_address(struct net_device *netdev, void *p) |
2025 | { | 2021 | { |
2026 | struct nic *nic = netdev_priv(netdev); | 2022 | struct nic *nic = netdev_priv(netdev); |
@@ -2457,7 +2453,7 @@ static void e100_get_ethtool_stats(struct net_device *netdev, | |||
2457 | int i; | 2453 | int i; |
2458 | 2454 | ||
2459 | for(i = 0; i < E100_NET_STATS_LEN; i++) | 2455 | for(i = 0; i < E100_NET_STATS_LEN; i++) |
2460 | data[i] = ((unsigned long *)&nic->net_stats)[i]; | 2456 | data[i] = ((unsigned long *)&netdev->stats)[i]; |
2461 | 2457 | ||
2462 | data[i++] = nic->tx_deferred; | 2458 | data[i++] = nic->tx_deferred; |
2463 | data[i++] = nic->tx_single_collisions; | 2459 | data[i++] = nic->tx_single_collisions; |
@@ -2562,7 +2558,6 @@ static int __devinit e100_probe(struct pci_dev *pdev, | |||
2562 | netdev->open = e100_open; | 2558 | netdev->open = e100_open; |
2563 | netdev->stop = e100_close; | 2559 | netdev->stop = e100_close; |
2564 | netdev->hard_start_xmit = e100_xmit_frame; | 2560 | netdev->hard_start_xmit = e100_xmit_frame; |
2565 | netdev->get_stats = e100_get_stats; | ||
2566 | netdev->set_multicast_list = e100_set_multicast_list; | 2561 | netdev->set_multicast_list = e100_set_multicast_list; |
2567 | netdev->set_mac_address = e100_set_mac_address; | 2562 | netdev->set_mac_address = e100_set_mac_address; |
2568 | netdev->change_mtu = e100_change_mtu; | 2563 | netdev->change_mtu = e100_change_mtu; |
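e100 takes a slightly different route in its hot paths: rather than writing nic->netdev->stats everywhere, it caches a local struct net_device *dev = nic->netdev at the top of e100_update_stats(), e100_tx_clean() and e100_rx_indicate(). The ethtool statistics dump now also reads straight out of netdev->stats; the cast works only because struct net_device_stats is laid out as consecutive unsigned long fields, which the existing E100_NET_STATS_LEN indexing already relied on. Roughly:

	/* the first E100_NET_STATS_LEN entries come straight from dev->stats,
	 * treated as an array of unsigned long */
	unsigned long *src = (unsigned long *)&netdev->stats;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = src[i];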
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 6eb84f14c88d..54811f6f766d 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c | |||
@@ -192,7 +192,6 @@ static unsigned int net_debug = NET_DEBUG; | |||
192 | 192 | ||
193 | /* Information that need to be kept for each board. */ | 193 | /* Information that need to be kept for each board. */ |
194 | struct eepro_local { | 194 | struct eepro_local { |
195 | struct net_device_stats stats; | ||
196 | unsigned rx_start; | 195 | unsigned rx_start; |
197 | unsigned tx_start; /* start of the transmit chain */ | 196 | unsigned tx_start; /* start of the transmit chain */ |
198 | int tx_last; /* pointer to last packet in the transmit chain */ | 197 | int tx_last; /* pointer to last packet in the transmit chain */ |
@@ -315,7 +314,6 @@ static irqreturn_t eepro_interrupt(int irq, void *dev_id); | |||
315 | static void eepro_rx(struct net_device *dev); | 314 | static void eepro_rx(struct net_device *dev); |
316 | static void eepro_transmit_interrupt(struct net_device *dev); | 315 | static void eepro_transmit_interrupt(struct net_device *dev); |
317 | static int eepro_close(struct net_device *dev); | 316 | static int eepro_close(struct net_device *dev); |
318 | static struct net_device_stats *eepro_get_stats(struct net_device *dev); | ||
319 | static void set_multicast_list(struct net_device *dev); | 317 | static void set_multicast_list(struct net_device *dev); |
320 | static void eepro_tx_timeout (struct net_device *dev); | 318 | static void eepro_tx_timeout (struct net_device *dev); |
321 | 319 | ||
@@ -514,7 +512,7 @@ buffer (transmit-buffer = 32K - receive-buffer). | |||
514 | 512 | ||
515 | /* a complete sel reset */ | 513 | /* a complete sel reset */ |
516 | #define eepro_complete_selreset(ioaddr) { \ | 514 | #define eepro_complete_selreset(ioaddr) { \ |
517 | lp->stats.tx_errors++;\ | 515 | dev->stats.tx_errors++;\ |
518 | eepro_sel_reset(ioaddr);\ | 516 | eepro_sel_reset(ioaddr);\ |
519 | lp->tx_end = \ | 517 | lp->tx_end = \ |
520 | lp->xmt_lower_limit;\ | 518 | lp->xmt_lower_limit;\ |
@@ -856,7 +854,6 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe) | |||
856 | dev->open = eepro_open; | 854 | dev->open = eepro_open; |
857 | dev->stop = eepro_close; | 855 | dev->stop = eepro_close; |
858 | dev->hard_start_xmit = eepro_send_packet; | 856 | dev->hard_start_xmit = eepro_send_packet; |
859 | dev->get_stats = eepro_get_stats; | ||
860 | dev->set_multicast_list = &set_multicast_list; | 857 | dev->set_multicast_list = &set_multicast_list; |
861 | dev->tx_timeout = eepro_tx_timeout; | 858 | dev->tx_timeout = eepro_tx_timeout; |
862 | dev->watchdog_timeo = TX_TIMEOUT; | 859 | dev->watchdog_timeo = TX_TIMEOUT; |
@@ -1154,9 +1151,9 @@ static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
1154 | 1151 | ||
1155 | if (hardware_send_packet(dev, buf, length)) | 1152 | if (hardware_send_packet(dev, buf, length)) |
1156 | /* we won't wake queue here because we're out of space */ | 1153 | /* we won't wake queue here because we're out of space */ |
1157 | lp->stats.tx_dropped++; | 1154 | dev->stats.tx_dropped++; |
1158 | else { | 1155 | else { |
1159 | lp->stats.tx_bytes+=skb->len; | 1156 | dev->stats.tx_bytes+=skb->len; |
1160 | dev->trans_start = jiffies; | 1157 | dev->trans_start = jiffies; |
1161 | netif_wake_queue(dev); | 1158 | netif_wake_queue(dev); |
1162 | } | 1159 | } |
@@ -1166,7 +1163,7 @@ static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
1166 | dev_kfree_skb (skb); | 1163 | dev_kfree_skb (skb); |
1167 | 1164 | ||
1168 | /* You might need to clean up and record Tx statistics here. */ | 1165 | /* You might need to clean up and record Tx statistics here. */ |
1169 | /* lp->stats.tx_aborted_errors++; */ | 1166 | /* dev->stats.tx_aborted_errors++; */ |
1170 | 1167 | ||
1171 | if (net_debug > 5) | 1168 | if (net_debug > 5) |
1172 | printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name); | 1169 | printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name); |
@@ -1273,16 +1270,6 @@ static int eepro_close(struct net_device *dev) | |||
1273 | return 0; | 1270 | return 0; |
1274 | } | 1271 | } |
1275 | 1272 | ||
1276 | /* Get the current statistics. This may be called with the card open or | ||
1277 | closed. */ | ||
1278 | static struct net_device_stats * | ||
1279 | eepro_get_stats(struct net_device *dev) | ||
1280 | { | ||
1281 | struct eepro_local *lp = netdev_priv(dev); | ||
1282 | |||
1283 | return &lp->stats; | ||
1284 | } | ||
1285 | |||
1286 | /* Set or clear the multicast filter for this adaptor. | 1273 | /* Set or clear the multicast filter for this adaptor. |
1287 | */ | 1274 | */ |
1288 | static void | 1275 | static void |
@@ -1575,12 +1562,12 @@ eepro_rx(struct net_device *dev) | |||
1575 | /* Malloc up new buffer. */ | 1562 | /* Malloc up new buffer. */ |
1576 | struct sk_buff *skb; | 1563 | struct sk_buff *skb; |
1577 | 1564 | ||
1578 | lp->stats.rx_bytes+=rcv_size; | 1565 | dev->stats.rx_bytes+=rcv_size; |
1579 | rcv_size &= 0x3fff; | 1566 | rcv_size &= 0x3fff; |
1580 | skb = dev_alloc_skb(rcv_size+5); | 1567 | skb = dev_alloc_skb(rcv_size+5); |
1581 | if (skb == NULL) { | 1568 | if (skb == NULL) { |
1582 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); | 1569 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); |
1583 | lp->stats.rx_dropped++; | 1570 | dev->stats.rx_dropped++; |
1584 | rcv_car = lp->rx_start + RCV_HEADER + rcv_size; | 1571 | rcv_car = lp->rx_start + RCV_HEADER + rcv_size; |
1585 | lp->rx_start = rcv_next_frame; | 1572 | lp->rx_start = rcv_next_frame; |
1586 | outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); | 1573 | outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); |
@@ -1602,28 +1589,28 @@ eepro_rx(struct net_device *dev) | |||
1602 | skb->protocol = eth_type_trans(skb,dev); | 1589 | skb->protocol = eth_type_trans(skb,dev); |
1603 | netif_rx(skb); | 1590 | netif_rx(skb); |
1604 | dev->last_rx = jiffies; | 1591 | dev->last_rx = jiffies; |
1605 | lp->stats.rx_packets++; | 1592 | dev->stats.rx_packets++; |
1606 | } | 1593 | } |
1607 | 1594 | ||
1608 | else { /* Not sure will ever reach here, | 1595 | else { /* Not sure will ever reach here, |
1609 | I set the 595 to discard bad received frames */ | 1596 | I set the 595 to discard bad received frames */ |
1610 | lp->stats.rx_errors++; | 1597 | dev->stats.rx_errors++; |
1611 | 1598 | ||
1612 | if (rcv_status & 0x0100) | 1599 | if (rcv_status & 0x0100) |
1613 | lp->stats.rx_over_errors++; | 1600 | dev->stats.rx_over_errors++; |
1614 | 1601 | ||
1615 | else if (rcv_status & 0x0400) | 1602 | else if (rcv_status & 0x0400) |
1616 | lp->stats.rx_frame_errors++; | 1603 | dev->stats.rx_frame_errors++; |
1617 | 1604 | ||
1618 | else if (rcv_status & 0x0800) | 1605 | else if (rcv_status & 0x0800) |
1619 | lp->stats.rx_crc_errors++; | 1606 | dev->stats.rx_crc_errors++; |
1620 | 1607 | ||
1621 | printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", | 1608 | printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", |
1622 | dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); | 1609 | dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); |
1623 | } | 1610 | } |
1624 | 1611 | ||
1625 | if (rcv_status & 0x1000) | 1612 | if (rcv_status & 0x1000) |
1626 | lp->stats.rx_length_errors++; | 1613 | dev->stats.rx_length_errors++; |
1627 | 1614 | ||
1628 | rcv_car = lp->rx_start + RCV_HEADER + rcv_size; | 1615 | rcv_car = lp->rx_start + RCV_HEADER + rcv_size; |
1629 | lp->rx_start = rcv_next_frame; | 1616 | lp->rx_start = rcv_next_frame; |
@@ -1666,11 +1653,11 @@ eepro_transmit_interrupt(struct net_device *dev) | |||
1666 | netif_wake_queue (dev); | 1653 | netif_wake_queue (dev); |
1667 | 1654 | ||
1668 | if (xmt_status & TX_OK) | 1655 | if (xmt_status & TX_OK) |
1669 | lp->stats.tx_packets++; | 1656 | dev->stats.tx_packets++; |
1670 | else { | 1657 | else { |
1671 | lp->stats.tx_errors++; | 1658 | dev->stats.tx_errors++; |
1672 | if (xmt_status & 0x0400) { | 1659 | if (xmt_status & 0x0400) { |
1673 | lp->stats.tx_carrier_errors++; | 1660 | dev->stats.tx_carrier_errors++; |
1674 | printk(KERN_DEBUG "%s: carrier error\n", | 1661 | printk(KERN_DEBUG "%s: carrier error\n", |
1675 | dev->name); | 1662 | dev->name); |
1676 | printk(KERN_DEBUG "%s: XMT status = %#x\n", | 1663 | printk(KERN_DEBUG "%s: XMT status = %#x\n", |
@@ -1684,11 +1671,11 @@ eepro_transmit_interrupt(struct net_device *dev) | |||
1684 | } | 1671 | } |
1685 | } | 1672 | } |
1686 | if (xmt_status & 0x000f) { | 1673 | if (xmt_status & 0x000f) { |
1687 | lp->stats.collisions += (xmt_status & 0x000f); | 1674 | dev->stats.collisions += (xmt_status & 0x000f); |
1688 | } | 1675 | } |
1689 | 1676 | ||
1690 | if ((xmt_status & 0x0040) == 0x0) { | 1677 | if ((xmt_status & 0x0040) == 0x0) { |
1691 | lp->stats.tx_heartbeat_errors++; | 1678 | dev->stats.tx_heartbeat_errors++; |
1692 | } | 1679 | } |
1693 | } | 1680 | } |
1694 | } | 1681 | } |
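One subtle point in the eepro hunks: eepro_complete_selreset() is a macro, so rewriting its body from lp->stats.tx_errors to dev->stats.tx_errors changes which identifiers the macro expects to find at each expansion site. Every caller now needs a struct net_device *dev in scope as well as lp (the macro body still touches lp->tx_end and lp->xmt_lower_limit). A hypothetical expansion site, just to make the dependency visible:

	/* hypothetical caller; both `dev` and `lp` must be in scope */
	static void eepro_recover(struct net_device *dev)
	{
		struct eepro_local *lp = netdev_priv(dev);
		int ioaddr = dev->base_addr;

		eepro_complete_selreset(ioaddr);	/* now bumps dev->stats.tx_errors */
	}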
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 6c91bfa72bb2..9c85e50014b4 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c | |||
@@ -135,7 +135,6 @@ | |||
135 | 135 | ||
136 | struct net_local | 136 | struct net_local |
137 | { | 137 | { |
138 | struct net_device_stats stats; | ||
139 | unsigned long last_tx; /* jiffies when last transmit started */ | 138 | unsigned long last_tx; /* jiffies when last transmit started */ |
140 | unsigned long init_time; /* jiffies when eexp_hw_init586 called */ | 139 | unsigned long init_time; /* jiffies when eexp_hw_init586 called */ |
141 | unsigned short rx_first; /* first rx buf, same as RX_BUF_START */ | 140 | unsigned short rx_first; /* first rx buf, same as RX_BUF_START */ |
@@ -247,7 +246,6 @@ static char mca_irqmap[] = { 12, 9, 3, 4, 5, 10, 11, 15 }; | |||
247 | static int eexp_open(struct net_device *dev); | 246 | static int eexp_open(struct net_device *dev); |
248 | static int eexp_close(struct net_device *dev); | 247 | static int eexp_close(struct net_device *dev); |
249 | static void eexp_timeout(struct net_device *dev); | 248 | static void eexp_timeout(struct net_device *dev); |
250 | static struct net_device_stats *eexp_stats(struct net_device *dev); | ||
251 | static int eexp_xmit(struct sk_buff *buf, struct net_device *dev); | 249 | static int eexp_xmit(struct sk_buff *buf, struct net_device *dev); |
252 | 250 | ||
253 | static irqreturn_t eexp_irq(int irq, void *dev_addr); | 251 | static irqreturn_t eexp_irq(int irq, void *dev_addr); |
@@ -533,17 +531,6 @@ static int eexp_close(struct net_device *dev) | |||
533 | } | 531 | } |
534 | 532 | ||
535 | /* | 533 | /* |
536 | * Return interface stats | ||
537 | */ | ||
538 | |||
539 | static struct net_device_stats *eexp_stats(struct net_device *dev) | ||
540 | { | ||
541 | struct net_local *lp = netdev_priv(dev); | ||
542 | |||
543 | return &lp->stats; | ||
544 | } | ||
545 | |||
546 | /* | ||
547 | * This gets called when a higher level thinks we are broken. Check that | 534 | * This gets called when a higher level thinks we are broken. Check that |
548 | * nothing has become jammed in the CU. | 535 | * nothing has become jammed in the CU. |
549 | */ | 536 | */ |
@@ -646,7 +633,7 @@ static void eexp_timeout(struct net_device *dev) | |||
646 | printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name, | 633 | printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name, |
647 | (SCB_complete(status)?"lost interrupt": | 634 | (SCB_complete(status)?"lost interrupt": |
648 | "board on fire")); | 635 | "board on fire")); |
649 | lp->stats.tx_errors++; | 636 | dev->stats.tx_errors++; |
650 | lp->last_tx = jiffies; | 637 | lp->last_tx = jiffies; |
651 | if (!SCB_complete(status)) { | 638 | if (!SCB_complete(status)) { |
652 | scb_command(dev, SCB_CUabort); | 639 | scb_command(dev, SCB_CUabort); |
@@ -694,7 +681,7 @@ static int eexp_xmit(struct sk_buff *buf, struct net_device *dev) | |||
694 | { | 681 | { |
695 | unsigned short *data = (unsigned short *)buf->data; | 682 | unsigned short *data = (unsigned short *)buf->data; |
696 | 683 | ||
697 | lp->stats.tx_bytes += length; | 684 | dev->stats.tx_bytes += length; |
698 | 685 | ||
699 | eexp_hw_tx_pio(dev,data,length); | 686 | eexp_hw_tx_pio(dev,data,length); |
700 | } | 687 | } |
@@ -843,7 +830,7 @@ static irqreturn_t eexp_irq(int irq, void *dev_info) | |||
843 | outw(rbd+8, ioaddr+READ_PTR); | 830 | outw(rbd+8, ioaddr+READ_PTR); |
844 | printk("[%04x]\n", inw(ioaddr+DATAPORT)); | 831 | printk("[%04x]\n", inw(ioaddr+DATAPORT)); |
845 | #endif | 832 | #endif |
846 | lp->stats.rx_errors++; | 833 | dev->stats.rx_errors++; |
847 | #if 1 | 834 | #if 1 |
848 | eexp_hw_rxinit(dev); | 835 | eexp_hw_rxinit(dev); |
849 | #else | 836 | #else |
@@ -952,17 +939,17 @@ static void eexp_hw_rx_pio(struct net_device *dev) | |||
952 | } | 939 | } |
953 | else if (!FD_OK(status)) | 940 | else if (!FD_OK(status)) |
954 | { | 941 | { |
955 | lp->stats.rx_errors++; | 942 | dev->stats.rx_errors++; |
956 | if (FD_CRC(status)) | 943 | if (FD_CRC(status)) |
957 | lp->stats.rx_crc_errors++; | 944 | dev->stats.rx_crc_errors++; |
958 | if (FD_Align(status)) | 945 | if (FD_Align(status)) |
959 | lp->stats.rx_frame_errors++; | 946 | dev->stats.rx_frame_errors++; |
960 | if (FD_Resrc(status)) | 947 | if (FD_Resrc(status)) |
961 | lp->stats.rx_fifo_errors++; | 948 | dev->stats.rx_fifo_errors++; |
962 | if (FD_DMA(status)) | 949 | if (FD_DMA(status)) |
963 | lp->stats.rx_over_errors++; | 950 | dev->stats.rx_over_errors++; |
964 | if (FD_Short(status)) | 951 | if (FD_Short(status)) |
965 | lp->stats.rx_length_errors++; | 952 | dev->stats.rx_length_errors++; |
966 | } | 953 | } |
967 | else | 954 | else |
968 | { | 955 | { |
@@ -972,7 +959,7 @@ static void eexp_hw_rx_pio(struct net_device *dev) | |||
972 | if (skb == NULL) | 959 | if (skb == NULL) |
973 | { | 960 | { |
974 | printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name); | 961 | printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name); |
975 | lp->stats.rx_dropped++; | 962 | dev->stats.rx_dropped++; |
976 | break; | 963 | break; |
977 | } | 964 | } |
978 | skb_reserve(skb, 2); | 965 | skb_reserve(skb, 2); |
@@ -981,8 +968,8 @@ static void eexp_hw_rx_pio(struct net_device *dev) | |||
981 | skb->protocol = eth_type_trans(skb,dev); | 968 | skb->protocol = eth_type_trans(skb,dev); |
982 | netif_rx(skb); | 969 | netif_rx(skb); |
983 | dev->last_rx = jiffies; | 970 | dev->last_rx = jiffies; |
984 | lp->stats.rx_packets++; | 971 | dev->stats.rx_packets++; |
985 | lp->stats.rx_bytes += pkt_len; | 972 | dev->stats.rx_bytes += pkt_len; |
986 | } | 973 | } |
987 | outw(rx_block, ioaddr+WRITE_PTR); | 974 | outw(rx_block, ioaddr+WRITE_PTR); |
988 | outw(0, ioaddr+DATAPORT); | 975 | outw(0, ioaddr+DATAPORT); |
@@ -1053,7 +1040,7 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf, | |||
1053 | outw(0xFFFF, ioaddr+SIGNAL_CA); | 1040 | outw(0xFFFF, ioaddr+SIGNAL_CA); |
1054 | } | 1041 | } |
1055 | 1042 | ||
1056 | lp->stats.tx_packets++; | 1043 | dev->stats.tx_packets++; |
1057 | lp->last_tx = jiffies; | 1044 | lp->last_tx = jiffies; |
1058 | } | 1045 | } |
1059 | 1046 | ||
@@ -1180,7 +1167,6 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr) | |||
1180 | dev->open = eexp_open; | 1167 | dev->open = eexp_open; |
1181 | dev->stop = eexp_close; | 1168 | dev->stop = eexp_close; |
1182 | dev->hard_start_xmit = eexp_xmit; | 1169 | dev->hard_start_xmit = eexp_xmit; |
1183 | dev->get_stats = eexp_stats; | ||
1184 | dev->set_multicast_list = &eexp_set_multicast; | 1170 | dev->set_multicast_list = &eexp_set_multicast; |
1185 | dev->tx_timeout = eexp_timeout; | 1171 | dev->tx_timeout = eexp_timeout; |
1186 | dev->watchdog_timeo = 2*HZ; | 1172 | dev->watchdog_timeo = 2*HZ; |
@@ -1263,35 +1249,35 @@ static unsigned short eexp_hw_lasttxstat(struct net_device *dev) | |||
1263 | else | 1249 | else |
1264 | { | 1250 | { |
1265 | lp->last_tx_restart = 0; | 1251 | lp->last_tx_restart = 0; |
1266 | lp->stats.collisions += Stat_NoColl(status); | 1252 | dev->stats.collisions += Stat_NoColl(status); |
1267 | if (!Stat_OK(status)) | 1253 | if (!Stat_OK(status)) |
1268 | { | 1254 | { |
1269 | char *whatsup = NULL; | 1255 | char *whatsup = NULL; |
1270 | lp->stats.tx_errors++; | 1256 | dev->stats.tx_errors++; |
1271 | if (Stat_Abort(status)) | 1257 | if (Stat_Abort(status)) |
1272 | lp->stats.tx_aborted_errors++; | 1258 | dev->stats.tx_aborted_errors++; |
1273 | if (Stat_TNoCar(status)) { | 1259 | if (Stat_TNoCar(status)) { |
1274 | whatsup = "aborted, no carrier"; | 1260 | whatsup = "aborted, no carrier"; |
1275 | lp->stats.tx_carrier_errors++; | 1261 | dev->stats.tx_carrier_errors++; |
1276 | } | 1262 | } |
1277 | if (Stat_TNoCTS(status)) { | 1263 | if (Stat_TNoCTS(status)) { |
1278 | whatsup = "aborted, lost CTS"; | 1264 | whatsup = "aborted, lost CTS"; |
1279 | lp->stats.tx_carrier_errors++; | 1265 | dev->stats.tx_carrier_errors++; |
1280 | } | 1266 | } |
1281 | if (Stat_TNoDMA(status)) { | 1267 | if (Stat_TNoDMA(status)) { |
1282 | whatsup = "FIFO underran"; | 1268 | whatsup = "FIFO underran"; |
1283 | lp->stats.tx_fifo_errors++; | 1269 | dev->stats.tx_fifo_errors++; |
1284 | } | 1270 | } |
1285 | if (Stat_TXColl(status)) { | 1271 | if (Stat_TXColl(status)) { |
1286 | whatsup = "aborted, too many collisions"; | 1272 | whatsup = "aborted, too many collisions"; |
1287 | lp->stats.tx_aborted_errors++; | 1273 | dev->stats.tx_aborted_errors++; |
1288 | } | 1274 | } |
1289 | if (whatsup) | 1275 | if (whatsup) |
1290 | printk(KERN_INFO "%s: transmit %s\n", | 1276 | printk(KERN_INFO "%s: transmit %s\n", |
1291 | dev->name, whatsup); | 1277 | dev->name, whatsup); |
1292 | } | 1278 | } |
1293 | else | 1279 | else |
1294 | lp->stats.tx_packets++; | 1280 | dev->stats.tx_packets++; |
1295 | } | 1281 | } |
1296 | if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE)) | 1282 | if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE)) |
1297 | lp->tx_reap = tx_block = TX_BUF_START; | 1283 | lp->tx_reap = tx_block = TX_BUF_START; |
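As in the other converted drivers, the eexpress private struct is still needed after the change; it just no longer carries the counters. The netdev_priv() lookup therefore stays in the paths above, with dev->stats and lp serving different purposes, as in this fragment lifted from the tx path:

	struct net_local *lp = netdev_priv(dev);

	dev->stats.tx_packets++;	/* public counter, shared structure */
	lp->last_tx = jiffies;		/* driver book-keeping, still private */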
diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 7266f6dbdd95..18f1364d3d5b 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c | |||
@@ -128,7 +128,6 @@ static int eql_open(struct net_device *dev); | |||
128 | static int eql_close(struct net_device *dev); | 128 | static int eql_close(struct net_device *dev); |
129 | static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | 129 | static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); |
130 | static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev); | 130 | static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev); |
131 | static struct net_device_stats *eql_get_stats(struct net_device *dev); | ||
132 | 131 | ||
133 | #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE) | 132 | #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE) |
134 | #define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER) | 133 | #define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER) |
@@ -180,7 +179,6 @@ static void __init eql_setup(struct net_device *dev) | |||
180 | dev->stop = eql_close; | 179 | dev->stop = eql_close; |
181 | dev->do_ioctl = eql_ioctl; | 180 | dev->do_ioctl = eql_ioctl; |
182 | dev->hard_start_xmit = eql_slave_xmit; | 181 | dev->hard_start_xmit = eql_slave_xmit; |
183 | dev->get_stats = eql_get_stats; | ||
184 | 182 | ||
185 | /* | 183 | /* |
186 | * Now we undo some of the things that eth_setup does | 184 | * Now we undo some of the things that eth_setup does |
@@ -337,9 +335,9 @@ static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev) | |||
337 | skb->priority = 1; | 335 | skb->priority = 1; |
338 | slave->bytes_queued += skb->len; | 336 | slave->bytes_queued += skb->len; |
339 | dev_queue_xmit(skb); | 337 | dev_queue_xmit(skb); |
340 | eql->stats.tx_packets++; | 338 | dev->stats.tx_packets++; |
341 | } else { | 339 | } else { |
342 | eql->stats.tx_dropped++; | 340 | dev->stats.tx_dropped++; |
343 | dev_kfree_skb(skb); | 341 | dev_kfree_skb(skb); |
344 | } | 342 | } |
345 | 343 | ||
@@ -348,12 +346,6 @@ static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev) | |||
348 | return 0; | 346 | return 0; |
349 | } | 347 | } |
350 | 348 | ||
351 | static struct net_device_stats * eql_get_stats(struct net_device *dev) | ||
352 | { | ||
353 | equalizer_t *eql = netdev_priv(dev); | ||
354 | return &eql->stats; | ||
355 | } | ||
356 | |||
357 | /* | 349 | /* |
358 | * Private ioctl functions | 350 | * Private ioctl functions |
359 | */ | 351 | */ |
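eql is the minimal case of the pattern: only two counters were ever touched, both now on the master device, and eql_get_stats() plus its dev->get_stats assignment simply vanish. The transmit accounting after the hunk above, abridged (the priority and bytes_queued handling is omitted):

	if (slave) {
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;	/* counted against the eql master */
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}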
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 0e3b33717cac..243fc6b354b5 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c | |||
@@ -380,7 +380,6 @@ static unsigned int eth16i_debug = ETH16I_DEBUG; | |||
380 | /* Information for each board */ | 380 | /* Information for each board */ |
381 | 381 | ||
382 | struct eth16i_local { | 382 | struct eth16i_local { |
383 | struct net_device_stats stats; | ||
384 | unsigned char tx_started; | 383 | unsigned char tx_started; |
385 | unsigned char tx_buf_busy; | 384 | unsigned char tx_buf_busy; |
386 | unsigned short tx_queue; /* Number of packets in transmit buffer */ | 385 | unsigned short tx_queue; /* Number of packets in transmit buffer */ |
@@ -426,8 +425,6 @@ static int eth16i_set_irq(struct net_device *dev); | |||
426 | static ushort eth16i_parse_mediatype(const char* s); | 425 | static ushort eth16i_parse_mediatype(const char* s); |
427 | #endif | 426 | #endif |
428 | 427 | ||
429 | static struct net_device_stats *eth16i_get_stats(struct net_device *dev); | ||
430 | |||
431 | static char cardname[] __initdata = "ICL EtherTeam 16i/32"; | 428 | static char cardname[] __initdata = "ICL EtherTeam 16i/32"; |
432 | 429 | ||
433 | static int __init do_eth16i_probe(struct net_device *dev) | 430 | static int __init do_eth16i_probe(struct net_device *dev) |
@@ -557,7 +554,6 @@ static int __init eth16i_probe1(struct net_device *dev, int ioaddr) | |||
557 | dev->open = eth16i_open; | 554 | dev->open = eth16i_open; |
558 | dev->stop = eth16i_close; | 555 | dev->stop = eth16i_close; |
559 | dev->hard_start_xmit = eth16i_tx; | 556 | dev->hard_start_xmit = eth16i_tx; |
560 | dev->get_stats = eth16i_get_stats; | ||
561 | dev->set_multicast_list = eth16i_multicast; | 557 | dev->set_multicast_list = eth16i_multicast; |
562 | dev->tx_timeout = eth16i_timeout; | 558 | dev->tx_timeout = eth16i_timeout; |
563 | dev->watchdog_timeo = TX_TIMEOUT; | 559 | dev->watchdog_timeo = TX_TIMEOUT; |
@@ -1045,7 +1041,7 @@ static void eth16i_timeout(struct net_device *dev) | |||
1045 | printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len); | 1041 | printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len); |
1046 | printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started); | 1042 | printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started); |
1047 | } | 1043 | } |
1048 | lp->stats.tx_errors++; | 1044 | dev->stats.tx_errors++; |
1049 | eth16i_reset(dev); | 1045 | eth16i_reset(dev); |
1050 | dev->trans_start = jiffies; | 1046 | dev->trans_start = jiffies; |
1051 | outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); | 1047 | outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); |
@@ -1130,7 +1126,6 @@ static int eth16i_tx(struct sk_buff *skb, struct net_device *dev) | |||
1130 | 1126 | ||
1131 | static void eth16i_rx(struct net_device *dev) | 1127 | static void eth16i_rx(struct net_device *dev) |
1132 | { | 1128 | { |
1133 | struct eth16i_local *lp = netdev_priv(dev); | ||
1134 | int ioaddr = dev->base_addr; | 1129 | int ioaddr = dev->base_addr; |
1135 | int boguscount = MAX_RX_LOOP; | 1130 | int boguscount = MAX_RX_LOOP; |
1136 | 1131 | ||
@@ -1149,16 +1144,16 @@ static void eth16i_rx(struct net_device *dev) | |||
1149 | inb(ioaddr + RECEIVE_MODE_REG), status); | 1144 | inb(ioaddr + RECEIVE_MODE_REG), status); |
1150 | 1145 | ||
1151 | if( !(status & PKT_GOOD) ) { | 1146 | if( !(status & PKT_GOOD) ) { |
1152 | lp->stats.rx_errors++; | 1147 | dev->stats.rx_errors++; |
1153 | 1148 | ||
1154 | if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) { | 1149 | if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) { |
1155 | lp->stats.rx_length_errors++; | 1150 | dev->stats.rx_length_errors++; |
1156 | eth16i_reset(dev); | 1151 | eth16i_reset(dev); |
1157 | return; | 1152 | return; |
1158 | } | 1153 | } |
1159 | else { | 1154 | else { |
1160 | eth16i_skip_packet(dev); | 1155 | eth16i_skip_packet(dev); |
1161 | lp->stats.rx_dropped++; | 1156 | dev->stats.rx_dropped++; |
1162 | } | 1157 | } |
1163 | } | 1158 | } |
1164 | else { /* Ok so now we should have a good packet */ | 1159 | else { /* Ok so now we should have a good packet */ |
@@ -1169,7 +1164,7 @@ static void eth16i_rx(struct net_device *dev) | |||
1169 | printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n", | 1164 | printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n", |
1170 | dev->name, pkt_len); | 1165 | dev->name, pkt_len); |
1171 | eth16i_skip_packet(dev); | 1166 | eth16i_skip_packet(dev); |
1172 | lp->stats.rx_dropped++; | 1167 | dev->stats.rx_dropped++; |
1173 | break; | 1168 | break; |
1174 | } | 1169 | } |
1175 | 1170 | ||
@@ -1212,8 +1207,8 @@ static void eth16i_rx(struct net_device *dev) | |||
1212 | } | 1207 | } |
1213 | netif_rx(skb); | 1208 | netif_rx(skb); |
1214 | dev->last_rx = jiffies; | 1209 | dev->last_rx = jiffies; |
1215 | lp->stats.rx_packets++; | 1210 | dev->stats.rx_packets++; |
1216 | lp->stats.rx_bytes += pkt_len; | 1211 | dev->stats.rx_bytes += pkt_len; |
1217 | 1212 | ||
1218 | } /* else */ | 1213 | } /* else */ |
1219 | 1214 | ||
@@ -1250,32 +1245,32 @@ static irqreturn_t eth16i_interrupt(int irq, void *dev_id) | |||
1250 | 1245 | ||
1251 | if( status & 0x7f00 ) { | 1246 | if( status & 0x7f00 ) { |
1252 | 1247 | ||
1253 | lp->stats.rx_errors++; | 1248 | dev->stats.rx_errors++; |
1254 | 1249 | ||
1255 | if(status & (BUS_RD_ERR << 8) ) | 1250 | if(status & (BUS_RD_ERR << 8) ) |
1256 | printk(KERN_WARNING "%s: Bus read error.\n",dev->name); | 1251 | printk(KERN_WARNING "%s: Bus read error.\n",dev->name); |
1257 | if(status & (SHORT_PKT_ERR << 8) ) lp->stats.rx_length_errors++; | 1252 | if(status & (SHORT_PKT_ERR << 8) ) dev->stats.rx_length_errors++; |
1258 | if(status & (ALIGN_ERR << 8) ) lp->stats.rx_frame_errors++; | 1253 | if(status & (ALIGN_ERR << 8) ) dev->stats.rx_frame_errors++; |
1259 | if(status & (CRC_ERR << 8) ) lp->stats.rx_crc_errors++; | 1254 | if(status & (CRC_ERR << 8) ) dev->stats.rx_crc_errors++; |
1260 | if(status & (RX_BUF_OVERFLOW << 8) ) lp->stats.rx_over_errors++; | 1255 | if(status & (RX_BUF_OVERFLOW << 8) ) dev->stats.rx_over_errors++; |
1261 | } | 1256 | } |
1262 | if( status & 0x001a) { | 1257 | if( status & 0x001a) { |
1263 | 1258 | ||
1264 | lp->stats.tx_errors++; | 1259 | dev->stats.tx_errors++; |
1265 | 1260 | ||
1266 | if(status & CR_LOST) lp->stats.tx_carrier_errors++; | 1261 | if(status & CR_LOST) dev->stats.tx_carrier_errors++; |
1267 | if(status & TX_JABBER_ERR) lp->stats.tx_window_errors++; | 1262 | if(status & TX_JABBER_ERR) dev->stats.tx_window_errors++; |
1268 | 1263 | ||
1269 | #if 0 | 1264 | #if 0 |
1270 | if(status & COLLISION) { | 1265 | if(status & COLLISION) { |
1271 | lp->stats.collisions += | 1266 | dev->stats.collisions += |
1272 | ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4); | 1267 | ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4); |
1273 | } | 1268 | } |
1274 | #endif | 1269 | #endif |
1275 | if(status & COLLISIONS_16) { | 1270 | if(status & COLLISIONS_16) { |
1276 | if(lp->col_16 < MAX_COL_16) { | 1271 | if(lp->col_16 < MAX_COL_16) { |
1277 | lp->col_16++; | 1272 | lp->col_16++; |
1278 | lp->stats.collisions++; | 1273 | dev->stats.collisions++; |
1279 | /* Resume transmitting, skip failed packet */ | 1274 | /* Resume transmitting, skip failed packet */ |
1280 | outb(0x02, ioaddr + COL_16_REG); | 1275 | outb(0x02, ioaddr + COL_16_REG); |
1281 | } | 1276 | } |
@@ -1288,8 +1283,8 @@ static irqreturn_t eth16i_interrupt(int irq, void *dev_id) | |||
1288 | if( status & 0x00ff ) { /* Let's check the transmit status reg */ | 1283 | if( status & 0x00ff ) { /* Let's check the transmit status reg */ |
1289 | 1284 | ||
1290 | if(status & TX_DONE) { /* The transmit has been done */ | 1285 | if(status & TX_DONE) { /* The transmit has been done */ |
1291 | lp->stats.tx_packets = lp->tx_buffered_packets; | 1286 | dev->stats.tx_packets = lp->tx_buffered_packets; |
1292 | lp->stats.tx_bytes += lp->tx_buffered_bytes; | 1287 | dev->stats.tx_bytes += lp->tx_buffered_bytes; |
1293 | lp->col_16 = 0; | 1288 | lp->col_16 = 0; |
1294 | 1289 | ||
1295 | if(lp->tx_queue) { /* Is there still packets ? */ | 1290 | if(lp->tx_queue) { /* Is there still packets ? */ |
@@ -1369,12 +1364,6 @@ static void eth16i_multicast(struct net_device *dev) | |||
1369 | } | 1364 | } |
1370 | } | 1365 | } |
1371 | 1366 | ||
1372 | static struct net_device_stats *eth16i_get_stats(struct net_device *dev) | ||
1373 | { | ||
1374 | struct eth16i_local *lp = netdev_priv(dev); | ||
1375 | return &lp->stats; | ||
1376 | } | ||
1377 | |||
1378 | static void eth16i_select_regbank(unsigned char banknbr, int ioaddr) | 1367 | static void eth16i_select_regbank(unsigned char banknbr, int ioaddr) |
1379 | { | 1368 | { |
1380 | unsigned char data; | 1369 | unsigned char data; |
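One quirk in the eth16i interrupt handler survives the conversion: on TX_DONE the driver assigns dev->stats.tx_packets from its private tx_buffered_packets counter rather than incrementing it, so the private book-keeping fields keep working and the public counter just mirrors them:

	if (status & TX_DONE) {
		dev->stats.tx_packets = lp->tx_buffered_packets;  /* mirror, not += */
		dev->stats.tx_bytes += lp->tx_buffered_bytes;
		lp->col_16 = 0;
	}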
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c index 6a5d0436e89e..142aa225d89e 100644 --- a/drivers/net/ewrk3.c +++ b/drivers/net/ewrk3.c | |||
@@ -275,7 +275,6 @@ struct ewrk3_private { | |||
275 | u_long shmem_base; /* Shared memory start address */ | 275 | u_long shmem_base; /* Shared memory start address */ |
276 | void __iomem *shmem; | 276 | void __iomem *shmem; |
277 | u_long shmem_length; /* Shared memory window length */ | 277 | u_long shmem_length; /* Shared memory window length */ |
278 | struct net_device_stats stats; /* Public stats */ | ||
279 | struct ewrk3_stats pktStats; /* Private stats counters */ | 278 | struct ewrk3_stats pktStats; /* Private stats counters */ |
280 | u_char irq_mask; /* Adapter IRQ mask bits */ | 279 | u_char irq_mask; /* Adapter IRQ mask bits */ |
281 | u_char mPage; /* Maximum 2kB Page number */ | 280 | u_char mPage; /* Maximum 2kB Page number */ |
@@ -302,7 +301,6 @@ static int ewrk3_open(struct net_device *dev); | |||
302 | static int ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev); | 301 | static int ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev); |
303 | static irqreturn_t ewrk3_interrupt(int irq, void *dev_id); | 302 | static irqreturn_t ewrk3_interrupt(int irq, void *dev_id); |
304 | static int ewrk3_close(struct net_device *dev); | 303 | static int ewrk3_close(struct net_device *dev); |
305 | static struct net_device_stats *ewrk3_get_stats(struct net_device *dev); | ||
306 | static void set_multicast_list(struct net_device *dev); | 304 | static void set_multicast_list(struct net_device *dev); |
307 | static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 305 | static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
308 | static const struct ethtool_ops ethtool_ops_203; | 306 | static const struct ethtool_ops ethtool_ops_203; |
@@ -611,7 +609,6 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase) | |||
611 | dev->open = ewrk3_open; | 609 | dev->open = ewrk3_open; |
612 | dev->hard_start_xmit = ewrk3_queue_pkt; | 610 | dev->hard_start_xmit = ewrk3_queue_pkt; |
613 | dev->stop = ewrk3_close; | 611 | dev->stop = ewrk3_close; |
614 | dev->get_stats = ewrk3_get_stats; | ||
615 | dev->set_multicast_list = set_multicast_list; | 612 | dev->set_multicast_list = set_multicast_list; |
616 | dev->do_ioctl = ewrk3_ioctl; | 613 | dev->do_ioctl = ewrk3_ioctl; |
617 | if (lp->adapter_name[4] == '3') | 614 | if (lp->adapter_name[4] == '3') |
@@ -863,7 +860,7 @@ static int ewrk3_queue_pkt (struct sk_buff *skb, struct net_device *dev) | |||
863 | ENABLE_IRQs; | 860 | ENABLE_IRQs; |
864 | spin_unlock_irq (&lp->hw_lock); | 861 | spin_unlock_irq (&lp->hw_lock); |
865 | 862 | ||
866 | lp->stats.tx_bytes += skb->len; | 863 | dev->stats.tx_bytes += skb->len; |
867 | dev->trans_start = jiffies; | 864 | dev->trans_start = jiffies; |
868 | dev_kfree_skb (skb); | 865 | dev_kfree_skb (skb); |
869 | 866 | ||
@@ -980,13 +977,13 @@ static int ewrk3_rx(struct net_device *dev) | |||
980 | } | 977 | } |
981 | 978 | ||
982 | if (!(rx_status & R_ROK)) { /* There was an error. */ | 979 | if (!(rx_status & R_ROK)) { /* There was an error. */ |
983 | lp->stats.rx_errors++; /* Update the error stats. */ | 980 | dev->stats.rx_errors++; /* Update the error stats. */ |
984 | if (rx_status & R_DBE) | 981 | if (rx_status & R_DBE) |
985 | lp->stats.rx_frame_errors++; | 982 | dev->stats.rx_frame_errors++; |
986 | if (rx_status & R_CRC) | 983 | if (rx_status & R_CRC) |
987 | lp->stats.rx_crc_errors++; | 984 | dev->stats.rx_crc_errors++; |
988 | if (rx_status & R_PLL) | 985 | if (rx_status & R_PLL) |
989 | lp->stats.rx_fifo_errors++; | 986 | dev->stats.rx_fifo_errors++; |
990 | } else { | 987 | } else { |
991 | struct sk_buff *skb; | 988 | struct sk_buff *skb; |
992 | 989 | ||
@@ -1037,11 +1034,11 @@ static int ewrk3_rx(struct net_device *dev) | |||
1037 | ** Update stats | 1034 | ** Update stats |
1038 | */ | 1035 | */ |
1039 | dev->last_rx = jiffies; | 1036 | dev->last_rx = jiffies; |
1040 | lp->stats.rx_packets++; | 1037 | dev->stats.rx_packets++; |
1041 | lp->stats.rx_bytes += pkt_len; | 1038 | dev->stats.rx_bytes += pkt_len; |
1042 | } else { | 1039 | } else { |
1043 | printk("%s: Insufficient memory; nuking packet.\n", dev->name); | 1040 | printk("%s: Insufficient memory; nuking packet.\n", dev->name); |
1044 | lp->stats.rx_dropped++; /* Really, deferred. */ | 1041 | dev->stats.rx_dropped++; /* Really, deferred. */ |
1045 | break; | 1042 | break; |
1046 | } | 1043 | } |
1047 | } | 1044 | } |
@@ -1071,11 +1068,11 @@ static int ewrk3_tx(struct net_device *dev) | |||
1071 | while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */ | 1068 | while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */ |
1072 | if (tx_status & T_VSTS) { /* The status is valid */ | 1069 | if (tx_status & T_VSTS) { /* The status is valid */ |
1073 | if (tx_status & T_TXE) { | 1070 | if (tx_status & T_TXE) { |
1074 | lp->stats.tx_errors++; | 1071 | dev->stats.tx_errors++; |
1075 | if (tx_status & T_NCL) | 1072 | if (tx_status & T_NCL) |
1076 | lp->stats.tx_carrier_errors++; | 1073 | dev->stats.tx_carrier_errors++; |
1077 | if (tx_status & T_LCL) | 1074 | if (tx_status & T_LCL) |
1078 | lp->stats.tx_window_errors++; | 1075 | dev->stats.tx_window_errors++; |
1079 | if (tx_status & T_CTU) { | 1076 | if (tx_status & T_CTU) { |
1080 | if ((tx_status & T_COLL) ^ T_XUR) { | 1077 | if ((tx_status & T_COLL) ^ T_XUR) { |
1081 | lp->pktStats.tx_underruns++; | 1078 | lp->pktStats.tx_underruns++; |
@@ -1084,13 +1081,13 @@ static int ewrk3_tx(struct net_device *dev) | |||
1084 | } | 1081 | } |
1085 | } else if (tx_status & T_COLL) { | 1082 | } else if (tx_status & T_COLL) { |
1086 | if ((tx_status & T_COLL) ^ T_XCOLL) { | 1083 | if ((tx_status & T_COLL) ^ T_XCOLL) { |
1087 | lp->stats.collisions++; | 1084 | dev->stats.collisions++; |
1088 | } else { | 1085 | } else { |
1089 | lp->pktStats.excessive_collisions++; | 1086 | lp->pktStats.excessive_collisions++; |
1090 | } | 1087 | } |
1091 | } | 1088 | } |
1092 | } else { | 1089 | } else { |
1093 | lp->stats.tx_packets++; | 1090 | dev->stats.tx_packets++; |
1094 | } | 1091 | } |
1095 | } | 1092 | } |
1096 | } | 1093 | } |
@@ -1133,14 +1130,6 @@ static int ewrk3_close(struct net_device *dev) | |||
1133 | return 0; | 1130 | return 0; |
1134 | } | 1131 | } |
1135 | 1132 | ||
1136 | static struct net_device_stats *ewrk3_get_stats(struct net_device *dev) | ||
1137 | { | ||
1138 | struct ewrk3_private *lp = netdev_priv(dev); | ||
1139 | |||
1140 | /* Null body since there is no framing error counter */ | ||
1141 | return &lp->stats; | ||
1142 | } | ||
1143 | |||
1144 | /* | 1133 | /* |
1145 | ** Set or clear the multicast filter for this adapter. | 1134 | ** Set or clear the multicast filter for this adapter. |
1146 | */ | 1135 | */ |
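ewrk3 follows the same split as depca: the public counters move to dev->stats, the board-specific ewrk3_stats fields (underruns, excessive collisions) stay in ewrk3_private, and the "null body" ewrk3_get_stats() wrapper is deleted outright. The collision branch above ends up as:

	} else if (tx_status & T_COLL) {
		if ((tx_status & T_COLL) ^ T_XCOLL)
			dev->stats.collisions++;		/* public counter */
		else
			lp->pktStats.excessive_collisions++;	/* still private */
	}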
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 4e8df910c00d..4419c3cee995 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -204,7 +204,6 @@ struct fec_enet_private { | |||
204 | cbd_t *tx_bd_base; | 204 | cbd_t *tx_bd_base; |
205 | cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ | 205 | cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ |
206 | cbd_t *dirty_tx; /* The ring entries to be free()ed. */ | 206 | cbd_t *dirty_tx; /* The ring entries to be free()ed. */ |
207 | struct net_device_stats stats; | ||
208 | uint tx_full; | 207 | uint tx_full; |
209 | spinlock_t lock; | 208 | spinlock_t lock; |
210 | 209 | ||
@@ -234,7 +233,6 @@ static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); | |||
234 | static void fec_enet_tx(struct net_device *dev); | 233 | static void fec_enet_tx(struct net_device *dev); |
235 | static void fec_enet_rx(struct net_device *dev); | 234 | static void fec_enet_rx(struct net_device *dev); |
236 | static int fec_enet_close(struct net_device *dev); | 235 | static int fec_enet_close(struct net_device *dev); |
237 | static struct net_device_stats *fec_enet_get_stats(struct net_device *dev); | ||
238 | static void set_multicast_list(struct net_device *dev); | 236 | static void set_multicast_list(struct net_device *dev); |
239 | static void fec_restart(struct net_device *dev, int duplex); | 237 | static void fec_restart(struct net_device *dev, int duplex); |
240 | static void fec_stop(struct net_device *dev); | 238 | static void fec_stop(struct net_device *dev); |
@@ -359,7 +357,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
359 | */ | 357 | */ |
360 | fep->tx_skbuff[fep->skb_cur] = skb; | 358 | fep->tx_skbuff[fep->skb_cur] = skb; |
361 | 359 | ||
362 | fep->stats.tx_bytes += skb->len; | 360 | dev->stats.tx_bytes += skb->len; |
363 | fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; | 361 | fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; |
364 | 362 | ||
365 | /* Push the data cache so the CPM does not get stale memory | 363 | /* Push the data cache so the CPM does not get stale memory |
@@ -409,7 +407,7 @@ fec_timeout(struct net_device *dev) | |||
409 | struct fec_enet_private *fep = netdev_priv(dev); | 407 | struct fec_enet_private *fep = netdev_priv(dev); |
410 | 408 | ||
411 | printk("%s: transmit timed out.\n", dev->name); | 409 | printk("%s: transmit timed out.\n", dev->name); |
412 | fep->stats.tx_errors++; | 410 | dev->stats.tx_errors++; |
413 | #ifndef final_version | 411 | #ifndef final_version |
414 | { | 412 | { |
415 | int i; | 413 | int i; |
@@ -511,19 +509,19 @@ fec_enet_tx(struct net_device *dev) | |||
511 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | | 509 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
512 | BD_ENET_TX_RL | BD_ENET_TX_UN | | 510 | BD_ENET_TX_RL | BD_ENET_TX_UN | |
513 | BD_ENET_TX_CSL)) { | 511 | BD_ENET_TX_CSL)) { |
514 | fep->stats.tx_errors++; | 512 | dev->stats.tx_errors++; |
515 | if (status & BD_ENET_TX_HB) /* No heartbeat */ | 513 | if (status & BD_ENET_TX_HB) /* No heartbeat */ |
516 | fep->stats.tx_heartbeat_errors++; | 514 | dev->stats.tx_heartbeat_errors++; |
517 | if (status & BD_ENET_TX_LC) /* Late collision */ | 515 | if (status & BD_ENET_TX_LC) /* Late collision */ |
518 | fep->stats.tx_window_errors++; | 516 | dev->stats.tx_window_errors++; |
519 | if (status & BD_ENET_TX_RL) /* Retrans limit */ | 517 | if (status & BD_ENET_TX_RL) /* Retrans limit */ |
520 | fep->stats.tx_aborted_errors++; | 518 | dev->stats.tx_aborted_errors++; |
521 | if (status & BD_ENET_TX_UN) /* Underrun */ | 519 | if (status & BD_ENET_TX_UN) /* Underrun */ |
522 | fep->stats.tx_fifo_errors++; | 520 | dev->stats.tx_fifo_errors++; |
523 | if (status & BD_ENET_TX_CSL) /* Carrier lost */ | 521 | if (status & BD_ENET_TX_CSL) /* Carrier lost */ |
524 | fep->stats.tx_carrier_errors++; | 522 | dev->stats.tx_carrier_errors++; |
525 | } else { | 523 | } else { |
526 | fep->stats.tx_packets++; | 524 | dev->stats.tx_packets++; |
527 | } | 525 | } |
528 | 526 | ||
529 | #ifndef final_version | 527 | #ifndef final_version |
@@ -534,7 +532,7 @@ fec_enet_tx(struct net_device *dev) | |||
534 | * but we eventually sent the packet OK. | 532 | * but we eventually sent the packet OK. |
535 | */ | 533 | */ |
536 | if (status & BD_ENET_TX_DEF) | 534 | if (status & BD_ENET_TX_DEF) |
537 | fep->stats.collisions++; | 535 | dev->stats.collisions++; |
538 | 536 | ||
539 | /* Free the sk buffer associated with this last transmit. | 537 | /* Free the sk buffer associated with this last transmit. |
540 | */ | 538 | */ |
@@ -607,17 +605,17 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { | |||
607 | /* Check for errors. */ | 605 | /* Check for errors. */ |
608 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | | 606 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | |
609 | BD_ENET_RX_CR | BD_ENET_RX_OV)) { | 607 | BD_ENET_RX_CR | BD_ENET_RX_OV)) { |
610 | fep->stats.rx_errors++; | 608 | dev->stats.rx_errors++; |
611 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { | 609 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { |
612 | /* Frame too long or too short. */ | 610 | /* Frame too long or too short. */ |
613 | fep->stats.rx_length_errors++; | 611 | dev->stats.rx_length_errors++; |
614 | } | 612 | } |
615 | if (status & BD_ENET_RX_NO) /* Frame alignment */ | 613 | if (status & BD_ENET_RX_NO) /* Frame alignment */ |
616 | fep->stats.rx_frame_errors++; | 614 | dev->stats.rx_frame_errors++; |
617 | if (status & BD_ENET_RX_CR) /* CRC Error */ | 615 | if (status & BD_ENET_RX_CR) /* CRC Error */ |
618 | fep->stats.rx_crc_errors++; | 616 | dev->stats.rx_crc_errors++; |
619 | if (status & BD_ENET_RX_OV) /* FIFO overrun */ | 617 | if (status & BD_ENET_RX_OV) /* FIFO overrun */ |
620 | fep->stats.rx_fifo_errors++; | 618 | dev->stats.rx_fifo_errors++; |
621 | } | 619 | } |
622 | 620 | ||
623 | /* Report late collisions as a frame error. | 621 | /* Report late collisions as a frame error. |
@@ -625,16 +623,16 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { | |||
625 | * have in the buffer. So, just drop this frame on the floor. | 623 | * have in the buffer. So, just drop this frame on the floor. |
626 | */ | 624 | */ |
627 | if (status & BD_ENET_RX_CL) { | 625 | if (status & BD_ENET_RX_CL) { |
628 | fep->stats.rx_errors++; | 626 | dev->stats.rx_errors++; |
629 | fep->stats.rx_frame_errors++; | 627 | dev->stats.rx_frame_errors++; |
630 | goto rx_processing_done; | 628 | goto rx_processing_done; |
631 | } | 629 | } |
632 | 630 | ||
633 | /* Process the incoming frame. | 631 | /* Process the incoming frame. |
634 | */ | 632 | */ |
635 | fep->stats.rx_packets++; | 633 | dev->stats.rx_packets++; |
636 | pkt_len = bdp->cbd_datlen; | 634 | pkt_len = bdp->cbd_datlen; |
637 | fep->stats.rx_bytes += pkt_len; | 635 | dev->stats.rx_bytes += pkt_len; |
638 | data = (__u8*)__va(bdp->cbd_bufaddr); | 636 | data = (__u8*)__va(bdp->cbd_bufaddr); |
639 | 637 | ||
640 | /* This does 16 byte alignment, exactly what we need. | 638 | /* This does 16 byte alignment, exactly what we need. |
@@ -646,7 +644,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { | |||
646 | 644 | ||
647 | if (skb == NULL) { | 645 | if (skb == NULL) { |
648 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); | 646 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); |
649 | fep->stats.rx_dropped++; | 647 | dev->stats.rx_dropped++; |
650 | } else { | 648 | } else { |
651 | skb_put(skb,pkt_len-4); /* Make room */ | 649 | skb_put(skb,pkt_len-4); /* Make room */ |
652 | skb_copy_to_linear_data(skb, data, pkt_len-4); | 650 | skb_copy_to_linear_data(skb, data, pkt_len-4); |
@@ -2220,13 +2218,6 @@ fec_enet_close(struct net_device *dev) | |||
2220 | return 0; | 2218 | return 0; |
2221 | } | 2219 | } |
2222 | 2220 | ||
2223 | static struct net_device_stats *fec_enet_get_stats(struct net_device *dev) | ||
2224 | { | ||
2225 | struct fec_enet_private *fep = netdev_priv(dev); | ||
2226 | |||
2227 | return &fep->stats; | ||
2228 | } | ||
2229 | |||
2230 | /* Set or clear the multicast filter for this adaptor. | 2221 | /* Set or clear the multicast filter for this adaptor. |
2231 | * Skeleton taken from sunlance driver. | 2222 | * Skeleton taken from sunlance driver. |
2232 | * The CPM Ethernet implementation allows Multicast as well as individual | 2223 | * The CPM Ethernet implementation allows Multicast as well as individual |
@@ -2462,7 +2453,6 @@ int __init fec_enet_init(struct net_device *dev) | |||
2462 | dev->tx_timeout = fec_timeout; | 2453 | dev->tx_timeout = fec_timeout; |
2463 | dev->watchdog_timeo = TX_TIMEOUT; | 2454 | dev->watchdog_timeo = TX_TIMEOUT; |
2464 | dev->stop = fec_enet_close; | 2455 | dev->stop = fec_enet_close; |
2465 | dev->get_stats = fec_enet_get_stats; | ||
2466 | dev->set_multicast_list = set_multicast_list; | 2456 | dev->set_multicast_list = set_multicast_list; |
2467 | 2457 | ||
2468 | for (i=0; i<NMII-1; i++) | 2458 | for (i=0; i<NMII-1; i++) |
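The fec hunks above follow the pattern used throughout this patch: the driver-private copy of struct net_device_stats and the trivial get_stats hook are deleted, and counters are bumped directly on dev->stats, which the core hands back when a driver leaves get_stats unset. A minimal sketch of the before/after shape, using a hypothetical driver (names are illustrative, not taken from this patch):

/* Before: private counters plus an accessor that only returned them. */
struct foo_private {
        struct net_device_stats stats;
        /* ... real driver state ... */
};

static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
        struct foo_private *fp = netdev_priv(dev);
        return &fp->stats;
}

/* After: no private field, no hook; count straight into the net_device. */
static void foo_rx_done(struct net_device *dev, unsigned int len)
{
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += len;
}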
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 002f8baaab2d..5a1a1165b48c 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -116,7 +116,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); | |||
116 | static void gfar_timeout(struct net_device *dev); | 116 | static void gfar_timeout(struct net_device *dev); |
117 | static int gfar_close(struct net_device *dev); | 117 | static int gfar_close(struct net_device *dev); |
118 | struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp); | 118 | struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp); |
119 | static struct net_device_stats *gfar_get_stats(struct net_device *dev); | ||
120 | static int gfar_set_mac_address(struct net_device *dev); | 119 | static int gfar_set_mac_address(struct net_device *dev); |
121 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); | 120 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); |
122 | static irqreturn_t gfar_error(int irq, void *dev_id); | 121 | static irqreturn_t gfar_error(int irq, void *dev_id); |
@@ -266,7 +265,6 @@ static int gfar_probe(struct platform_device *pdev) | |||
266 | dev->poll_controller = gfar_netpoll; | 265 | dev->poll_controller = gfar_netpoll; |
267 | #endif | 266 | #endif |
268 | dev->stop = gfar_close; | 267 | dev->stop = gfar_close; |
269 | dev->get_stats = gfar_get_stats; | ||
270 | dev->change_mtu = gfar_change_mtu; | 268 | dev->change_mtu = gfar_change_mtu; |
271 | dev->mtu = 1500; | 269 | dev->mtu = 1500; |
272 | dev->set_multicast_list = gfar_set_multi; | 270 | dev->set_multicast_list = gfar_set_multi; |
@@ -1013,7 +1011,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1013 | unsigned long flags; | 1011 | unsigned long flags; |
1014 | 1012 | ||
1015 | /* Update transmit stats */ | 1013 | /* Update transmit stats */ |
1016 | priv->stats.tx_bytes += skb->len; | 1014 | dev->stats.tx_bytes += skb->len; |
1017 | 1015 | ||
1018 | /* Lock priv now */ | 1016 | /* Lock priv now */ |
1019 | spin_lock_irqsave(&priv->txlock, flags); | 1017 | spin_lock_irqsave(&priv->txlock, flags); |
@@ -1086,7 +1084,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1086 | if (txbdp == priv->dirty_tx) { | 1084 | if (txbdp == priv->dirty_tx) { |
1087 | netif_stop_queue(dev); | 1085 | netif_stop_queue(dev); |
1088 | 1086 | ||
1089 | priv->stats.tx_fifo_errors++; | 1087 | dev->stats.tx_fifo_errors++; |
1090 | } | 1088 | } |
1091 | 1089 | ||
1092 | /* Update the current txbd to the next one */ | 1090 | /* Update the current txbd to the next one */ |
@@ -1119,14 +1117,6 @@ static int gfar_close(struct net_device *dev) | |||
1119 | return 0; | 1117 | return 0; |
1120 | } | 1118 | } |
1121 | 1119 | ||
1122 | /* returns a net_device_stats structure pointer */ | ||
1123 | static struct net_device_stats * gfar_get_stats(struct net_device *dev) | ||
1124 | { | ||
1125 | struct gfar_private *priv = netdev_priv(dev); | ||
1126 | |||
1127 | return &(priv->stats); | ||
1128 | } | ||
1129 | |||
1130 | /* Changes the mac address if the controller is not running. */ | 1120 | /* Changes the mac address if the controller is not running. */ |
1131 | int gfar_set_mac_address(struct net_device *dev) | 1121 | int gfar_set_mac_address(struct net_device *dev) |
1132 | { | 1122 | { |
@@ -1238,7 +1228,7 @@ static void gfar_timeout(struct net_device *dev) | |||
1238 | { | 1228 | { |
1239 | struct gfar_private *priv = netdev_priv(dev); | 1229 | struct gfar_private *priv = netdev_priv(dev); |
1240 | 1230 | ||
1241 | priv->stats.tx_errors++; | 1231 | dev->stats.tx_errors++; |
1242 | 1232 | ||
1243 | if (dev->flags & IFF_UP) { | 1233 | if (dev->flags & IFF_UP) { |
1244 | stop_gfar(dev); | 1234 | stop_gfar(dev); |
@@ -1268,12 +1258,12 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id) | |||
1268 | if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) | 1258 | if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) |
1269 | break; | 1259 | break; |
1270 | 1260 | ||
1271 | priv->stats.tx_packets++; | 1261 | dev->stats.tx_packets++; |
1272 | 1262 | ||
1273 | /* Deferred means some collisions occurred during transmit, */ | 1263 | /* Deferred means some collisions occurred during transmit, */ |
1274 | /* but we eventually sent the packet. */ | 1264 | /* but we eventually sent the packet. */ |
1275 | if (bdp->status & TXBD_DEF) | 1265 | if (bdp->status & TXBD_DEF) |
1276 | priv->stats.collisions++; | 1266 | dev->stats.collisions++; |
1277 | 1267 | ||
1278 | /* Free the sk buffer associated with this TxBD */ | 1268 | /* Free the sk buffer associated with this TxBD */ |
1279 | dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); | 1269 | dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); |
@@ -1345,7 +1335,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) | |||
1345 | 1335 | ||
1346 | static inline void count_errors(unsigned short status, struct gfar_private *priv) | 1336 | static inline void count_errors(unsigned short status, struct net_device *dev) |
1347 | { | 1337 | { |
1348 | struct net_device_stats *stats = &priv->stats; | 1338 | struct net_device_stats *stats = &dev->stats; |
1349 | struct gfar_extra_stats *estats = &priv->extra_stats; | 1339 | struct gfar_extra_stats *estats = &((struct gfar_private *)netdev_priv(dev))->extra_stats; |
1350 | 1340 | ||
1351 | /* If the packet was truncated, none of the other errors | 1341 | /* If the packet was truncated, none of the other errors |
@@ -1470,7 +1460,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |||
1470 | if (NULL == skb) { | 1460 | if (NULL == skb) { |
1471 | if (netif_msg_rx_err(priv)) | 1461 | if (netif_msg_rx_err(priv)) |
1472 | printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name); | 1462 | printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name); |
1473 | priv->stats.rx_dropped++; | 1463 | dev->stats.rx_dropped++; |
1474 | priv->extra_stats.rx_skbmissing++; | 1464 | priv->extra_stats.rx_skbmissing++; |
1475 | } else { | 1465 | } else { |
1476 | int ret; | 1466 | int ret; |
@@ -1528,7 +1518,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | |||
1528 | (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET | 1518 | (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET |
1529 | | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) { | 1519 | | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) { |
1530 | /* Increment the number of packets */ | 1520 | /* Increment the number of packets */ |
1531 | priv->stats.rx_packets++; | 1521 | dev->stats.rx_packets++; |
1532 | howmany++; | 1522 | howmany++; |
1533 | 1523 | ||
1534 | /* Remove the FCS from the packet length */ | 1524 | /* Remove the FCS from the packet length */ |
@@ -1536,7 +1526,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | |||
1536 | 1526 | ||
1537 | gfar_process_frame(dev, skb, pkt_len); | 1527 | gfar_process_frame(dev, skb, pkt_len); |
1538 | 1528 | ||
1539 | priv->stats.rx_bytes += pkt_len; | 1529 | dev->stats.rx_bytes += pkt_len; |
1540 | } else { | 1530 | } else { |
1541 | count_errors(bdp->status, priv); | 1531 | count_errors(bdp->status, dev); |
1542 | 1532 | ||
@@ -1916,17 +1906,17 @@ static irqreturn_t gfar_error(int irq, void *dev_id) | |||
1916 | 1906 | ||
1917 | /* Update the error counters */ | 1907 | /* Update the error counters */ |
1918 | if (events & IEVENT_TXE) { | 1908 | if (events & IEVENT_TXE) { |
1919 | priv->stats.tx_errors++; | 1909 | dev->stats.tx_errors++; |
1920 | 1910 | ||
1921 | if (events & IEVENT_LC) | 1911 | if (events & IEVENT_LC) |
1922 | priv->stats.tx_window_errors++; | 1912 | dev->stats.tx_window_errors++; |
1923 | if (events & IEVENT_CRL) | 1913 | if (events & IEVENT_CRL) |
1924 | priv->stats.tx_aborted_errors++; | 1914 | dev->stats.tx_aborted_errors++; |
1925 | if (events & IEVENT_XFUN) { | 1915 | if (events & IEVENT_XFUN) { |
1926 | if (netif_msg_tx_err(priv)) | 1916 | if (netif_msg_tx_err(priv)) |
1927 | printk(KERN_DEBUG "%s: TX FIFO underrun, " | 1917 | printk(KERN_DEBUG "%s: TX FIFO underrun, " |
1928 | "packet dropped.\n", dev->name); | 1918 | "packet dropped.\n", dev->name); |
1929 | priv->stats.tx_dropped++; | 1919 | dev->stats.tx_dropped++; |
1930 | priv->extra_stats.tx_underrun++; | 1920 | priv->extra_stats.tx_underrun++; |
1931 | 1921 | ||
1932 | /* Reactivate the Tx Queues */ | 1922 | /* Reactivate the Tx Queues */ |
@@ -1936,7 +1926,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id) | |||
1936 | printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); | 1926 | printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); |
1937 | } | 1927 | } |
1938 | if (events & IEVENT_BSY) { | 1928 | if (events & IEVENT_BSY) { |
1939 | priv->stats.rx_errors++; | 1929 | dev->stats.rx_errors++; |
1940 | priv->extra_stats.rx_bsy++; | 1930 | priv->extra_stats.rx_bsy++; |
1941 | 1931 | ||
1942 | gfar_receive(irq, dev_id); | 1932 | gfar_receive(irq, dev_id); |
@@ -1951,7 +1941,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id) | |||
1951 | dev->name, gfar_read(&priv->regs->rstat)); | 1941 | dev->name, gfar_read(&priv->regs->rstat)); |
1952 | } | 1942 | } |
1953 | if (events & IEVENT_BABR) { | 1943 | if (events & IEVENT_BABR) { |
1954 | priv->stats.rx_errors++; | 1944 | dev->stats.rx_errors++; |
1955 | priv->extra_stats.rx_babr++; | 1945 | priv->extra_stats.rx_babr++; |
1956 | 1946 | ||
1957 | if (netif_msg_rx_err(priv)) | 1947 | if (netif_msg_rx_err(priv)) |
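One gianfar-specific wrinkle: count_errors() used to reach the counters through struct gfar_private, but dev->stats hangs off the net_device, so the helper needs the device pointer and its caller has to hand it dev instead of priv for the new code to build. Roughly, the converted helper is assumed to look like the sketch below; the exact in-tree form may differ:

static inline void count_errors(unsigned short status, struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct gfar_extra_stats *estats = &priv->extra_stats;

        /* A truncated frame masks the other error bits, so it is handled
         * first; the remaining RXBD_* checks keep updating estats and now
         * bump the matching dev->stats counters as well. */
        if (status & RXBD_TRUNCATED) {
                stats->rx_length_errors++;
                return;
        }
        /* ... */
}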
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c index c991cb82ff22..be6e5bc7c881 100644 --- a/drivers/net/hplance.c +++ b/drivers/net/hplance.c | |||
@@ -141,7 +141,6 @@ static void __init hplance_init(struct net_device *dev, struct dio_dev *d) | |||
141 | dev->poll_controller = lance_poll; | 141 | dev->poll_controller = lance_poll; |
142 | #endif | 142 | #endif |
143 | dev->hard_start_xmit = &lance_start_xmit; | 143 | dev->hard_start_xmit = &lance_start_xmit; |
144 | dev->get_stats = &lance_get_stats; | ||
145 | dev->set_multicast_list = &lance_set_multicast; | 144 | dev->set_multicast_list = &lance_set_multicast; |
146 | dev->dma = 0; | 145 | dev->dma = 0; |
147 | 146 | ||
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index 67d82fa7659d..eebf39acf586 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c | |||
@@ -591,7 +591,7 @@ static void irqrx_handler(struct net_device *dev) | |||
591 | 591 | ||
592 | skb = dev_alloc_skb(rda.length + 2); | 592 | skb = dev_alloc_skb(rda.length + 2); |
593 | if (skb == NULL) | 593 | if (skb == NULL) |
594 | priv->stat.rx_dropped++; | 594 | dev->stats.rx_dropped++; |
595 | else { | 595 | else { |
596 | /* copy out data */ | 596 | /* copy out data */ |
597 | 597 | ||
@@ -606,8 +606,8 @@ static void irqrx_handler(struct net_device *dev) | |||
606 | 606 | ||
607 | /* bookkeeping */ | 607 | /* bookkeeping */ |
608 | dev->last_rx = jiffies; | 608 | dev->last_rx = jiffies; |
609 | priv->stat.rx_packets++; | 609 | dev->stats.rx_packets++; |
610 | priv->stat.rx_bytes += rda.length; | 610 | dev->stats.rx_bytes += rda.length; |
611 | 611 | ||
612 | /* pass to the upper layers */ | 612 | /* pass to the upper layers */ |
613 | netif_rx(skb); | 613 | netif_rx(skb); |
@@ -617,11 +617,11 @@ static void irqrx_handler(struct net_device *dev) | |||
617 | /* otherwise check error status bits and increase statistics */ | 617 | /* otherwise check error status bits and increase statistics */ |
618 | 618 | ||
619 | else { | 619 | else { |
620 | priv->stat.rx_errors++; | 620 | dev->stats.rx_errors++; |
621 | if (rda.status & RCREG_FAER) | 621 | if (rda.status & RCREG_FAER) |
622 | priv->stat.rx_frame_errors++; | 622 | dev->stats.rx_frame_errors++; |
623 | if (rda.status & RCREG_CRCR) | 623 | if (rda.status & RCREG_CRCR) |
624 | priv->stat.rx_crc_errors++; | 624 | dev->stats.rx_crc_errors++; |
625 | } | 625 | } |
626 | 626 | ||
627 | /* descriptor processed, will become new last descriptor in queue */ | 627 | /* descriptor processed, will become new last descriptor in queue */ |
@@ -656,8 +656,8 @@ static void irqtx_handler(struct net_device *dev) | |||
656 | memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); | 656 | memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); |
657 | 657 | ||
658 | /* update statistics */ | 658 | /* update statistics */ |
659 | priv->stat.tx_packets++; | 659 | dev->stats.tx_packets++; |
660 | priv->stat.tx_bytes += tda.length; | 660 | dev->stats.tx_bytes += tda.length; |
661 | 661 | ||
662 | /* update our pointers */ | 662 | /* update our pointers */ |
663 | priv->txused[priv->currtxdescr] = 0; | 663 | priv->txused[priv->currtxdescr] = 0; |
@@ -680,15 +680,15 @@ static void irqtxerr_handler(struct net_device *dev) | |||
680 | memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); | 680 | memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); |
681 | 681 | ||
682 | /* update statistics */ | 682 | /* update statistics */ |
683 | priv->stat.tx_errors++; | 683 | dev->stats.tx_errors++; |
684 | if (tda.status & (TCREG_NCRS | TCREG_CRSL)) | 684 | if (tda.status & (TCREG_NCRS | TCREG_CRSL)) |
685 | priv->stat.tx_carrier_errors++; | 685 | dev->stats.tx_carrier_errors++; |
686 | if (tda.status & TCREG_EXC) | 686 | if (tda.status & TCREG_EXC) |
687 | priv->stat.tx_aborted_errors++; | 687 | dev->stats.tx_aborted_errors++; |
688 | if (tda.status & TCREG_OWC) | 688 | if (tda.status & TCREG_OWC) |
689 | priv->stat.tx_window_errors++; | 689 | dev->stats.tx_window_errors++; |
690 | if (tda.status & TCREG_FU) | 690 | if (tda.status & TCREG_FU) |
691 | priv->stat.tx_fifo_errors++; | 691 | dev->stats.tx_fifo_errors++; |
692 | 692 | ||
693 | /* update our pointers */ | 693 | /* update our pointers */ |
694 | priv->txused[priv->currtxdescr] = 0; | 694 | priv->txused[priv->currtxdescr] = 0; |
@@ -824,7 +824,7 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev) | |||
824 | 824 | ||
825 | if (priv->txusedcnt >= TXBUFCNT) { | 825 | if (priv->txusedcnt >= TXBUFCNT) { |
826 | retval = -EIO; | 826 | retval = -EIO; |
827 | priv->stat.tx_dropped++; | 827 | dev->stats.tx_dropped++; |
828 | goto tx_done; | 828 | goto tx_done; |
829 | } | 829 | } |
830 | 830 | ||
@@ -876,14 +876,6 @@ tx_done: | |||
876 | return retval; | 876 | return retval; |
877 | } | 877 | } |
878 | 878 | ||
879 | /* return pointer to Ethernet statistics */ | ||
880 | |||
881 | static struct net_device_stats *ibmlana_stats(struct net_device *dev) | ||
882 | { | ||
883 | ibmlana_priv *priv = netdev_priv(dev); | ||
884 | return &priv->stat; | ||
885 | } | ||
886 | |||
887 | /* switch receiver mode. */ | 879 | /* switch receiver mode. */ |
888 | 880 | ||
889 | static void ibmlana_set_multicast_list(struct net_device *dev) | 881 | static void ibmlana_set_multicast_list(struct net_device *dev) |
@@ -978,7 +970,6 @@ static int ibmlana_probe(struct net_device *dev) | |||
978 | dev->stop = ibmlana_close; | 970 | dev->stop = ibmlana_close; |
979 | dev->hard_start_xmit = ibmlana_tx; | 971 | dev->hard_start_xmit = ibmlana_tx; |
980 | dev->do_ioctl = NULL; | 972 | dev->do_ioctl = NULL; |
981 | dev->get_stats = ibmlana_stats; | ||
982 | dev->set_multicast_list = ibmlana_set_multicast_list; | 973 | dev->set_multicast_list = ibmlana_set_multicast_list; |
983 | dev->flags |= IFF_MULTICAST; | 974 | dev->flags |= IFF_MULTICAST; |
984 | 975 | ||
diff --git a/drivers/net/ibmlana.h b/drivers/net/ibmlana.h index 6b58bab9e308..aa3ddbdee4bb 100644 --- a/drivers/net/ibmlana.h +++ b/drivers/net/ibmlana.h | |||
@@ -26,7 +26,6 @@ typedef enum { | |||
26 | 26 | ||
27 | typedef struct { | 27 | typedef struct { |
28 | unsigned int slot; /* MCA-Slot-# */ | 28 | unsigned int slot; /* MCA-Slot-# */ |
29 | struct net_device_stats stat; /* packet statistics */ | ||
30 | int realirq; /* memorizes actual IRQ, even when | 29 | int realirq; /* memorizes actual IRQ, even when |
31 | currently not allocated */ | 30 | currently not allocated */ |
32 | ibmlana_medium medium; /* physical cannector */ | 31 | ibmlana_medium medium; /* physical cannector */ |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index db908c40dbe1..bdbf3dead4e2 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -87,7 +87,6 @@ static int ibmveth_close(struct net_device *dev); | |||
87 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | 87 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); |
88 | static int ibmveth_poll(struct napi_struct *napi, int budget); | 88 | static int ibmveth_poll(struct napi_struct *napi, int budget); |
89 | static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev); | 89 | static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev); |
90 | static struct net_device_stats *ibmveth_get_stats(struct net_device *dev); | ||
91 | static void ibmveth_set_multicast_list(struct net_device *dev); | 90 | static void ibmveth_set_multicast_list(struct net_device *dev); |
92 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu); | 91 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu); |
93 | static void ibmveth_proc_register_driver(void); | 92 | static void ibmveth_proc_register_driver(void); |
@@ -909,9 +908,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
909 | skb->len, DMA_TO_DEVICE); | 908 | skb->len, DMA_TO_DEVICE); |
910 | 909 | ||
911 | out: spin_lock_irqsave(&adapter->stats_lock, flags); | 910 | out: spin_lock_irqsave(&adapter->stats_lock, flags); |
912 | adapter->stats.tx_dropped += tx_dropped; | 911 | netdev->stats.tx_dropped += tx_dropped; |
913 | adapter->stats.tx_bytes += tx_bytes; | 912 | netdev->stats.tx_bytes += tx_bytes; |
914 | adapter->stats.tx_packets += tx_packets; | 913 | netdev->stats.tx_packets += tx_packets; |
915 | adapter->tx_send_failed += tx_send_failed; | 914 | adapter->tx_send_failed += tx_send_failed; |
916 | adapter->tx_map_failed += tx_map_failed; | 915 | adapter->tx_map_failed += tx_map_failed; |
917 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 916 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
@@ -957,8 +956,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) | |||
957 | 956 | ||
958 | netif_receive_skb(skb); /* send it up */ | 957 | netif_receive_skb(skb); /* send it up */ |
959 | 958 | ||
960 | adapter->stats.rx_packets++; | 959 | netdev->stats.rx_packets++; |
961 | adapter->stats.rx_bytes += length; | 960 | netdev->stats.rx_bytes += length; |
962 | frames_processed++; | 961 | frames_processed++; |
963 | netdev->last_rx = jiffies; | 962 | netdev->last_rx = jiffies; |
964 | } | 963 | } |
@@ -1003,12 +1002,6 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) | |||
1003 | return IRQ_HANDLED; | 1002 | return IRQ_HANDLED; |
1004 | } | 1003 | } |
1005 | 1004 | ||
1006 | static struct net_device_stats *ibmveth_get_stats(struct net_device *dev) | ||
1007 | { | ||
1008 | struct ibmveth_adapter *adapter = dev->priv; | ||
1009 | return &adapter->stats; | ||
1010 | } | ||
1011 | |||
1012 | static void ibmveth_set_multicast_list(struct net_device *netdev) | 1005 | static void ibmveth_set_multicast_list(struct net_device *netdev) |
1013 | { | 1006 | { |
1014 | struct ibmveth_adapter *adapter = netdev->priv; | 1007 | struct ibmveth_adapter *adapter = netdev->priv; |
@@ -1170,7 +1163,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
1170 | netdev->open = ibmveth_open; | 1163 | netdev->open = ibmveth_open; |
1171 | netdev->stop = ibmveth_close; | 1164 | netdev->stop = ibmveth_close; |
1172 | netdev->hard_start_xmit = ibmveth_start_xmit; | 1165 | netdev->hard_start_xmit = ibmveth_start_xmit; |
1173 | netdev->get_stats = ibmveth_get_stats; | ||
1174 | netdev->set_multicast_list = ibmveth_set_multicast_list; | 1166 | netdev->set_multicast_list = ibmveth_set_multicast_list; |
1175 | netdev->do_ioctl = ibmveth_ioctl; | 1167 | netdev->do_ioctl = ibmveth_ioctl; |
1176 | netdev->ethtool_ops = &netdev_ethtool_ops; | 1168 | netdev->ethtool_ops = &netdev_ethtool_ops; |
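ibmveth is slightly different in that its transmit counters are accumulated in locals and committed once under the adapter's stats_lock; the conversion keeps that locking and only changes the destination from adapter->stats to netdev->stats, while driver-specific counters such as tx_send_failed stay in the adapter. Condensed from the hunk above (a fragment, not the full function):

        spin_lock_irqsave(&adapter->stats_lock, flags);
        netdev->stats.tx_dropped += tx_dropped;
        netdev->stats.tx_bytes   += tx_bytes;
        netdev->stats.tx_packets += tx_packets;
        adapter->tx_send_failed  += tx_send_failed;  /* still private to the driver */
        adapter->tx_map_failed   += tx_map_failed;
        spin_unlock_irqrestore(&adapter->stats_lock, flags);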
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 448e618b6974..15949d3df17e 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c | |||
@@ -40,7 +40,6 @@ | |||
40 | 40 | ||
41 | #define TX_Q_LIMIT 32 | 41 | #define TX_Q_LIMIT 32 |
42 | struct ifb_private { | 42 | struct ifb_private { |
43 | struct net_device_stats stats; | ||
44 | struct tasklet_struct ifb_tasklet; | 43 | struct tasklet_struct ifb_tasklet; |
45 | int tasklet_pending; | 44 | int tasklet_pending; |
46 | /* mostly debug stats leave in for now */ | 45 | /* mostly debug stats leave in for now */ |
@@ -61,7 +60,6 @@ static int numifbs = 2; | |||
61 | 60 | ||
62 | static void ri_tasklet(unsigned long dev); | 61 | static void ri_tasklet(unsigned long dev); |
63 | static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); | 62 | static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); |
64 | static struct net_device_stats *ifb_get_stats(struct net_device *dev); | ||
65 | static int ifb_open(struct net_device *dev); | 63 | static int ifb_open(struct net_device *dev); |
66 | static int ifb_close(struct net_device *dev); | 64 | static int ifb_close(struct net_device *dev); |
67 | 65 | ||
@@ -70,7 +68,7 @@ static void ri_tasklet(unsigned long dev) | |||
70 | 68 | ||
71 | struct net_device *_dev = (struct net_device *)dev; | 69 | struct net_device *_dev = (struct net_device *)dev; |
72 | struct ifb_private *dp = netdev_priv(_dev); | 70 | struct ifb_private *dp = netdev_priv(_dev); |
73 | struct net_device_stats *stats = &dp->stats; | 71 | struct net_device_stats *stats = &_dev->stats; |
74 | struct sk_buff *skb; | 72 | struct sk_buff *skb; |
75 | 73 | ||
76 | dp->st_task_enter++; | 74 | dp->st_task_enter++; |
@@ -140,7 +138,6 @@ resched: | |||
140 | static void ifb_setup(struct net_device *dev) | 138 | static void ifb_setup(struct net_device *dev) |
141 | { | 139 | { |
142 | /* Initialize the device structure. */ | 140 | /* Initialize the device structure. */ |
143 | dev->get_stats = ifb_get_stats; | ||
144 | dev->hard_start_xmit = ifb_xmit; | 141 | dev->hard_start_xmit = ifb_xmit; |
145 | dev->open = &ifb_open; | 142 | dev->open = &ifb_open; |
146 | dev->stop = &ifb_close; | 143 | dev->stop = &ifb_close; |
@@ -158,7 +155,7 @@ static void ifb_setup(struct net_device *dev) | |||
158 | static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) | 155 | static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) |
159 | { | 156 | { |
160 | struct ifb_private *dp = netdev_priv(dev); | 157 | struct ifb_private *dp = netdev_priv(dev); |
161 | struct net_device_stats *stats = &dp->stats; | 158 | struct net_device_stats *stats = &dev->stats; |
162 | int ret = 0; | 159 | int ret = 0; |
163 | u32 from = G_TC_FROM(skb->tc_verd); | 160 | u32 from = G_TC_FROM(skb->tc_verd); |
164 | 161 | ||
@@ -185,19 +182,6 @@ static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) | |||
185 | return ret; | 182 | return ret; |
186 | } | 183 | } |
187 | 184 | ||
188 | static struct net_device_stats *ifb_get_stats(struct net_device *dev) | ||
189 | { | ||
190 | struct ifb_private *dp = netdev_priv(dev); | ||
191 | struct net_device_stats *stats = &dp->stats; | ||
192 | |||
193 | pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld \n", | ||
194 | dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter, | ||
195 | dp->st_rx2tx_tran, dp->st_rxq_notenter, dp->st_rx_frm_egr, | ||
196 | dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch); | ||
197 | |||
198 | return stats; | ||
199 | } | ||
200 | |||
201 | static int ifb_close(struct net_device *dev) | 185 | static int ifb_close(struct net_device *dev) |
202 | { | 186 | { |
203 | struct ifb_private *dp = netdev_priv(dev); | 187 | struct ifb_private *dp = netdev_priv(dev); |
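Note that ifb's removed get_stats did more than return a pointer: it also pr_debug-ed the tasklet bookkeeping counters, and that trace disappears with the hook. The counters themselves remain in struct ifb_private, so a driver wanting to keep the dump could call a small helper from, say, ifb_close(); a hypothetical helper, not part of this patch:

static void ifb_dump_counters(const struct ifb_private *dp)
{
        pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld\n",
                 dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter,
                 dp->st_rx2tx_tran, dp->st_rxq_notenter, dp->st_rx_frm_egr,
                 dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch);
}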
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 0433c41f9029..97bd9dc2e52e 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c | |||
@@ -196,7 +196,6 @@ struct veth_lpar_connection { | |||
196 | 196 | ||
197 | struct veth_port { | 197 | struct veth_port { |
198 | struct device *dev; | 198 | struct device *dev; |
199 | struct net_device_stats stats; | ||
200 | u64 mac_addr; | 199 | u64 mac_addr; |
201 | HvLpIndexMap lpar_map; | 200 | HvLpIndexMap lpar_map; |
202 | 201 | ||
@@ -936,9 +935,6 @@ static void veth_release_connection(struct kobject *kobj) | |||
936 | 935 | ||
937 | static int veth_open(struct net_device *dev) | 936 | static int veth_open(struct net_device *dev) |
938 | { | 937 | { |
939 | struct veth_port *port = (struct veth_port *) dev->priv; | ||
940 | |||
941 | memset(&port->stats, 0, sizeof (port->stats)); | ||
942 | netif_start_queue(dev); | 938 | netif_start_queue(dev); |
943 | return 0; | 939 | return 0; |
944 | } | 940 | } |
@@ -949,13 +945,6 @@ static int veth_close(struct net_device *dev) | |||
949 | return 0; | 945 | return 0; |
950 | } | 946 | } |
951 | 947 | ||
952 | static struct net_device_stats *veth_get_stats(struct net_device *dev) | ||
953 | { | ||
954 | struct veth_port *port = (struct veth_port *) dev->priv; | ||
955 | |||
956 | return &port->stats; | ||
957 | } | ||
958 | |||
959 | static int veth_change_mtu(struct net_device *dev, int new_mtu) | 948 | static int veth_change_mtu(struct net_device *dev, int new_mtu) |
960 | { | 949 | { |
961 | if ((new_mtu < 68) || (new_mtu > VETH_MAX_MTU)) | 950 | if ((new_mtu < 68) || (new_mtu > VETH_MAX_MTU)) |
@@ -1084,7 +1073,6 @@ static struct net_device * __init veth_probe_one(int vlan, | |||
1084 | dev->open = veth_open; | 1073 | dev->open = veth_open; |
1085 | dev->hard_start_xmit = veth_start_xmit; | 1074 | dev->hard_start_xmit = veth_start_xmit; |
1086 | dev->stop = veth_close; | 1075 | dev->stop = veth_close; |
1087 | dev->get_stats = veth_get_stats; | ||
1088 | dev->change_mtu = veth_change_mtu; | 1076 | dev->change_mtu = veth_change_mtu; |
1089 | dev->set_mac_address = NULL; | 1077 | dev->set_mac_address = NULL; |
1090 | dev->set_multicast_list = veth_set_multicast_list; | 1078 | dev->set_multicast_list = veth_set_multicast_list; |
@@ -1183,7 +1171,6 @@ static void veth_transmit_to_many(struct sk_buff *skb, | |||
1183 | HvLpIndexMap lpmask, | 1171 | HvLpIndexMap lpmask, |
1184 | struct net_device *dev) | 1172 | struct net_device *dev) |
1185 | { | 1173 | { |
1186 | struct veth_port *port = (struct veth_port *) dev->priv; | ||
1187 | int i, success, error; | 1174 | int i, success, error; |
1188 | 1175 | ||
1189 | success = error = 0; | 1176 | success = error = 0; |
@@ -1199,11 +1186,11 @@ static void veth_transmit_to_many(struct sk_buff *skb, | |||
1199 | } | 1186 | } |
1200 | 1187 | ||
1201 | if (error) | 1188 | if (error) |
1202 | port->stats.tx_errors++; | 1189 | dev->stats.tx_errors++; |
1203 | 1190 | ||
1204 | if (success) { | 1191 | if (success) { |
1205 | port->stats.tx_packets++; | 1192 | dev->stats.tx_packets++; |
1206 | port->stats.tx_bytes += skb->len; | 1193 | dev->stats.tx_bytes += skb->len; |
1207 | } | 1194 | } |
1208 | } | 1195 | } |
1209 | 1196 | ||
@@ -1541,8 +1528,8 @@ static void veth_receive(struct veth_lpar_connection *cnx, | |||
1541 | skb->protocol = eth_type_trans(skb, dev); | 1528 | skb->protocol = eth_type_trans(skb, dev); |
1542 | skb->ip_summed = CHECKSUM_NONE; | 1529 | skb->ip_summed = CHECKSUM_NONE; |
1543 | netif_rx(skb); /* send it up */ | 1530 | netif_rx(skb); /* send it up */ |
1544 | port->stats.rx_packets++; | 1531 | dev->stats.rx_packets++; |
1545 | port->stats.rx_bytes += length; | 1532 | dev->stats.rx_bytes += length; |
1546 | } while (startchunk += nchunks, startchunk < VETH_MAX_FRAMES_PER_MSG); | 1533 | } while (startchunk += nchunks, startchunk < VETH_MAX_FRAMES_PER_MSG); |
1547 | 1534 | ||
1548 | /* Ack it */ | 1535 | /* Ack it */ |
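iseries_veth also loses the memset of its per-port stats in veth_open: dev->stats is already zeroed when the net_device is allocated, so no explicit clear is needed, though (assuming the usual core behaviour) the counters now persist across down/up cycles instead of resetting on every open. The resulting open routine, as the hunk above leaves it:

static int veth_open(struct net_device *dev)
{
        /* dev->stats starts out zeroed with the net_device itself */
        netif_start_queue(dev);
        return 0;
}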
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c index 5884f5bd04a4..afa4638052a2 100644 --- a/drivers/net/lib82596.c +++ b/drivers/net/lib82596.c | |||
@@ -322,7 +322,6 @@ struct i596_private { | |||
322 | struct i596_cmd *cmd_head; | 322 | struct i596_cmd *cmd_head; |
323 | int cmd_backlog; | 323 | int cmd_backlog; |
324 | u32 last_cmd; | 324 | u32 last_cmd; |
325 | struct net_device_stats stats; | ||
326 | int next_tx_cmd; | 325 | int next_tx_cmd; |
327 | int options; | 326 | int options; |
328 | spinlock_t lock; /* serialize access to chip */ | 327 | spinlock_t lock; /* serialize access to chip */ |
@@ -352,7 +351,6 @@ static int i596_open(struct net_device *dev); | |||
352 | static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); | 351 | static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); |
353 | static irqreturn_t i596_interrupt(int irq, void *dev_id); | 352 | static irqreturn_t i596_interrupt(int irq, void *dev_id); |
354 | static int i596_close(struct net_device *dev); | 353 | static int i596_close(struct net_device *dev); |
355 | static struct net_device_stats *i596_get_stats(struct net_device *dev); | ||
356 | static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); | 354 | static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); |
357 | static void i596_tx_timeout (struct net_device *dev); | 355 | static void i596_tx_timeout (struct net_device *dev); |
358 | static void print_eth(unsigned char *buf, char *str); | 356 | static void print_eth(unsigned char *buf, char *str); |
@@ -725,7 +723,7 @@ memory_squeeze: | |||
725 | printk(KERN_ERR | 723 | printk(KERN_ERR |
726 | "%s: i596_rx Memory squeeze, dropping packet.\n", | 724 | "%s: i596_rx Memory squeeze, dropping packet.\n", |
727 | dev->name); | 725 | dev->name); |
728 | lp->stats.rx_dropped++; | 726 | dev->stats.rx_dropped++; |
729 | } else { | 727 | } else { |
730 | if (!rx_in_place) { | 728 | if (!rx_in_place) { |
731 | /* 16 byte align the data fields */ | 729 | /* 16 byte align the data fields */ |
@@ -742,28 +740,28 @@ memory_squeeze: | |||
742 | skb->protocol = eth_type_trans(skb, dev); | 740 | skb->protocol = eth_type_trans(skb, dev); |
743 | netif_rx(skb); | 741 | netif_rx(skb); |
744 | dev->last_rx = jiffies; | 742 | dev->last_rx = jiffies; |
745 | lp->stats.rx_packets++; | 743 | dev->stats.rx_packets++; |
746 | lp->stats.rx_bytes += pkt_len; | 744 | dev->stats.rx_bytes += pkt_len; |
747 | } | 745 | } |
748 | } else { | 746 | } else { |
749 | DEB(DEB_ERRORS, printk(KERN_DEBUG | 747 | DEB(DEB_ERRORS, printk(KERN_DEBUG |
750 | "%s: Error, rfd.stat = 0x%04x\n", | 748 | "%s: Error, rfd.stat = 0x%04x\n", |
751 | dev->name, rfd->stat)); | 749 | dev->name, rfd->stat)); |
752 | lp->stats.rx_errors++; | 750 | dev->stats.rx_errors++; |
753 | if (rfd->stat & SWAP16(0x0100)) | 751 | if (rfd->stat & SWAP16(0x0100)) |
754 | lp->stats.collisions++; | 752 | dev->stats.collisions++; |
755 | if (rfd->stat & SWAP16(0x8000)) | 753 | if (rfd->stat & SWAP16(0x8000)) |
756 | lp->stats.rx_length_errors++; | 754 | dev->stats.rx_length_errors++; |
757 | if (rfd->stat & SWAP16(0x0001)) | 755 | if (rfd->stat & SWAP16(0x0001)) |
758 | lp->stats.rx_over_errors++; | 756 | dev->stats.rx_over_errors++; |
759 | if (rfd->stat & SWAP16(0x0002)) | 757 | if (rfd->stat & SWAP16(0x0002)) |
760 | lp->stats.rx_fifo_errors++; | 758 | dev->stats.rx_fifo_errors++; |
761 | if (rfd->stat & SWAP16(0x0004)) | 759 | if (rfd->stat & SWAP16(0x0004)) |
762 | lp->stats.rx_frame_errors++; | 760 | dev->stats.rx_frame_errors++; |
763 | if (rfd->stat & SWAP16(0x0008)) | 761 | if (rfd->stat & SWAP16(0x0008)) |
764 | lp->stats.rx_crc_errors++; | 762 | dev->stats.rx_crc_errors++; |
765 | if (rfd->stat & SWAP16(0x0010)) | 763 | if (rfd->stat & SWAP16(0x0010)) |
766 | lp->stats.rx_length_errors++; | 764 | dev->stats.rx_length_errors++; |
767 | } | 765 | } |
768 | 766 | ||
769 | /* Clear the buffer descriptor count and EOF + F flags */ | 767 | /* Clear the buffer descriptor count and EOF + F flags */ |
@@ -821,8 +819,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private | |||
821 | 819 | ||
822 | dev_kfree_skb(skb); | 820 | dev_kfree_skb(skb); |
823 | 821 | ||
824 | lp->stats.tx_errors++; | 822 | dev->stats.tx_errors++; |
825 | lp->stats.tx_aborted_errors++; | 823 | dev->stats.tx_aborted_errors++; |
826 | 824 | ||
827 | ptr->v_next = NULL; | 825 | ptr->v_next = NULL; |
828 | ptr->b_next = I596_NULL; | 826 | ptr->b_next = I596_NULL; |
@@ -951,10 +949,10 @@ static void i596_tx_timeout (struct net_device *dev) | |||
951 | "%s: transmit timed out, status resetting.\n", | 949 | "%s: transmit timed out, status resetting.\n", |
952 | dev->name)); | 950 | dev->name)); |
953 | 951 | ||
954 | lp->stats.tx_errors++; | 952 | dev->stats.tx_errors++; |
955 | 953 | ||
956 | /* Try to restart the adaptor */ | 954 | /* Try to restart the adaptor */ |
957 | if (lp->last_restart == lp->stats.tx_packets) { | 955 | if (lp->last_restart == dev->stats.tx_packets) { |
958 | DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n")); | 956 | DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n")); |
959 | /* Shutdown and restart */ | 957 | /* Shutdown and restart */ |
960 | i596_reset (dev, lp); | 958 | i596_reset (dev, lp); |
@@ -964,7 +962,7 @@ static void i596_tx_timeout (struct net_device *dev) | |||
964 | lp->dma->scb.command = SWAP16(CUC_START | RX_START); | 962 | lp->dma->scb.command = SWAP16(CUC_START | RX_START); |
965 | DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb)); | 963 | DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb)); |
966 | ca (dev); | 964 | ca (dev); |
967 | lp->last_restart = lp->stats.tx_packets; | 965 | lp->last_restart = dev->stats.tx_packets; |
968 | } | 966 | } |
969 | 967 | ||
970 | dev->trans_start = jiffies; | 968 | dev->trans_start = jiffies; |
@@ -999,7 +997,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
999 | DEB(DEB_ERRORS, printk(KERN_DEBUG | 997 | DEB(DEB_ERRORS, printk(KERN_DEBUG |
1000 | "%s: xmit ring full, dropping packet.\n", | 998 | "%s: xmit ring full, dropping packet.\n", |
1001 | dev->name)); | 999 | dev->name)); |
1002 | lp->stats.tx_dropped++; | 1000 | dev->stats.tx_dropped++; |
1003 | 1001 | ||
1004 | dev_kfree_skb(skb); | 1002 | dev_kfree_skb(skb); |
1005 | } else { | 1003 | } else { |
@@ -1025,8 +1023,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1025 | DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd)); | 1023 | DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd)); |
1026 | i596_add_cmd(dev, &tx_cmd->cmd); | 1024 | i596_add_cmd(dev, &tx_cmd->cmd); |
1027 | 1025 | ||
1028 | lp->stats.tx_packets++; | 1026 | dev->stats.tx_packets++; |
1029 | lp->stats.tx_bytes += length; | 1027 | dev->stats.tx_bytes += length; |
1030 | } | 1028 | } |
1031 | 1029 | ||
1032 | netif_start_queue(dev); | 1030 | netif_start_queue(dev); |
@@ -1076,7 +1074,6 @@ static int __devinit i82596_probe(struct net_device *dev) | |||
1076 | dev->open = i596_open; | 1074 | dev->open = i596_open; |
1077 | dev->stop = i596_close; | 1075 | dev->stop = i596_close; |
1078 | dev->hard_start_xmit = i596_start_xmit; | 1076 | dev->hard_start_xmit = i596_start_xmit; |
1079 | dev->get_stats = i596_get_stats; | ||
1080 | dev->set_multicast_list = set_multicast_list; | 1077 | dev->set_multicast_list = set_multicast_list; |
1081 | dev->tx_timeout = i596_tx_timeout; | 1078 | dev->tx_timeout = i596_tx_timeout; |
1082 | dev->watchdog_timeo = TX_TIMEOUT; | 1079 | dev->watchdog_timeo = TX_TIMEOUT; |
@@ -1197,17 +1194,17 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) | |||
1197 | DEB(DEB_TXADDR, | 1194 | DEB(DEB_TXADDR, |
1198 | print_eth(skb->data, "tx-done")); | 1195 | print_eth(skb->data, "tx-done")); |
1199 | } else { | 1196 | } else { |
1200 | lp->stats.tx_errors++; | 1197 | dev->stats.tx_errors++; |
1201 | if (ptr->status & SWAP16(0x0020)) | 1198 | if (ptr->status & SWAP16(0x0020)) |
1202 | lp->stats.collisions++; | 1199 | dev->stats.collisions++; |
1203 | if (!(ptr->status & SWAP16(0x0040))) | 1200 | if (!(ptr->status & SWAP16(0x0040))) |
1204 | lp->stats.tx_heartbeat_errors++; | 1201 | dev->stats.tx_heartbeat_errors++; |
1205 | if (ptr->status & SWAP16(0x0400)) | 1202 | if (ptr->status & SWAP16(0x0400)) |
1206 | lp->stats.tx_carrier_errors++; | 1203 | dev->stats.tx_carrier_errors++; |
1207 | if (ptr->status & SWAP16(0x0800)) | 1204 | if (ptr->status & SWAP16(0x0800)) |
1208 | lp->stats.collisions++; | 1205 | dev->stats.collisions++; |
1209 | if (ptr->status & SWAP16(0x1000)) | 1206 | if (ptr->status & SWAP16(0x1000)) |
1210 | lp->stats.tx_aborted_errors++; | 1207 | dev->stats.tx_aborted_errors++; |
1211 | } | 1208 | } |
1212 | dma_unmap_single(dev->dev.parent, | 1209 | dma_unmap_single(dev->dev.parent, |
1213 | tx_cmd->dma_addr, | 1210 | tx_cmd->dma_addr, |
@@ -1292,8 +1289,8 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) | |||
1292 | "%s: i596 interrupt receive unit inactive, status 0x%x\n", | 1289 | "%s: i596 interrupt receive unit inactive, status 0x%x\n", |
1293 | dev->name, status)); | 1290 | dev->name, status)); |
1294 | ack_cmd |= RX_START; | 1291 | ack_cmd |= RX_START; |
1295 | lp->stats.rx_errors++; | 1292 | dev->stats.rx_errors++; |
1296 | lp->stats.rx_fifo_errors++; | 1293 | dev->stats.rx_fifo_errors++; |
1297 | rebuild_rx_bufs(dev); | 1294 | rebuild_rx_bufs(dev); |
1298 | } | 1295 | } |
1299 | } | 1296 | } |
@@ -1346,13 +1343,6 @@ static int i596_close(struct net_device *dev) | |||
1346 | return 0; | 1343 | return 0; |
1347 | } | 1344 | } |
1348 | 1345 | ||
1349 | static struct net_device_stats *i596_get_stats(struct net_device *dev) | ||
1350 | { | ||
1351 | struct i596_private *lp = netdev_priv(dev); | ||
1352 | |||
1353 | return &lp->stats; | ||
1354 | } | ||
1355 | |||
1356 | /* | 1346 | /* |
1357 | * Set or clear the multicast filter for this adaptor. | 1347 | * Set or clear the multicast filter for this adaptor. |
1358 | */ | 1348 | */ |
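lib82596 shows that only the statistics move: the rest of the private state (last_restart, the SCB and DMA handles) stays in i596_private, and the transmit watchdog now uses dev->stats.tx_packets as its progress marker. A condensed view of the timeout path from the hunks above, with the debug prints omitted and the tail abbreviated:

static void i596_tx_timeout(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);

        dev->stats.tx_errors++;

        /* No packet completed since the last restart: reset the chip.
         * Otherwise just restart the command and receive units. */
        if (lp->last_restart == dev->stats.tx_packets) {
                i596_reset(dev, lp);
        } else {
                lp->dma->scb.command = SWAP16(CUC_START | RX_START);
                DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
                ca(dev);
                lp->last_restart = dev->stats.tx_packets;
        }

        dev->trans_start = jiffies;
        /* ... */
}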
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c index 408ae6eb6a8b..c5095ecd8b11 100644 --- a/drivers/net/lp486e.c +++ b/drivers/net/lp486e.c | |||
@@ -350,7 +350,6 @@ struct i596_private { /* aligned to a 16-byte boundary */ | |||
350 | struct i596_cmd *cmd_head; | 350 | struct i596_cmd *cmd_head; |
351 | int cmd_backlog; | 351 | int cmd_backlog; |
352 | unsigned long last_cmd; | 352 | unsigned long last_cmd; |
353 | struct net_device_stats stats; | ||
354 | spinlock_t cmd_lock; | 353 | spinlock_t cmd_lock; |
355 | }; | 354 | }; |
356 | 355 | ||
@@ -381,7 +380,6 @@ static int i596_open(struct net_device *dev); | |||
381 | static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); | 380 | static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); |
382 | static irqreturn_t i596_interrupt(int irq, void *dev_id); | 381 | static irqreturn_t i596_interrupt(int irq, void *dev_id); |
383 | static int i596_close(struct net_device *dev); | 382 | static int i596_close(struct net_device *dev); |
384 | static struct net_device_stats *i596_get_stats(struct net_device *dev); | ||
385 | static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); | 383 | static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); |
386 | static void print_eth(char *); | 384 | static void print_eth(char *); |
387 | static void set_multicast_list(struct net_device *dev); | 385 | static void set_multicast_list(struct net_device *dev); |
@@ -670,7 +668,7 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp, | |||
670 | if (skb == NULL) { | 668 | if (skb == NULL) { |
671 | printk ("%s: i596_rx Memory squeeze, " | 669 | printk ("%s: i596_rx Memory squeeze, " |
672 | "dropping packet.\n", dev->name); | 670 | "dropping packet.\n", dev->name); |
673 | lp->stats.rx_dropped++; | 671 | dev->stats.rx_dropped++; |
674 | return 1; | 672 | return 1; |
675 | } | 673 | } |
676 | 674 | ||
@@ -679,27 +677,27 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp, | |||
679 | skb->protocol = eth_type_trans(skb,dev); | 677 | skb->protocol = eth_type_trans(skb,dev); |
680 | netif_rx(skb); | 678 | netif_rx(skb); |
681 | dev->last_rx = jiffies; | 679 | dev->last_rx = jiffies; |
682 | lp->stats.rx_packets++; | 680 | dev->stats.rx_packets++; |
683 | } else { | 681 | } else { |
684 | #if 0 | 682 | #if 0 |
685 | printk("Frame reception error status %04x\n", | 683 | printk("Frame reception error status %04x\n", |
686 | rfd->stat); | 684 | rfd->stat); |
687 | #endif | 685 | #endif |
688 | lp->stats.rx_errors++; | 686 | dev->stats.rx_errors++; |
689 | if (rfd->stat & RFD_COLLISION) | 687 | if (rfd->stat & RFD_COLLISION) |
690 | lp->stats.collisions++; | 688 | dev->stats.collisions++; |
691 | if (rfd->stat & RFD_SHORT_FRAME_ERR) | 689 | if (rfd->stat & RFD_SHORT_FRAME_ERR) |
692 | lp->stats.rx_length_errors++; | 690 | dev->stats.rx_length_errors++; |
693 | if (rfd->stat & RFD_DMA_ERR) | 691 | if (rfd->stat & RFD_DMA_ERR) |
694 | lp->stats.rx_over_errors++; | 692 | dev->stats.rx_over_errors++; |
695 | if (rfd->stat & RFD_NOBUFS_ERR) | 693 | if (rfd->stat & RFD_NOBUFS_ERR) |
696 | lp->stats.rx_fifo_errors++; | 694 | dev->stats.rx_fifo_errors++; |
697 | if (rfd->stat & RFD_ALIGN_ERR) | 695 | if (rfd->stat & RFD_ALIGN_ERR) |
698 | lp->stats.rx_frame_errors++; | 696 | dev->stats.rx_frame_errors++; |
699 | if (rfd->stat & RFD_CRC_ERR) | 697 | if (rfd->stat & RFD_CRC_ERR) |
700 | lp->stats.rx_crc_errors++; | 698 | dev->stats.rx_crc_errors++; |
701 | if (rfd->stat & RFD_LENGTH_ERR) | 699 | if (rfd->stat & RFD_LENGTH_ERR) |
702 | lp->stats.rx_length_errors++; | 700 | dev->stats.rx_length_errors++; |
703 | } | 701 | } |
704 | rfd->stat = rfd->count = 0; | 702 | rfd->stat = rfd->count = 0; |
705 | return 0; | 703 | return 0; |
@@ -755,8 +753,8 @@ i596_cleanup_cmd(struct net_device *dev) { | |||
755 | 753 | ||
756 | dev_kfree_skb_any(tx_cmd_tbd->skb); | 754 | dev_kfree_skb_any(tx_cmd_tbd->skb); |
757 | 755 | ||
758 | lp->stats.tx_errors++; | 756 | dev->stats.tx_errors++; |
759 | lp->stats.tx_aborted_errors++; | 757 | dev->stats.tx_aborted_errors++; |
760 | 758 | ||
761 | cmd->pa_next = I596_NULL; | 759 | cmd->pa_next = I596_NULL; |
762 | kfree((unsigned char *)tx_cmd); | 760 | kfree((unsigned char *)tx_cmd); |
@@ -867,7 +865,6 @@ static int i596_open(struct net_device *dev) | |||
867 | } | 865 | } |
868 | 866 | ||
869 | static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { | 867 | static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { |
870 | struct i596_private *lp = dev->priv; | ||
871 | struct tx_cmd *tx_cmd; | 868 | struct tx_cmd *tx_cmd; |
872 | short length; | 869 | short length; |
873 | 870 | ||
@@ -884,7 +881,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { | |||
884 | tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC); | 881 | tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC); |
885 | if (tx_cmd == NULL) { | 882 | if (tx_cmd == NULL) { |
886 | printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name); | 883 | printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name); |
887 | lp->stats.tx_dropped++; | 884 | dev->stats.tx_dropped++; |
888 | dev_kfree_skb (skb); | 885 | dev_kfree_skb (skb); |
889 | } else { | 886 | } else { |
890 | struct i596_tbd *tx_cmd_tbd; | 887 | struct i596_tbd *tx_cmd_tbd; |
@@ -907,7 +904,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { | |||
907 | 904 | ||
908 | i596_add_cmd (dev, (struct i596_cmd *) tx_cmd); | 905 | i596_add_cmd (dev, (struct i596_cmd *) tx_cmd); |
909 | 906 | ||
910 | lp->stats.tx_packets++; | 907 | dev->stats.tx_packets++; |
911 | } | 908 | } |
912 | 909 | ||
913 | return 0; | 910 | return 0; |
@@ -920,10 +917,10 @@ i596_tx_timeout (struct net_device *dev) { | |||
920 | 917 | ||
921 | /* Transmitter timeout, serious problems. */ | 918 | /* Transmitter timeout, serious problems. */ |
922 | printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name); | 919 | printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name); |
923 | lp->stats.tx_errors++; | 920 | dev->stats.tx_errors++; |
924 | 921 | ||
925 | /* Try to restart the adaptor */ | 922 | /* Try to restart the adaptor */ |
926 | if (lp->last_restart == lp->stats.tx_packets) { | 923 | if (lp->last_restart == dev->stats.tx_packets) { |
927 | printk ("Resetting board.\n"); | 924 | printk ("Resetting board.\n"); |
928 | 925 | ||
929 | /* Shutdown and restart */ | 926 | /* Shutdown and restart */ |
@@ -933,7 +930,7 @@ i596_tx_timeout (struct net_device *dev) { | |||
933 | printk ("Kicking board.\n"); | 930 | printk ("Kicking board.\n"); |
934 | lp->scb.command = (CUC_START | RX_START); | 931 | lp->scb.command = (CUC_START | RX_START); |
935 | CA(); | 932 | CA(); |
936 | lp->last_restart = lp->stats.tx_packets; | 933 | lp->last_restart = dev->stats.tx_packets; |
937 | } | 934 | } |
938 | netif_wake_queue(dev); | 935 | netif_wake_queue(dev); |
939 | } | 936 | } |
@@ -1021,7 +1018,6 @@ static int __init lp486e_probe(struct net_device *dev) { | |||
1021 | dev->open = &i596_open; | 1018 | dev->open = &i596_open; |
1022 | dev->stop = &i596_close; | 1019 | dev->stop = &i596_close; |
1023 | dev->hard_start_xmit = &i596_start_xmit; | 1020 | dev->hard_start_xmit = &i596_start_xmit; |
1024 | dev->get_stats = &i596_get_stats; | ||
1025 | dev->set_multicast_list = &set_multicast_list; | 1021 | dev->set_multicast_list = &set_multicast_list; |
1026 | dev->watchdog_timeo = 5*HZ; | 1022 | dev->watchdog_timeo = 5*HZ; |
1027 | dev->tx_timeout = i596_tx_timeout; | 1023 | dev->tx_timeout = i596_tx_timeout; |
@@ -1078,20 +1074,20 @@ i596_handle_CU_completion(struct net_device *dev, | |||
1078 | if (i596_debug) | 1074 | if (i596_debug) |
1079 | print_eth(pa_to_va(tx_cmd_tbd->pa_data)); | 1075 | print_eth(pa_to_va(tx_cmd_tbd->pa_data)); |
1080 | } else { | 1076 | } else { |
1081 | lp->stats.tx_errors++; | 1077 | dev->stats.tx_errors++; |
1082 | if (i596_debug) | 1078 | if (i596_debug) |
1083 | printk("transmission failure:%04x\n", | 1079 | printk("transmission failure:%04x\n", |
1084 | cmd->status); | 1080 | cmd->status); |
1085 | if (cmd->status & 0x0020) | 1081 | if (cmd->status & 0x0020) |
1086 | lp->stats.collisions++; | 1082 | dev->stats.collisions++; |
1087 | if (!(cmd->status & 0x0040)) | 1083 | if (!(cmd->status & 0x0040)) |
1088 | lp->stats.tx_heartbeat_errors++; | 1084 | dev->stats.tx_heartbeat_errors++; |
1089 | if (cmd->status & 0x0400) | 1085 | if (cmd->status & 0x0400) |
1090 | lp->stats.tx_carrier_errors++; | 1086 | dev->stats.tx_carrier_errors++; |
1091 | if (cmd->status & 0x0800) | 1087 | if (cmd->status & 0x0800) |
1092 | lp->stats.collisions++; | 1088 | dev->stats.collisions++; |
1093 | if (cmd->status & 0x1000) | 1089 | if (cmd->status & 0x1000) |
1094 | lp->stats.tx_aborted_errors++; | 1090 | dev->stats.tx_aborted_errors++; |
1095 | } | 1091 | } |
1096 | dev_kfree_skb_irq(tx_cmd_tbd->skb); | 1092 | dev_kfree_skb_irq(tx_cmd_tbd->skb); |
1097 | 1093 | ||
@@ -1242,12 +1238,6 @@ static int i596_close(struct net_device *dev) { | |||
1242 | return 0; | 1238 | return 0; |
1243 | } | 1239 | } |
1244 | 1240 | ||
1245 | static struct net_device_stats * i596_get_stats(struct net_device *dev) { | ||
1246 | struct i596_private *lp = dev->priv; | ||
1247 | |||
1248 | return &lp->stats; | ||
1249 | } | ||
1250 | |||
1251 | /* | 1241 | /* |
1252 | * Set or clear the multicast filter for this adaptor. | 1242 | * Set or clear the multicast filter for this adaptor. |
1253 | */ | 1243 | */ |
diff --git a/drivers/net/mace.c b/drivers/net/mace.c index de3b002e9a4c..ee132b1e09b0 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c | |||
@@ -57,7 +57,6 @@ struct mace_data { | |||
57 | unsigned char tx_fullup; | 57 | unsigned char tx_fullup; |
58 | unsigned char tx_active; | 58 | unsigned char tx_active; |
59 | unsigned char tx_bad_runt; | 59 | unsigned char tx_bad_runt; |
60 | struct net_device_stats stats; | ||
61 | struct timer_list tx_timeout; | 60 | struct timer_list tx_timeout; |
62 | int timeout_active; | 61 | int timeout_active; |
63 | int port_aaui; | 62 | int port_aaui; |
@@ -78,7 +77,6 @@ struct mace_data { | |||
78 | static int mace_open(struct net_device *dev); | 77 | static int mace_open(struct net_device *dev); |
79 | static int mace_close(struct net_device *dev); | 78 | static int mace_close(struct net_device *dev); |
80 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); | 79 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); |
81 | static struct net_device_stats *mace_stats(struct net_device *dev); | ||
82 | static void mace_set_multicast(struct net_device *dev); | 80 | static void mace_set_multicast(struct net_device *dev); |
83 | static void mace_reset(struct net_device *dev); | 81 | static void mace_reset(struct net_device *dev); |
84 | static int mace_set_address(struct net_device *dev, void *addr); | 82 | static int mace_set_address(struct net_device *dev, void *addr); |
@@ -188,7 +186,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i | |||
188 | mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); | 186 | mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); |
189 | mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; | 187 | mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; |
190 | 188 | ||
191 | memset(&mp->stats, 0, sizeof(mp->stats)); | ||
192 | memset((char *) mp->tx_cmds, 0, | 189 | memset((char *) mp->tx_cmds, 0, |
193 | (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd)); | 190 | (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd)); |
194 | init_timer(&mp->tx_timeout); | 191 | init_timer(&mp->tx_timeout); |
@@ -213,7 +210,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i | |||
213 | dev->open = mace_open; | 210 | dev->open = mace_open; |
214 | dev->stop = mace_close; | 211 | dev->stop = mace_close; |
215 | dev->hard_start_xmit = mace_xmit_start; | 212 | dev->hard_start_xmit = mace_xmit_start; |
216 | dev->get_stats = mace_stats; | ||
217 | dev->set_multicast_list = mace_set_multicast; | 213 | dev->set_multicast_list = mace_set_multicast; |
218 | dev->set_mac_address = mace_set_address; | 214 | dev->set_mac_address = mace_set_address; |
219 | 215 | ||
@@ -584,13 +580,6 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | |||
584 | return 0; | 580 | return 0; |
585 | } | 581 | } |
586 | 582 | ||
587 | static struct net_device_stats *mace_stats(struct net_device *dev) | ||
588 | { | ||
589 | struct mace_data *p = (struct mace_data *) dev->priv; | ||
590 | |||
591 | return &p->stats; | ||
592 | } | ||
593 | |||
594 | static void mace_set_multicast(struct net_device *dev) | 583 | static void mace_set_multicast(struct net_device *dev) |
595 | { | 584 | { |
596 | struct mace_data *mp = (struct mace_data *) dev->priv; | 585 | struct mace_data *mp = (struct mace_data *) dev->priv; |
@@ -644,19 +633,19 @@ static void mace_set_multicast(struct net_device *dev) | |||
644 | spin_unlock_irqrestore(&mp->lock, flags); | 633 | spin_unlock_irqrestore(&mp->lock, flags); |
645 | } | 634 | } |
646 | 635 | ||
647 | static void mace_handle_misc_intrs(struct mace_data *mp, int intr) | 636 | static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev) |
648 | { | 637 | { |
649 | volatile struct mace __iomem *mb = mp->mace; | 638 | volatile struct mace __iomem *mb = mp->mace; |
650 | static int mace_babbles, mace_jabbers; | 639 | static int mace_babbles, mace_jabbers; |
651 | 640 | ||
652 | if (intr & MPCO) | 641 | if (intr & MPCO) |
653 | mp->stats.rx_missed_errors += 256; | 642 | dev->stats.rx_missed_errors += 256; |
654 | mp->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */ | 643 | dev->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */ |
655 | if (intr & RNTPCO) | 644 | if (intr & RNTPCO) |
656 | mp->stats.rx_length_errors += 256; | 645 | dev->stats.rx_length_errors += 256; |
657 | mp->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */ | 646 | dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */ |
658 | if (intr & CERR) | 647 | if (intr & CERR) |
659 | ++mp->stats.tx_heartbeat_errors; | 648 | ++dev->stats.tx_heartbeat_errors; |
660 | if (intr & BABBLE) | 649 | if (intr & BABBLE) |
661 | if (mace_babbles++ < 4) | 650 | if (mace_babbles++ < 4) |
662 | printk(KERN_DEBUG "mace: babbling transmitter\n"); | 651 | printk(KERN_DEBUG "mace: babbling transmitter\n"); |
@@ -680,7 +669,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) | |||
680 | spin_lock_irqsave(&mp->lock, flags); | 669 | spin_lock_irqsave(&mp->lock, flags); |
681 | intr = in_8(&mb->ir); /* read interrupt register */ | 670 | intr = in_8(&mb->ir); /* read interrupt register */ |
682 | in_8(&mb->xmtrc); /* get retries */ | 671 | in_8(&mb->xmtrc); /* get retries */ |
683 | mace_handle_misc_intrs(mp, intr); | 672 | mace_handle_misc_intrs(mp, intr, dev); |
684 | 673 | ||
685 | i = mp->tx_empty; | 674 | i = mp->tx_empty; |
686 | while (in_8(&mb->pr) & XMTSV) { | 675 | while (in_8(&mb->pr) & XMTSV) { |
@@ -693,7 +682,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) | |||
693 | */ | 682 | */ |
694 | intr = in_8(&mb->ir); | 683 | intr = in_8(&mb->ir); |
695 | if (intr != 0) | 684 | if (intr != 0) |
696 | mace_handle_misc_intrs(mp, intr); | 685 | mace_handle_misc_intrs(mp, intr, dev); |
697 | if (mp->tx_bad_runt) { | 686 | if (mp->tx_bad_runt) { |
698 | fs = in_8(&mb->xmtfs); | 687 | fs = in_8(&mb->xmtfs); |
699 | mp->tx_bad_runt = 0; | 688 | mp->tx_bad_runt = 0; |
@@ -767,14 +756,14 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) | |||
767 | } | 756 | } |
768 | /* Update stats */ | 757 | /* Update stats */ |
769 | if (fs & (UFLO|LCOL|LCAR|RTRY)) { | 758 | if (fs & (UFLO|LCOL|LCAR|RTRY)) { |
770 | ++mp->stats.tx_errors; | 759 | ++dev->stats.tx_errors; |
771 | if (fs & LCAR) | 760 | if (fs & LCAR) |
772 | ++mp->stats.tx_carrier_errors; | 761 | ++dev->stats.tx_carrier_errors; |
773 | if (fs & (UFLO|LCOL|RTRY)) | 762 | if (fs & (UFLO|LCOL|RTRY)) |
774 | ++mp->stats.tx_aborted_errors; | 763 | ++dev->stats.tx_aborted_errors; |
775 | } else { | 764 | } else { |
776 | mp->stats.tx_bytes += mp->tx_bufs[i]->len; | 765 | dev->stats.tx_bytes += mp->tx_bufs[i]->len; |
777 | ++mp->stats.tx_packets; | 766 | ++dev->stats.tx_packets; |
778 | } | 767 | } |
779 | dev_kfree_skb_irq(mp->tx_bufs[i]); | 768 | dev_kfree_skb_irq(mp->tx_bufs[i]); |
780 | --mp->tx_active; | 769 | --mp->tx_active; |
@@ -828,7 +817,7 @@ static void mace_tx_timeout(unsigned long data) | |||
828 | goto out; | 817 | goto out; |
829 | 818 | ||
830 | /* update various counters */ | 819 | /* update various counters */ |
831 | mace_handle_misc_intrs(mp, in_8(&mb->ir)); | 820 | mace_handle_misc_intrs(mp, in_8(&mb->ir), dev); |
832 | 821 | ||
833 | cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; | 822 | cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; |
834 | 823 | ||
@@ -848,7 +837,7 @@ static void mace_tx_timeout(unsigned long data) | |||
848 | /* fix up the transmit side */ | 837 | /* fix up the transmit side */ |
849 | i = mp->tx_empty; | 838 | i = mp->tx_empty; |
850 | mp->tx_active = 0; | 839 | mp->tx_active = 0; |
851 | ++mp->stats.tx_errors; | 840 | ++dev->stats.tx_errors; |
852 | if (mp->tx_bad_runt) { | 841 | if (mp->tx_bad_runt) { |
853 | mp->tx_bad_runt = 0; | 842 | mp->tx_bad_runt = 0; |
854 | } else if (i != mp->tx_fill) { | 843 | } else if (i != mp->tx_fill) { |
@@ -916,18 +905,18 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) | |||
916 | /* got a packet, have a look at it */ | 905 | /* got a packet, have a look at it */ |
917 | skb = mp->rx_bufs[i]; | 906 | skb = mp->rx_bufs[i]; |
918 | if (skb == 0) { | 907 | if (skb == 0) { |
919 | ++mp->stats.rx_dropped; | 908 | ++dev->stats.rx_dropped; |
920 | } else if (nb > 8) { | 909 | } else if (nb > 8) { |
921 | data = skb->data; | 910 | data = skb->data; |
922 | frame_status = (data[nb-3] << 8) + data[nb-4]; | 911 | frame_status = (data[nb-3] << 8) + data[nb-4]; |
923 | if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) { | 912 | if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) { |
924 | ++mp->stats.rx_errors; | 913 | ++dev->stats.rx_errors; |
925 | if (frame_status & RS_OFLO) | 914 | if (frame_status & RS_OFLO) |
926 | ++mp->stats.rx_over_errors; | 915 | ++dev->stats.rx_over_errors; |
927 | if (frame_status & RS_FRAMERR) | 916 | if (frame_status & RS_FRAMERR) |
928 | ++mp->stats.rx_frame_errors; | 917 | ++dev->stats.rx_frame_errors; |
929 | if (frame_status & RS_FCSERR) | 918 | if (frame_status & RS_FCSERR) |
930 | ++mp->stats.rx_crc_errors; | 919 | ++dev->stats.rx_crc_errors; |
931 | } else { | 920 | } else { |
932 | /* Mace feature AUTO_STRIP_RCV is on by default, dropping the | 921 | /* Mace feature AUTO_STRIP_RCV is on by default, dropping the |
933 | * FCS on frames with 802.3 headers. This means that Ethernet | 922 | * FCS on frames with 802.3 headers. This means that Ethernet |
@@ -939,15 +928,15 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) | |||
939 | nb -= 8; | 928 | nb -= 8; |
940 | skb_put(skb, nb); | 929 | skb_put(skb, nb); |
941 | skb->protocol = eth_type_trans(skb, dev); | 930 | skb->protocol = eth_type_trans(skb, dev); |
942 | mp->stats.rx_bytes += skb->len; | 931 | dev->stats.rx_bytes += skb->len; |
943 | netif_rx(skb); | 932 | netif_rx(skb); |
944 | dev->last_rx = jiffies; | 933 | dev->last_rx = jiffies; |
945 | mp->rx_bufs[i] = NULL; | 934 | mp->rx_bufs[i] = NULL; |
946 | ++mp->stats.rx_packets; | 935 | ++dev->stats.rx_packets; |
947 | } | 936 | } |
948 | } else { | 937 | } else { |
949 | ++mp->stats.rx_errors; | 938 | ++dev->stats.rx_errors; |
950 | ++mp->stats.rx_length_errors; | 939 | ++dev->stats.rx_length_errors; |
951 | } | 940 | } |
952 | 941 | ||
953 | /* advance to next */ | 942 | /* advance to next */ |
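
The mace.c hunks above show the core move of this series: counters migrate from the driver-private struct mace_data into the net_device_stats instance already embedded in struct net_device, so a helper such as mace_handle_misc_intrs() grows a struct net_device argument and every caller passes it through. A minimal sketch of that refactor, using hypothetical foo_* names rather than the driver's own:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {
	spinlock_t lock;
	/* the duplicate struct net_device_stats member is gone */
};

/* Before the conversion this took only (struct foo_priv *, int); the extra
 * net_device argument is what makes dev->stats reachable here. */
static void foo_handle_misc_intrs(struct foo_priv *fp, int intr,
				  struct net_device *dev)
{
	if (intr & 0x01)		/* missed-packet counter overflowed */
		dev->stats.rx_missed_errors += 256;
	if (intr & 0x02)		/* runt-packet counter overflowed */
		dev->stats.rx_length_errors += 256;
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *fp = netdev_priv(dev);

	foo_handle_misc_intrs(fp, 0x01, dev);	/* callers now pass dev along */
	return IRQ_HANDLED;
}
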
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c index 5d2daa248873..57f7c1a2c1d7 100644 --- a/drivers/net/macmace.c +++ b/drivers/net/macmace.c | |||
@@ -65,7 +65,6 @@ struct mace_data { | |||
65 | unsigned char *rx_ring; | 65 | unsigned char *rx_ring; |
66 | dma_addr_t rx_ring_phys; | 66 | dma_addr_t rx_ring_phys; |
67 | int dma_intr; | 67 | int dma_intr; |
68 | struct net_device_stats stats; | ||
69 | int rx_slot, rx_tail; | 68 | int rx_slot, rx_tail; |
70 | int tx_slot, tx_sloti, tx_count; | 69 | int tx_slot, tx_sloti, tx_count; |
71 | int chipid; | 70 | int chipid; |
@@ -92,7 +91,6 @@ struct mace_frame { | |||
92 | static int mace_open(struct net_device *dev); | 91 | static int mace_open(struct net_device *dev); |
93 | static int mace_close(struct net_device *dev); | 92 | static int mace_close(struct net_device *dev); |
94 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); | 93 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); |
95 | static struct net_device_stats *mace_stats(struct net_device *dev); | ||
96 | static void mace_set_multicast(struct net_device *dev); | 94 | static void mace_set_multicast(struct net_device *dev); |
97 | static int mace_set_address(struct net_device *dev, void *addr); | 95 | static int mace_set_address(struct net_device *dev, void *addr); |
98 | static void mace_reset(struct net_device *dev); | 96 | static void mace_reset(struct net_device *dev); |
@@ -242,14 +240,11 @@ static int __devinit mace_probe(struct platform_device *pdev) | |||
242 | return -ENODEV; | 240 | return -ENODEV; |
243 | } | 241 | } |
244 | 242 | ||
245 | memset(&mp->stats, 0, sizeof(mp->stats)); | ||
246 | |||
247 | dev->open = mace_open; | 243 | dev->open = mace_open; |
248 | dev->stop = mace_close; | 244 | dev->stop = mace_close; |
249 | dev->hard_start_xmit = mace_xmit_start; | 245 | dev->hard_start_xmit = mace_xmit_start; |
250 | dev->tx_timeout = mace_tx_timeout; | 246 | dev->tx_timeout = mace_tx_timeout; |
251 | dev->watchdog_timeo = TX_TIMEOUT; | 247 | dev->watchdog_timeo = TX_TIMEOUT; |
252 | dev->get_stats = mace_stats; | ||
253 | dev->set_multicast_list = mace_set_multicast; | 248 | dev->set_multicast_list = mace_set_multicast; |
254 | dev->set_mac_address = mace_set_address; | 249 | dev->set_mac_address = mace_set_address; |
255 | 250 | ||
@@ -472,8 +467,8 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | |||
472 | mp->tx_count--; | 467 | mp->tx_count--; |
473 | local_irq_restore(flags); | 468 | local_irq_restore(flags); |
474 | 469 | ||
475 | mp->stats.tx_packets++; | 470 | dev->stats.tx_packets++; |
476 | mp->stats.tx_bytes += skb->len; | 471 | dev->stats.tx_bytes += skb->len; |
477 | 472 | ||
478 | /* We need to copy into our xmit buffer to take care of alignment and caching issues */ | 473 | /* We need to copy into our xmit buffer to take care of alignment and caching issues */ |
479 | skb_copy_from_linear_data(skb, mp->tx_ring, skb->len); | 474 | skb_copy_from_linear_data(skb, mp->tx_ring, skb->len); |
@@ -492,12 +487,6 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | |||
492 | return NETDEV_TX_OK; | 487 | return NETDEV_TX_OK; |
493 | } | 488 | } |
494 | 489 | ||
495 | static struct net_device_stats *mace_stats(struct net_device *dev) | ||
496 | { | ||
497 | struct mace_data *mp = netdev_priv(dev); | ||
498 | return &mp->stats; | ||
499 | } | ||
500 | |||
501 | static void mace_set_multicast(struct net_device *dev) | 490 | static void mace_set_multicast(struct net_device *dev) |
502 | { | 491 | { |
503 | struct mace_data *mp = netdev_priv(dev); | 492 | struct mace_data *mp = netdev_priv(dev); |
@@ -555,13 +544,13 @@ static void mace_handle_misc_intrs(struct mace_data *mp, int intr) | |||
555 | static int mace_babbles, mace_jabbers; | 544 | static int mace_babbles, mace_jabbers; |
556 | 545 | ||
557 | if (intr & MPCO) | 546 | if (intr & MPCO) |
558 | mp->stats.rx_missed_errors += 256; | 547 | dev->stats.rx_missed_errors += 256; |
559 | mp->stats.rx_missed_errors += mb->mpc; /* reading clears it */ | 548 | dev->stats.rx_missed_errors += mb->mpc; /* reading clears it */ |
560 | if (intr & RNTPCO) | 549 | if (intr & RNTPCO) |
561 | mp->stats.rx_length_errors += 256; | 550 | dev->stats.rx_length_errors += 256; |
562 | mp->stats.rx_length_errors += mb->rntpc; /* reading clears it */ | 551 | dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */ |
563 | if (intr & CERR) | 552 | if (intr & CERR) |
564 | ++mp->stats.tx_heartbeat_errors; | 553 | ++dev->stats.tx_heartbeat_errors; |
565 | if (intr & BABBLE) | 554 | if (intr & BABBLE) |
566 | if (mace_babbles++ < 4) | 555 | if (mace_babbles++ < 4) |
567 | printk(KERN_DEBUG "macmace: babbling transmitter\n"); | 556 | printk(KERN_DEBUG "macmace: babbling transmitter\n"); |
@@ -600,14 +589,14 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) | |||
600 | } | 589 | } |
601 | /* Update stats */ | 590 | /* Update stats */ |
602 | if (fs & (UFLO|LCOL|LCAR|RTRY)) { | 591 | if (fs & (UFLO|LCOL|LCAR|RTRY)) { |
603 | ++mp->stats.tx_errors; | 592 | ++dev->stats.tx_errors; |
604 | if (fs & LCAR) | 593 | if (fs & LCAR) |
605 | ++mp->stats.tx_carrier_errors; | 594 | ++dev->stats.tx_carrier_errors; |
606 | else if (fs & (UFLO|LCOL|RTRY)) { | 595 | else if (fs & (UFLO|LCOL|RTRY)) { |
607 | ++mp->stats.tx_aborted_errors; | 596 | ++dev->stats.tx_aborted_errors; |
608 | if (mb->xmtfs & UFLO) { | 597 | if (mb->xmtfs & UFLO) { |
609 | printk(KERN_ERR "%s: DMA underrun.\n", dev->name); | 598 | printk(KERN_ERR "%s: DMA underrun.\n", dev->name); |
610 | mp->stats.tx_fifo_errors++; | 599 | dev->stats.tx_fifo_errors++; |
611 | mace_txdma_reset(dev); | 600 | mace_txdma_reset(dev); |
612 | } | 601 | } |
613 | } | 602 | } |
@@ -661,23 +650,23 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) | |||
661 | unsigned int frame_status = mf->rcvsts; | 650 | unsigned int frame_status = mf->rcvsts; |
662 | 651 | ||
663 | if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) { | 652 | if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) { |
664 | mp->stats.rx_errors++; | 653 | dev->stats.rx_errors++; |
665 | if (frame_status & RS_OFLO) { | 654 | if (frame_status & RS_OFLO) { |
666 | printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name); | 655 | printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name); |
667 | mp->stats.rx_fifo_errors++; | 656 | dev->stats.rx_fifo_errors++; |
668 | } | 657 | } |
669 | if (frame_status & RS_CLSN) | 658 | if (frame_status & RS_CLSN) |
670 | mp->stats.collisions++; | 659 | dev->stats.collisions++; |
671 | if (frame_status & RS_FRAMERR) | 660 | if (frame_status & RS_FRAMERR) |
672 | mp->stats.rx_frame_errors++; | 661 | dev->stats.rx_frame_errors++; |
673 | if (frame_status & RS_FCSERR) | 662 | if (frame_status & RS_FCSERR) |
674 | mp->stats.rx_crc_errors++; | 663 | dev->stats.rx_crc_errors++; |
675 | } else { | 664 | } else { |
676 | unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 ); | 665 | unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 ); |
677 | 666 | ||
678 | skb = dev_alloc_skb(frame_length + 2); | 667 | skb = dev_alloc_skb(frame_length + 2); |
679 | if (!skb) { | 668 | if (!skb) { |
680 | mp->stats.rx_dropped++; | 669 | dev->stats.rx_dropped++; |
681 | return; | 670 | return; |
682 | } | 671 | } |
683 | skb_reserve(skb, 2); | 672 | skb_reserve(skb, 2); |
@@ -686,8 +675,8 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) | |||
686 | skb->protocol = eth_type_trans(skb, dev); | 675 | skb->protocol = eth_type_trans(skb, dev); |
687 | netif_rx(skb); | 676 | netif_rx(skb); |
688 | dev->last_rx = jiffies; | 677 | dev->last_rx = jiffies; |
689 | mp->stats.rx_packets++; | 678 | dev->stats.rx_packets++; |
690 | mp->stats.rx_bytes += frame_length; | 679 | dev->stats.rx_bytes += frame_length; |
691 | } | 680 | } |
692 | } | 681 | } |
693 | 682 | ||
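
macmace.c gets the same treatment, plus two deletions worth noting: the explicit memset() of the private stats copy and the dev->get_stats hook both go away, because alloc_etherdev() already zeroes the whole net_device (including the embedded dev->stats) and the networking core falls back to &dev->stats when a driver installs no get_stats method. One caveat in the hunks as shown here: the mace_handle_misc_intrs() body now references dev->stats, but no hunk adds a net_device parameter to that helper, so it needs the same signature change as the mace.c version above for those references to resolve. A minimal probe-time sketch with hypothetical bar_* names:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct bar_priv {
	int chipid;			/* real driver state only; no stats copy */
};

static int bar_open(struct net_device *dev)  { return 0; }
static int bar_close(struct net_device *dev) { return 0; }

static int bar_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.tx_packets++;	/* counters live in the net_device */
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int bar_probe(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct bar_priv));

	if (!dev)
		return -ENOMEM;
	dev->open            = bar_open;	/* pre-net_device_ops style, as in this tree */
	dev->stop            = bar_close;
	dev->hard_start_xmit = bar_xmit;
	/* no memset of a private stats copy and no dev->get_stats assignment */
	return register_netdev(dev);
}
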
diff --git a/drivers/net/meth.c b/drivers/net/meth.c index 32bed6bc6c06..fe5b6c372072 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c | |||
@@ -66,7 +66,6 @@ module_param(timeout, int, 0); | |||
66 | * packets in and out, so there is place for a packet | 66 | * packets in and out, so there is place for a packet |
67 | */ | 67 | */ |
68 | struct meth_private { | 68 | struct meth_private { |
69 | struct net_device_stats stats; | ||
70 | /* in-memory copy of MAC Control register */ | 69 | /* in-memory copy of MAC Control register */ |
71 | unsigned long mac_ctrl; | 70 | unsigned long mac_ctrl; |
72 | /* in-memory copy of DMA Control register */ | 71 | /* in-memory copy of DMA Control register */ |
@@ -401,15 +400,15 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) | |||
401 | printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2lx.\n", | 400 | printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2lx.\n", |
402 | dev->name, priv->rx_write, | 401 | dev->name, priv->rx_write, |
403 | priv->rx_ring[priv->rx_write]->status.raw); | 402 | priv->rx_ring[priv->rx_write]->status.raw); |
404 | priv->stats.rx_errors++; | 403 | dev->stats.rx_errors++; |
405 | priv->stats.rx_length_errors++; | 404 | dev->stats.rx_length_errors++; |
406 | skb = priv->rx_skbs[priv->rx_write]; | 405 | skb = priv->rx_skbs[priv->rx_write]; |
407 | } else { | 406 | } else { |
408 | skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC); | 407 | skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC); |
409 | if (!skb) { | 408 | if (!skb) { |
410 | /* Ouch! No memory! Drop packet on the floor */ | 409 | /* Ouch! No memory! Drop packet on the floor */ |
411 | DPRINTK("No mem: dropping packet\n"); | 410 | DPRINTK("No mem: dropping packet\n"); |
412 | priv->stats.rx_dropped++; | 411 | dev->stats.rx_dropped++; |
413 | skb = priv->rx_skbs[priv->rx_write]; | 412 | skb = priv->rx_skbs[priv->rx_write]; |
414 | } else { | 413 | } else { |
415 | struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write]; | 414 | struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write]; |
@@ -421,13 +420,13 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) | |||
421 | priv->rx_skbs[priv->rx_write] = skb; | 420 | priv->rx_skbs[priv->rx_write] = skb; |
422 | skb_c->protocol = eth_type_trans(skb_c, dev); | 421 | skb_c->protocol = eth_type_trans(skb_c, dev); |
423 | dev->last_rx = jiffies; | 422 | dev->last_rx = jiffies; |
424 | priv->stats.rx_packets++; | 423 | dev->stats.rx_packets++; |
425 | priv->stats.rx_bytes += len; | 424 | dev->stats.rx_bytes += len; |
426 | netif_rx(skb_c); | 425 | netif_rx(skb_c); |
427 | } | 426 | } |
428 | } | 427 | } |
429 | } else { | 428 | } else { |
430 | priv->stats.rx_errors++; | 429 | dev->stats.rx_errors++; |
431 | skb=priv->rx_skbs[priv->rx_write]; | 430 | skb=priv->rx_skbs[priv->rx_write]; |
432 | #if MFE_DEBUG>0 | 431 | #if MFE_DEBUG>0 |
433 | printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status); | 432 | printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status); |
@@ -490,10 +489,10 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) | |||
490 | #endif | 489 | #endif |
491 | if (status & METH_TX_ST_DONE) { | 490 | if (status & METH_TX_ST_DONE) { |
492 | if (status & METH_TX_ST_SUCCESS){ | 491 | if (status & METH_TX_ST_SUCCESS){ |
493 | priv->stats.tx_packets++; | 492 | dev->stats.tx_packets++; |
494 | priv->stats.tx_bytes += skb->len; | 493 | dev->stats.tx_bytes += skb->len; |
495 | } else { | 494 | } else { |
496 | priv->stats.tx_errors++; | 495 | dev->stats.tx_errors++; |
497 | #if MFE_DEBUG>=1 | 496 | #if MFE_DEBUG>=1 |
498 | DPRINTK("TX error: status=%016lx <",status); | 497 | DPRINTK("TX error: status=%016lx <",status); |
499 | if(status & METH_TX_ST_SUCCESS) | 498 | if(status & METH_TX_ST_SUCCESS) |
@@ -734,7 +733,7 @@ static void meth_tx_timeout(struct net_device *dev) | |||
734 | /* Try to reset the interface. */ | 733 | /* Try to reset the interface. */ |
735 | meth_reset(dev); | 734 | meth_reset(dev); |
736 | 735 | ||
737 | priv->stats.tx_errors++; | 736 | dev->stats.tx_errors++; |
738 | 737 | ||
739 | /* Clear all rings */ | 738 | /* Clear all rings */ |
740 | meth_free_tx_ring(priv); | 739 | meth_free_tx_ring(priv); |
@@ -773,12 +772,6 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
773 | /* | 772 | /* |
774 | * Return statistics to the caller | 773 | * Return statistics to the caller |
775 | */ | 774 | */ |
776 | static struct net_device_stats *meth_stats(struct net_device *dev) | ||
777 | { | ||
778 | struct meth_private *priv = netdev_priv(dev); | ||
779 | return &priv->stats; | ||
780 | } | ||
781 | |||
782 | /* | 775 | /* |
783 | * The init function. | 776 | * The init function. |
784 | */ | 777 | */ |
@@ -796,7 +789,6 @@ static int __init meth_probe(struct platform_device *pdev) | |||
796 | dev->stop = meth_release; | 789 | dev->stop = meth_release; |
797 | dev->hard_start_xmit = meth_tx; | 790 | dev->hard_start_xmit = meth_tx; |
798 | dev->do_ioctl = meth_ioctl; | 791 | dev->do_ioctl = meth_ioctl; |
799 | dev->get_stats = meth_stats; | ||
800 | #ifdef HAVE_TX_TIMEOUT | 792 | #ifdef HAVE_TX_TIMEOUT |
801 | dev->tx_timeout = meth_tx_timeout; | 793 | dev->tx_timeout = meth_tx_timeout; |
802 | dev->watchdog_timeo = timeout; | 794 | dev->watchdog_timeo = timeout; |
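
meth.c follows the same recipe in its RX and TX completion paths: every priv->stats.X++ becomes dev->stats.X++, and meth_stats()/dev->get_stats disappear, saving one redundant struct net_device_stats per interface plus the accessor code. A minimal RX-completion sketch of the pattern, with hypothetical baz_* names:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void baz_rx_one(struct net_device *dev, const void *buf, unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (!skb) {
		dev->stats.rx_dropped++;	/* was priv->stats.rx_dropped++ */
		return;
	}
	skb_reserve(skb, 2);			/* align the IP header */
	memcpy(skb_put(skb, len), buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}
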
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c index c0f5ad38fb17..d593175ab6f0 100644 --- a/drivers/net/mipsnet.c +++ b/drivers/net/mipsnet.c | |||
@@ -21,10 +21,6 @@ | |||
21 | 21 | ||
22 | #define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field)) | 22 | #define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field)) |
23 | 23 | ||
24 | struct mipsnet_priv { | ||
25 | struct net_device_stats stats; | ||
26 | }; | ||
27 | |||
28 | static char mipsnet_string[] = "mipsnet"; | 24 | static char mipsnet_string[] = "mipsnet"; |
29 | 25 | ||
30 | /* | 26 | /* |
@@ -49,7 +45,6 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev, | |||
49 | { | 45 | { |
50 | int count_to_go = skb->len; | 46 | int count_to_go = skb->len; |
51 | char *buf_ptr = skb->data; | 47 | char *buf_ptr = skb->data; |
52 | struct mipsnet_priv *mp = netdev_priv(dev); | ||
53 | 48 | ||
54 | pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n", | 49 | pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n", |
55 | dev->name, __FUNCTION__, skb->len); | 50 | dev->name, __FUNCTION__, skb->len); |
@@ -63,8 +58,8 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev, | |||
63 | outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); | 58 | outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); |
64 | } | 59 | } |
65 | 60 | ||
66 | mp->stats.tx_packets++; | 61 | dev->stats.tx_packets++; |
67 | mp->stats.tx_bytes += skb->len; | 62 | dev->stats.tx_bytes += skb->len; |
68 | 63 | ||
69 | return skb->len; | 64 | return skb->len; |
70 | } | 65 | } |
@@ -87,10 +82,9 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) | |||
87 | { | 82 | { |
88 | struct sk_buff *skb; | 83 | struct sk_buff *skb; |
89 | size_t len = count; | 84 | size_t len = count; |
90 | struct mipsnet_priv *mp = netdev_priv(dev); | ||
91 | 85 | ||
92 | if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) { | 86 | if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) { |
93 | mp->stats.rx_dropped++; | 87 | dev->stats.rx_dropped++; |
94 | return -ENOMEM; | 88 | return -ENOMEM; |
95 | } | 89 | } |
96 | 90 | ||
@@ -105,8 +99,8 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) | |||
105 | dev->name, __FUNCTION__); | 99 | dev->name, __FUNCTION__); |
106 | netif_rx(skb); | 100 | netif_rx(skb); |
107 | 101 | ||
108 | mp->stats.rx_packets++; | 102 | dev->stats.rx_packets++; |
109 | mp->stats.rx_bytes += len; | 103 | dev->stats.rx_bytes += len; |
110 | 104 | ||
111 | return count; | 105 | return count; |
112 | } | 106 | } |
@@ -203,13 +197,6 @@ static int mipsnet_close(struct net_device *dev) | |||
203 | return 0; | 197 | return 0; |
204 | } | 198 | } |
205 | 199 | ||
206 | static struct net_device_stats *mipsnet_get_stats(struct net_device *dev) | ||
207 | { | ||
208 | struct mipsnet_priv *mp = netdev_priv(dev); | ||
209 | |||
210 | return &mp->stats; | ||
211 | } | ||
212 | |||
213 | static void mipsnet_set_mclist(struct net_device *dev) | 200 | static void mipsnet_set_mclist(struct net_device *dev) |
214 | { | 201 | { |
215 | // we don't do anything | 202 | // we don't do anything |
@@ -221,7 +208,7 @@ static int __init mipsnet_probe(struct device *dev) | |||
221 | struct net_device *netdev; | 208 | struct net_device *netdev; |
222 | int err; | 209 | int err; |
223 | 210 | ||
224 | netdev = alloc_etherdev(sizeof(struct mipsnet_priv)); | 211 | netdev = alloc_etherdev(0); |
225 | if (!netdev) { | 212 | if (!netdev) { |
226 | err = -ENOMEM; | 213 | err = -ENOMEM; |
227 | goto out; | 214 | goto out; |
@@ -232,7 +219,6 @@ static int __init mipsnet_probe(struct device *dev) | |||
232 | netdev->open = mipsnet_open; | 219 | netdev->open = mipsnet_open; |
233 | netdev->stop = mipsnet_close; | 220 | netdev->stop = mipsnet_close; |
234 | netdev->hard_start_xmit = mipsnet_xmit; | 221 | netdev->hard_start_xmit = mipsnet_xmit; |
235 | netdev->get_stats = mipsnet_get_stats; | ||
236 | netdev->set_multicast_list = mipsnet_set_mclist; | 222 | netdev->set_multicast_list = mipsnet_set_mclist; |
237 | 223 | ||
238 | /* | 224 | /* |
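
mipsnet is the extreme case: its private struct held nothing but the stats copy, so the struct itself is deleted and the allocation shrinks to alloc_etherdev(0). A sketch of that end state, with hypothetical qux_* names:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int qux_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ...push skb->data out to the hardware here... */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device *qux_alloc(void)
{
	/* was alloc_etherdev(sizeof(struct qux_priv)); no private tail needed now */
	struct net_device *dev = alloc_etherdev(0);

	if (dev)
		dev->hard_start_xmit = qux_xmit;
	return dev;
}
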
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 2a808e265a3e..35781616eb23 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -63,7 +63,6 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num); | |||
63 | static int mv643xx_eth_open(struct net_device *); | 63 | static int mv643xx_eth_open(struct net_device *); |
64 | static int mv643xx_eth_stop(struct net_device *); | 64 | static int mv643xx_eth_stop(struct net_device *); |
65 | static int mv643xx_eth_change_mtu(struct net_device *, int); | 65 | static int mv643xx_eth_change_mtu(struct net_device *, int); |
66 | static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *); | ||
67 | static void eth_port_init_mac_tables(unsigned int eth_port_num); | 66 | static void eth_port_init_mac_tables(unsigned int eth_port_num); |
68 | #ifdef MV643XX_NAPI | 67 | #ifdef MV643XX_NAPI |
69 | static int mv643xx_poll(struct napi_struct *napi, int budget); | 68 | static int mv643xx_poll(struct napi_struct *napi, int budget); |
@@ -341,7 +340,7 @@ int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) | |||
341 | 340 | ||
342 | if (cmd_sts & ETH_ERROR_SUMMARY) { | 341 | if (cmd_sts & ETH_ERROR_SUMMARY) { |
343 | printk("%s: Error in TX\n", dev->name); | 342 | printk("%s: Error in TX\n", dev->name); |
344 | mp->stats.tx_errors++; | 343 | dev->stats.tx_errors++; |
345 | } | 344 | } |
346 | 345 | ||
347 | spin_unlock_irqrestore(&mp->lock, flags); | 346 | spin_unlock_irqrestore(&mp->lock, flags); |
@@ -388,7 +387,7 @@ static void mv643xx_eth_free_all_tx_descs(struct net_device *dev) | |||
388 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | 387 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) |
389 | { | 388 | { |
390 | struct mv643xx_private *mp = netdev_priv(dev); | 389 | struct mv643xx_private *mp = netdev_priv(dev); |
391 | struct net_device_stats *stats = &mp->stats; | 390 | struct net_device_stats *stats = &dev->stats; |
392 | unsigned int received_packets = 0; | 391 | unsigned int received_packets = 0; |
393 | struct sk_buff *skb; | 392 | struct sk_buff *skb; |
394 | struct pkt_info pkt_info; | 393 | struct pkt_info pkt_info; |
@@ -1192,7 +1191,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, | |||
1192 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1191 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1193 | { | 1192 | { |
1194 | struct mv643xx_private *mp = netdev_priv(dev); | 1193 | struct mv643xx_private *mp = netdev_priv(dev); |
1195 | struct net_device_stats *stats = &mp->stats; | 1194 | struct net_device_stats *stats = &dev->stats; |
1196 | unsigned long flags; | 1195 | unsigned long flags; |
1197 | 1196 | ||
1198 | BUG_ON(netif_queue_stopped(dev)); | 1197 | BUG_ON(netif_queue_stopped(dev)); |
@@ -1228,23 +1227,6 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1228 | return 0; /* success */ | 1227 | return 0; /* success */ |
1229 | } | 1228 | } |
1230 | 1229 | ||
1231 | /* | ||
1232 | * mv643xx_eth_get_stats | ||
1233 | * | ||
1234 | * Returns a pointer to the interface statistics. | ||
1235 | * | ||
1236 | * Input : dev - a pointer to the required interface | ||
1237 | * | ||
1238 | * Output : a pointer to the interface's statistics | ||
1239 | */ | ||
1240 | |||
1241 | static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) | ||
1242 | { | ||
1243 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1244 | |||
1245 | return &mp->stats; | ||
1246 | } | ||
1247 | |||
1248 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1230 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1249 | static void mv643xx_netpoll(struct net_device *netdev) | 1231 | static void mv643xx_netpoll(struct net_device *netdev) |
1250 | { | 1232 | { |
@@ -1339,7 +1321,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1339 | dev->open = mv643xx_eth_open; | 1321 | dev->open = mv643xx_eth_open; |
1340 | dev->stop = mv643xx_eth_stop; | 1322 | dev->stop = mv643xx_eth_stop; |
1341 | dev->hard_start_xmit = mv643xx_eth_start_xmit; | 1323 | dev->hard_start_xmit = mv643xx_eth_start_xmit; |
1342 | dev->get_stats = mv643xx_eth_get_stats; | ||
1343 | dev->set_mac_address = mv643xx_eth_set_mac_address; | 1324 | dev->set_mac_address = mv643xx_eth_set_mac_address; |
1344 | dev->set_multicast_list = mv643xx_eth_set_rx_mode; | 1325 | dev->set_multicast_list = mv643xx_eth_set_rx_mode; |
1345 | 1326 | ||
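
mv643xx_eth shows how hot paths stay untouched: functions that already worked through a local struct net_device_stats *stats alias simply re-point the alias from &mp->stats to &dev->stats and the rest of the loop body is unchanged. A minimal sketch with hypothetical quux_* names:

#include <linux/netdevice.h>

static int quux_rx_poll(struct net_device *dev, int budget)
{
	struct net_device_stats *stats = &dev->stats;	/* was &priv->stats */
	int received = 0;

	while (received < budget) {
		/* ...pull one frame from the ring and hand it to the stack... */
		stats->rx_packets++;
		received++;
	}
	return received;
}
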
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index 331b76c49561..35c4c598c8d2 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c | |||
@@ -353,7 +353,7 @@ static void myri_tx(struct myri_eth *mp, struct net_device *dev) | |||
353 | sbus_unmap_single(mp->myri_sdev, dma_addr, skb->len, SBUS_DMA_TODEVICE); | 353 | sbus_unmap_single(mp->myri_sdev, dma_addr, skb->len, SBUS_DMA_TODEVICE); |
354 | dev_kfree_skb(skb); | 354 | dev_kfree_skb(skb); |
355 | mp->tx_skbs[entry] = NULL; | 355 | mp->tx_skbs[entry] = NULL; |
356 | mp->enet_stats.tx_packets++; | 356 | dev->stats.tx_packets++; |
357 | entry = NEXT_TX(entry); | 357 | entry = NEXT_TX(entry); |
358 | } | 358 | } |
359 | mp->tx_old = entry; | 359 | mp->tx_old = entry; |
@@ -434,20 +434,20 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) | |||
434 | RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE); | 434 | RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE); |
435 | if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) { | 435 | if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) { |
436 | DRX(("ERROR[")); | 436 | DRX(("ERROR[")); |
437 | mp->enet_stats.rx_errors++; | 437 | dev->stats.rx_errors++; |
438 | if (len < (ETH_HLEN + MYRI_PAD_LEN)) { | 438 | if (len < (ETH_HLEN + MYRI_PAD_LEN)) { |
439 | DRX(("BAD_LENGTH] ")); | 439 | DRX(("BAD_LENGTH] ")); |
440 | mp->enet_stats.rx_length_errors++; | 440 | dev->stats.rx_length_errors++; |
441 | } else { | 441 | } else { |
442 | DRX(("NO_PADDING] ")); | 442 | DRX(("NO_PADDING] ")); |
443 | mp->enet_stats.rx_frame_errors++; | 443 | dev->stats.rx_frame_errors++; |
444 | } | 444 | } |
445 | 445 | ||
446 | /* Return it to the LANAI. */ | 446 | /* Return it to the LANAI. */ |
447 | drop_it: | 447 | drop_it: |
448 | drops++; | 448 | drops++; |
449 | DRX(("DROP ")); | 449 | DRX(("DROP ")); |
450 | mp->enet_stats.rx_dropped++; | 450 | dev->stats.rx_dropped++; |
451 | sbus_dma_sync_single_for_device(mp->myri_sdev, | 451 | sbus_dma_sync_single_for_device(mp->myri_sdev, |
452 | sbus_readl(&rxd->myri_scatters[0].addr), | 452 | sbus_readl(&rxd->myri_scatters[0].addr), |
453 | RX_ALLOC_SIZE, | 453 | RX_ALLOC_SIZE, |
@@ -527,8 +527,8 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) | |||
527 | netif_rx(skb); | 527 | netif_rx(skb); |
528 | 528 | ||
529 | dev->last_rx = jiffies; | 529 | dev->last_rx = jiffies; |
530 | mp->enet_stats.rx_packets++; | 530 | dev->stats.rx_packets++; |
531 | mp->enet_stats.rx_bytes += len; | 531 | dev->stats.rx_bytes += len; |
532 | next: | 532 | next: |
533 | DRX(("NEXT\n")); | 533 | DRX(("NEXT\n")); |
534 | entry = NEXT_RX(entry); | 534 | entry = NEXT_RX(entry); |
@@ -596,7 +596,7 @@ static void myri_tx_timeout(struct net_device *dev) | |||
596 | 596 | ||
597 | printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); | 597 | printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); |
598 | 598 | ||
599 | mp->enet_stats.tx_errors++; | 599 | dev->stats.tx_errors++; |
600 | myri_init(mp, 0); | 600 | myri_init(mp, 0); |
601 | netif_wake_queue(dev); | 601 | netif_wake_queue(dev); |
602 | } | 602 | } |
@@ -806,9 +806,6 @@ static int myri_change_mtu(struct net_device *dev, int new_mtu) | |||
806 | return 0; | 806 | return 0; |
807 | } | 807 | } |
808 | 808 | ||
809 | static struct net_device_stats *myri_get_stats(struct net_device *dev) | ||
810 | { return &(((struct myri_eth *)dev->priv)->enet_stats); } | ||
811 | |||
812 | static void myri_set_multicast(struct net_device *dev) | 809 | static void myri_set_multicast(struct net_device *dev) |
813 | { | 810 | { |
814 | /* Do nothing, all MyriCOM nodes transmit multicast frames | 811 | /* Do nothing, all MyriCOM nodes transmit multicast frames |
@@ -1060,7 +1057,6 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) | |||
1060 | dev->hard_start_xmit = &myri_start_xmit; | 1057 | dev->hard_start_xmit = &myri_start_xmit; |
1061 | dev->tx_timeout = &myri_tx_timeout; | 1058 | dev->tx_timeout = &myri_tx_timeout; |
1062 | dev->watchdog_timeo = 5*HZ; | 1059 | dev->watchdog_timeo = 5*HZ; |
1063 | dev->get_stats = &myri_get_stats; | ||
1064 | dev->set_multicast_list = &myri_set_multicast; | 1060 | dev->set_multicast_list = &myri_set_multicast; |
1065 | dev->irq = sdev->irqs[0]; | 1061 | dev->irq = sdev->irqs[0]; |
1066 | 1062 | ||
diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h index 2f69ef7cdccb..5d93fcc95d55 100644 --- a/drivers/net/myri_sbus.h +++ b/drivers/net/myri_sbus.h | |||
@@ -280,7 +280,6 @@ struct myri_eth { | |||
280 | void __iomem *lregs; /* Quick ptr to LANAI regs. */ | 280 | void __iomem *lregs; /* Quick ptr to LANAI regs. */ |
281 | struct sk_buff *rx_skbs[RX_RING_SIZE+1];/* RX skb's */ | 281 | struct sk_buff *rx_skbs[RX_RING_SIZE+1];/* RX skb's */ |
282 | struct sk_buff *tx_skbs[TX_RING_SIZE]; /* TX skb's */ | 282 | struct sk_buff *tx_skbs[TX_RING_SIZE]; /* TX skb's */ |
283 | struct net_device_stats enet_stats; /* Interface stats. */ | ||
284 | 283 | ||
285 | /* These are less frequently accessed. */ | 284 | /* These are less frequently accessed. */ |
286 | void __iomem *regs; /* MyriCOM register space. */ | 285 | void __iomem *regs; /* MyriCOM register space. */ |
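
The header hunks are where the memory actually comes back: deleting the net_device_stats member shrinks every driver-private struct by one full set of counters, since an identical set already sits inside struct net_device. A before/after sketch with a hypothetical corge_* struct:

#include <linux/netdevice.h>

struct corge_priv_before {
	void __iomem *lregs;
	struct net_device_stats enet_stats;	/* duplicate copy, now removed */
	int tx_old;
};

struct corge_priv_after {
	void __iomem *lregs;
	int tx_old;				/* counters are read via dev->stats */
};
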
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c index 6fee405d8403..eb0aff787dfd 100644 --- a/drivers/net/netx-eth.c +++ b/drivers/net/netx-eth.c | |||
@@ -97,7 +97,6 @@ | |||
97 | struct netx_eth_priv { | 97 | struct netx_eth_priv { |
98 | void __iomem *sram_base, *xpec_base, *xmac_base; | 98 | void __iomem *sram_base, *xpec_base, *xmac_base; |
99 | int id; | 99 | int id; |
100 | struct net_device_stats stats; | ||
101 | struct mii_if_info mii; | 100 | struct mii_if_info mii; |
102 | u32 msg_enable; | 101 | u32 msg_enable; |
103 | struct xc *xc; | 102 | struct xc *xc; |
@@ -129,8 +128,8 @@ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
129 | FIFO_PTR_FRAMELEN(len)); | 128 | FIFO_PTR_FRAMELEN(len)); |
130 | 129 | ||
131 | ndev->trans_start = jiffies; | 130 | ndev->trans_start = jiffies; |
132 | priv->stats.tx_packets++; | 131 | ndev->stats.tx_packets++; |
133 | priv->stats.tx_bytes += skb->len; | 132 | ndev->stats.tx_bytes += skb->len; |
134 | 133 | ||
135 | netif_stop_queue(ndev); | 134 | netif_stop_queue(ndev); |
136 | spin_unlock_irq(&priv->lock); | 135 | spin_unlock_irq(&priv->lock); |
@@ -156,7 +155,7 @@ static void netx_eth_receive(struct net_device *ndev) | |||
156 | if (unlikely(skb == NULL)) { | 155 | if (unlikely(skb == NULL)) { |
157 | printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", | 156 | printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", |
158 | ndev->name); | 157 | ndev->name); |
159 | priv->stats.rx_dropped++; | 158 | ndev->stats.rx_dropped++; |
160 | return; | 159 | return; |
161 | } | 160 | } |
162 | 161 | ||
@@ -170,8 +169,8 @@ static void netx_eth_receive(struct net_device *ndev) | |||
170 | ndev->last_rx = jiffies; | 169 | ndev->last_rx = jiffies; |
171 | skb->protocol = eth_type_trans(skb, ndev); | 170 | skb->protocol = eth_type_trans(skb, ndev); |
172 | netif_rx(skb); | 171 | netif_rx(skb); |
173 | priv->stats.rx_packets++; | 172 | ndev->stats.rx_packets++; |
174 | priv->stats.rx_bytes += len; | 173 | ndev->stats.rx_bytes += len; |
175 | } | 174 | } |
176 | 175 | ||
177 | static irqreturn_t | 176 | static irqreturn_t |
@@ -210,12 +209,6 @@ netx_eth_interrupt(int irq, void *dev_id) | |||
210 | return IRQ_HANDLED; | 209 | return IRQ_HANDLED; |
211 | } | 210 | } |
212 | 211 | ||
213 | static struct net_device_stats *netx_eth_query_statistics(struct net_device *ndev) | ||
214 | { | ||
215 | struct netx_eth_priv *priv = netdev_priv(ndev); | ||
216 | return &priv->stats; | ||
217 | } | ||
218 | |||
219 | static int netx_eth_open(struct net_device *ndev) | 212 | static int netx_eth_open(struct net_device *ndev) |
220 | { | 213 | { |
221 | struct netx_eth_priv *priv = netdev_priv(ndev); | 214 | struct netx_eth_priv *priv = netdev_priv(ndev); |
@@ -323,7 +316,6 @@ static int netx_eth_enable(struct net_device *ndev) | |||
323 | ndev->hard_start_xmit = netx_eth_hard_start_xmit; | 316 | ndev->hard_start_xmit = netx_eth_hard_start_xmit; |
324 | ndev->tx_timeout = netx_eth_timeout; | 317 | ndev->tx_timeout = netx_eth_timeout; |
325 | ndev->watchdog_timeo = msecs_to_jiffies(5000); | 318 | ndev->watchdog_timeo = msecs_to_jiffies(5000); |
326 | ndev->get_stats = netx_eth_query_statistics; | ||
327 | ndev->set_multicast_list = netx_eth_set_multicast_list; | 319 | ndev->set_multicast_list = netx_eth_set_multicast_list; |
328 | 320 | ||
329 | priv->msg_enable = NETIF_MSG_LINK; | 321 | priv->msg_enable = NETIF_MSG_LINK; |
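
netx-eth is a reminder that the counters must be reached through whatever net_device pointer the function actually has in scope; this driver names its parameter ndev throughout, so the stats updates go through ndev->stats. A sketch of the transmit path with hypothetical grault_* names:

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int grault_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	/* ...write the frame into the transmit FIFO here... */
	ndev->trans_start = jiffies;
	ndev->stats.tx_packets++;	/* the parameter is ndev, so use ndev->stats */
	ndev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
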
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index cc1d09a21c0c..1dc74a78afa6 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c | |||
@@ -89,7 +89,6 @@ static unsigned int ports[] __initdata = | |||
89 | 89 | ||
90 | /* Information that needs to be kept for each board. */ | 90 | /* Information that needs to be kept for each board. */ |
91 | struct ni5010_local { | 91 | struct ni5010_local { |
92 | struct net_device_stats stats; | ||
93 | int o_pkt_size; | 92 | int o_pkt_size; |
94 | spinlock_t lock; | 93 | spinlock_t lock; |
95 | }; | 94 | }; |
@@ -103,7 +102,6 @@ static irqreturn_t ni5010_interrupt(int irq, void *dev_id); | |||
103 | static void ni5010_rx(struct net_device *dev); | 102 | static void ni5010_rx(struct net_device *dev); |
104 | static void ni5010_timeout(struct net_device *dev); | 103 | static void ni5010_timeout(struct net_device *dev); |
105 | static int ni5010_close(struct net_device *dev); | 104 | static int ni5010_close(struct net_device *dev); |
106 | static struct net_device_stats *ni5010_get_stats(struct net_device *dev); | ||
107 | static void ni5010_set_multicast_list(struct net_device *dev); | 105 | static void ni5010_set_multicast_list(struct net_device *dev); |
108 | static void reset_receiver(struct net_device *dev); | 106 | static void reset_receiver(struct net_device *dev); |
109 | 107 | ||
@@ -334,7 +332,6 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) | |||
334 | dev->open = ni5010_open; | 332 | dev->open = ni5010_open; |
335 | dev->stop = ni5010_close; | 333 | dev->stop = ni5010_close; |
336 | dev->hard_start_xmit = ni5010_send_packet; | 334 | dev->hard_start_xmit = ni5010_send_packet; |
337 | dev->get_stats = ni5010_get_stats; | ||
338 | dev->set_multicast_list = ni5010_set_multicast_list; | 335 | dev->set_multicast_list = ni5010_set_multicast_list; |
339 | dev->tx_timeout = ni5010_timeout; | 336 | dev->tx_timeout = ni5010_timeout; |
340 | dev->watchdog_timeo = HZ/20; | 337 | dev->watchdog_timeo = HZ/20; |
@@ -532,11 +529,11 @@ static void ni5010_rx(struct net_device *dev) | |||
532 | 529 | ||
533 | if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) { | 530 | if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) { |
534 | PRINTK((KERN_INFO "%s: receive error.\n", dev->name)); | 531 | PRINTK((KERN_INFO "%s: receive error.\n", dev->name)); |
535 | lp->stats.rx_errors++; | 532 | dev->stats.rx_errors++; |
536 | if (rcv_stat & RS_RUNT) lp->stats.rx_length_errors++; | 533 | if (rcv_stat & RS_RUNT) dev->stats.rx_length_errors++; |
537 | if (rcv_stat & RS_ALIGN) lp->stats.rx_frame_errors++; | 534 | if (rcv_stat & RS_ALIGN) dev->stats.rx_frame_errors++; |
538 | if (rcv_stat & RS_CRC_ERR) lp->stats.rx_crc_errors++; | 535 | if (rcv_stat & RS_CRC_ERR) dev->stats.rx_crc_errors++; |
539 | if (rcv_stat & RS_OFLW) lp->stats.rx_fifo_errors++; | 536 | if (rcv_stat & RS_OFLW) dev->stats.rx_fifo_errors++; |
540 | outb(0xff, EDLC_RCLR); /* Clear the interrupt */ | 537 | outb(0xff, EDLC_RCLR); /* Clear the interrupt */ |
541 | return; | 538 | return; |
542 | } | 539 | } |
@@ -547,8 +544,8 @@ static void ni5010_rx(struct net_device *dev) | |||
547 | if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) { | 544 | if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) { |
548 | PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n", | 545 | PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n", |
549 | dev->name, i_pkt_size)); | 546 | dev->name, i_pkt_size)); |
550 | lp->stats.rx_errors++; | 547 | dev->stats.rx_errors++; |
551 | lp->stats.rx_length_errors++; | 548 | dev->stats.rx_length_errors++; |
552 | return; | 549 | return; |
553 | } | 550 | } |
554 | 551 | ||
@@ -556,7 +553,7 @@ static void ni5010_rx(struct net_device *dev) | |||
556 | skb = dev_alloc_skb(i_pkt_size + 3); | 553 | skb = dev_alloc_skb(i_pkt_size + 3); |
557 | if (skb == NULL) { | 554 | if (skb == NULL) { |
558 | printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); | 555 | printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); |
559 | lp->stats.rx_dropped++; | 556 | dev->stats.rx_dropped++; |
560 | return; | 557 | return; |
561 | } | 558 | } |
562 | 559 | ||
@@ -573,8 +570,8 @@ static void ni5010_rx(struct net_device *dev) | |||
573 | skb->protocol = eth_type_trans(skb,dev); | 570 | skb->protocol = eth_type_trans(skb,dev); |
574 | netif_rx(skb); | 571 | netif_rx(skb); |
575 | dev->last_rx = jiffies; | 572 | dev->last_rx = jiffies; |
576 | lp->stats.rx_packets++; | 573 | dev->stats.rx_packets++; |
577 | lp->stats.rx_bytes += i_pkt_size; | 574 | dev->stats.rx_bytes += i_pkt_size; |
578 | 575 | ||
579 | PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n", | 576 | PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n", |
580 | dev->name, i_pkt_size)); | 577 | dev->name, i_pkt_size)); |
@@ -602,14 +599,14 @@ static int process_xmt_interrupt(struct net_device *dev) | |||
602 | /* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */ | 599 | /* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */ |
603 | outb(MM_EN_XMT | MM_MUX, IE_MMODE); | 600 | outb(MM_EN_XMT | MM_MUX, IE_MMODE); |
604 | outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */ | 601 | outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */ |
605 | lp->stats.collisions++; | 602 | dev->stats.collisions++; |
606 | return 1; | 603 | return 1; |
607 | } | 604 | } |
608 | 605 | ||
609 | /* FIXME: handle other xmt error conditions */ | 606 | /* FIXME: handle other xmt error conditions */ |
610 | 607 | ||
611 | lp->stats.tx_packets++; | 608 | dev->stats.tx_packets++; |
612 | lp->stats.tx_bytes += lp->o_pkt_size; | 609 | dev->stats.tx_bytes += lp->o_pkt_size; |
613 | netif_wake_queue(dev); | 610 | netif_wake_queue(dev); |
614 | 611 | ||
615 | PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n", | 612 | PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n", |
@@ -638,24 +635,6 @@ static int ni5010_close(struct net_device *dev) | |||
638 | 635 | ||
639 | } | 636 | } |
640 | 637 | ||
641 | /* Get the current statistics. This may be called with the card open or | ||
642 | closed. */ | ||
643 | static struct net_device_stats *ni5010_get_stats(struct net_device *dev) | ||
644 | { | ||
645 | struct ni5010_local *lp = netdev_priv(dev); | ||
646 | |||
647 | PRINTK2((KERN_DEBUG "%s: entering ni5010_get_stats\n", dev->name)); | ||
648 | |||
649 | if (NI5010_DEBUG) ni5010_show_registers(dev); | ||
650 | |||
651 | /* cli(); */ | ||
652 | /* Update the statistics from the device registers. */ | ||
653 | /* We do this in the interrupt handler */ | ||
654 | /* sti(); */ | ||
655 | |||
656 | return &lp->stats; | ||
657 | } | ||
658 | |||
659 | /* Set or clear the multicast filter for this adaptor. | 638 | /* Set or clear the multicast filter for this adaptor. |
660 | num_addrs == -1 Promiscuous mode, receive all packets | 639 | num_addrs == -1 Promiscuous mode, receive all packets |
661 | num_addrs == 0 Normal mode, clear multicast list | 640 | num_addrs == 0 Normal mode, clear multicast list |
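
ni5010 illustrates the usual error fan-out: one receive-status byte increments rx_errors plus one specific counter per error bit, all of them now in dev->stats. A compact sketch with hypothetical garply_* names and illustrative bit masks:

#include <linux/netdevice.h>
#include <linux/types.h>

#define GARPLY_RX_RUNT	0x01	/* illustrative bit assignments */
#define GARPLY_RX_ALIGN	0x02
#define GARPLY_RX_CRC	0x04
#define GARPLY_RX_OFLW	0x08

static void garply_count_rx_error(struct net_device *dev, u8 rcv_stat)
{
	dev->stats.rx_errors++;
	if (rcv_stat & GARPLY_RX_RUNT)
		dev->stats.rx_length_errors++;
	if (rcv_stat & GARPLY_RX_ALIGN)
		dev->stats.rx_frame_errors++;
	if (rcv_stat & GARPLY_RX_CRC)
		dev->stats.rx_crc_errors++;
	if (rcv_stat & GARPLY_RX_OFLW)
		dev->stats.rx_fifo_errors++;
}
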
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 723685ee57aa..f310d94443a0 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c | |||
@@ -530,8 +530,8 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit) | |||
530 | } else | 530 | } else |
531 | skb->ip_summed = CHECKSUM_NONE; | 531 | skb->ip_summed = CHECKSUM_NONE; |
532 | 532 | ||
533 | mac->stats.rx_bytes += len; | 533 | mac->netdev->stats.rx_bytes += len; |
534 | mac->stats.rx_packets++; | 534 | mac->netdev->stats.rx_packets++; |
535 | 535 | ||
536 | skb->protocol = eth_type_trans(skb, mac->netdev); | 536 | skb->protocol = eth_type_trans(skb, mac->netdev); |
537 | netif_receive_skb(skb); | 537 | netif_receive_skb(skb); |
@@ -1032,8 +1032,8 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1032 | info->skb = skb; | 1032 | info->skb = skb; |
1033 | 1033 | ||
1034 | txring->next_to_fill++; | 1034 | txring->next_to_fill++; |
1035 | mac->stats.tx_packets++; | 1035 | dev->stats.tx_packets++; |
1036 | mac->stats.tx_bytes += skb->len; | 1036 | dev->stats.tx_bytes += skb->len; |
1037 | 1037 | ||
1038 | spin_unlock_irqrestore(&txring->lock, flags); | 1038 | spin_unlock_irqrestore(&txring->lock, flags); |
1039 | 1039 | ||
@@ -1047,14 +1047,6 @@ out_err: | |||
1047 | return NETDEV_TX_BUSY; | 1047 | return NETDEV_TX_BUSY; |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev) | ||
1051 | { | ||
1052 | struct pasemi_mac *mac = netdev_priv(dev); | ||
1053 | |||
1054 | return &mac->stats; | ||
1055 | } | ||
1056 | |||
1057 | |||
1058 | static void pasemi_mac_set_rx_mode(struct net_device *dev) | 1050 | static void pasemi_mac_set_rx_mode(struct net_device *dev) |
1059 | { | 1051 | { |
1060 | struct pasemi_mac *mac = netdev_priv(dev); | 1052 | struct pasemi_mac *mac = netdev_priv(dev); |
@@ -1223,7 +1215,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1223 | dev->open = pasemi_mac_open; | 1215 | dev->open = pasemi_mac_open; |
1224 | dev->stop = pasemi_mac_close; | 1216 | dev->stop = pasemi_mac_close; |
1225 | dev->hard_start_xmit = pasemi_mac_start_tx; | 1217 | dev->hard_start_xmit = pasemi_mac_start_tx; |
1226 | dev->get_stats = pasemi_mac_get_stats; | ||
1227 | dev->set_multicast_list = pasemi_mac_set_rx_mode; | 1218 | dev->set_multicast_list = pasemi_mac_set_rx_mode; |
1228 | 1219 | ||
1229 | err = pasemi_mac_map_regs(mac); | 1220 | err = pasemi_mac_map_regs(mac); |
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h index c5b0adbc182e..c52cfcb6c4ca 100644 --- a/drivers/net/pasemi_mac.h +++ b/drivers/net/pasemi_mac.h | |||
@@ -60,7 +60,6 @@ struct pasemi_mac { | |||
60 | struct pci_dev *iob_pdev; | 60 | struct pci_dev *iob_pdev; |
61 | struct phy_device *phydev; | 61 | struct phy_device *phydev; |
62 | struct napi_struct napi; | 62 | struct napi_struct napi; |
63 | struct net_device_stats stats; | ||
64 | 63 | ||
65 | /* Pointer to the cacheable per-channel status registers */ | 64 | /* Pointer to the cacheable per-channel status registers */ |
66 | u64 *rx_status; | 65 | u64 *rx_status; |
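
pasemi_mac passes its private struct around rather than the net_device, so the counters are reached through the back-pointer it keeps (mac->netdev->stats), the inverse of the usual netdev_priv() direction. A sketch with hypothetical waldo_* names:

#include <linux/netdevice.h>

struct waldo_mac {
	struct net_device *netdev;	/* back-pointer kept in the private struct */
	/* DMA rings, PHY state, ... */
};

static void waldo_clean_rx_one(struct waldo_mac *mac, unsigned int len)
{
	mac->netdev->stats.rx_packets++;
	mac->netdev->stats.rx_bytes += len;
}
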
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index a4b16484a5f7..7dace63fb6e6 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c | |||
@@ -457,7 +457,6 @@ struct netdrv_private { | |||
457 | void *mmio_addr; | 457 | void *mmio_addr; |
458 | int drv_flags; | 458 | int drv_flags; |
459 | struct pci_dev *pci_dev; | 459 | struct pci_dev *pci_dev; |
460 | struct net_device_stats stats; | ||
461 | struct timer_list timer; /* Media selection timer. */ | 460 | struct timer_list timer; /* Media selection timer. */ |
462 | unsigned char *rx_ring; | 461 | unsigned char *rx_ring; |
463 | unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ | 462 | unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ |
@@ -505,7 +504,6 @@ static int netdrv_start_xmit (struct sk_buff *skb, | |||
505 | static irqreturn_t netdrv_interrupt (int irq, void *dev_instance); | 504 | static irqreturn_t netdrv_interrupt (int irq, void *dev_instance); |
506 | static int netdrv_close (struct net_device *dev); | 505 | static int netdrv_close (struct net_device *dev); |
507 | static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); | 506 | static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); |
508 | static struct net_device_stats *netdrv_get_stats (struct net_device *dev); | ||
509 | static void netdrv_set_rx_mode (struct net_device *dev); | 507 | static void netdrv_set_rx_mode (struct net_device *dev); |
510 | static void netdrv_hw_start (struct net_device *dev); | 508 | static void netdrv_hw_start (struct net_device *dev); |
511 | 509 | ||
@@ -775,7 +773,6 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev, | |||
775 | dev->open = netdrv_open; | 773 | dev->open = netdrv_open; |
776 | dev->hard_start_xmit = netdrv_start_xmit; | 774 | dev->hard_start_xmit = netdrv_start_xmit; |
777 | dev->stop = netdrv_close; | 775 | dev->stop = netdrv_close; |
778 | dev->get_stats = netdrv_get_stats; | ||
779 | dev->set_multicast_list = netdrv_set_rx_mode; | 776 | dev->set_multicast_list = netdrv_set_rx_mode; |
780 | dev->do_ioctl = netdrv_ioctl; | 777 | dev->do_ioctl = netdrv_ioctl; |
781 | dev->tx_timeout = netdrv_tx_timeout; | 778 | dev->tx_timeout = netdrv_tx_timeout; |
@@ -1276,7 +1273,7 @@ static void netdrv_tx_clear (struct netdrv_private *tp) | |||
1276 | if (rp->skb) { | 1273 | if (rp->skb) { |
1277 | dev_kfree_skb (rp->skb); | 1274 | dev_kfree_skb (rp->skb); |
1278 | rp->skb = NULL; | 1275 | rp->skb = NULL; |
1279 | tp->stats.tx_dropped++; | 1276 | dev->stats.tx_dropped++; |
1280 | } | 1277 | } |
1281 | } | 1278 | } |
1282 | } | 1279 | } |
@@ -1389,25 +1386,25 @@ static void netdrv_tx_interrupt (struct net_device *dev, | |||
1389 | /* There was a major error, log it. */ | 1386 | /* There was a major error, log it. */ |
1390 | DPRINTK ("%s: Transmit error, Tx status %8.8x.\n", | 1387 | DPRINTK ("%s: Transmit error, Tx status %8.8x.\n", |
1391 | dev->name, txstatus); | 1388 | dev->name, txstatus); |
1392 | tp->stats.tx_errors++; | 1389 | dev->stats.tx_errors++; |
1393 | if (txstatus & TxAborted) { | 1390 | if (txstatus & TxAborted) { |
1394 | tp->stats.tx_aborted_errors++; | 1391 | dev->stats.tx_aborted_errors++; |
1395 | NETDRV_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift)); | 1392 | NETDRV_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift)); |
1396 | } | 1393 | } |
1397 | if (txstatus & TxCarrierLost) | 1394 | if (txstatus & TxCarrierLost) |
1398 | tp->stats.tx_carrier_errors++; | 1395 | dev->stats.tx_carrier_errors++; |
1399 | if (txstatus & TxOutOfWindow) | 1396 | if (txstatus & TxOutOfWindow) |
1400 | tp->stats.tx_window_errors++; | 1397 | dev->stats.tx_window_errors++; |
1401 | } else { | 1398 | } else { |
1402 | if (txstatus & TxUnderrun) { | 1399 | if (txstatus & TxUnderrun) { |
1403 | /* Add 64 to the Tx FIFO threshold. */ | 1400 | /* Add 64 to the Tx FIFO threshold. */ |
1404 | if (tp->tx_flag < 0x00300000) | 1401 | if (tp->tx_flag < 0x00300000) |
1405 | tp->tx_flag += 0x00020000; | 1402 | tp->tx_flag += 0x00020000; |
1406 | tp->stats.tx_fifo_errors++; | 1403 | dev->stats.tx_fifo_errors++; |
1407 | } | 1404 | } |
1408 | tp->stats.collisions += (txstatus >> 24) & 15; | 1405 | dev->stats.collisions += (txstatus >> 24) & 15; |
1409 | tp->stats.tx_bytes += txstatus & 0x7ff; | 1406 | dev->stats.tx_bytes += txstatus & 0x7ff; |
1410 | tp->stats.tx_packets++; | 1407 | dev->stats.tx_packets++; |
1411 | } | 1408 | } |
1412 | 1409 | ||
1413 | /* Free the original skb. */ | 1410 | /* Free the original skb. */ |
@@ -1460,13 +1457,13 @@ static void netdrv_rx_err (u32 rx_status, struct net_device *dev, | |||
1460 | dev->name, rx_status); | 1457 | dev->name, rx_status); |
1461 | /* A.C.: The chip hangs here. */ | 1458 | /* A.C.: The chip hangs here. */ |
1462 | } | 1459 | } |
1463 | tp->stats.rx_errors++; | 1460 | dev->stats.rx_errors++; |
1464 | if (rx_status & (RxBadSymbol | RxBadAlign)) | 1461 | if (rx_status & (RxBadSymbol | RxBadAlign)) |
1465 | tp->stats.rx_frame_errors++; | 1462 | dev->stats.rx_frame_errors++; |
1466 | if (rx_status & (RxRunt | RxTooLong)) | 1463 | if (rx_status & (RxRunt | RxTooLong)) |
1467 | tp->stats.rx_length_errors++; | 1464 | dev->stats.rx_length_errors++; |
1468 | if (rx_status & RxCRCErr) | 1465 | if (rx_status & RxCRCErr) |
1469 | tp->stats.rx_crc_errors++; | 1466 | dev->stats.rx_crc_errors++; |
1470 | /* Reset the receiver, based on RealTek recommendation. (Bug?) */ | 1467 | /* Reset the receiver, based on RealTek recommendation. (Bug?) */ |
1471 | tp->cur_rx = 0; | 1468 | tp->cur_rx = 0; |
1472 | 1469 | ||
@@ -1572,13 +1569,13 @@ static void netdrv_rx_interrupt (struct net_device *dev, | |||
1572 | skb->protocol = eth_type_trans (skb, dev); | 1569 | skb->protocol = eth_type_trans (skb, dev); |
1573 | netif_rx (skb); | 1570 | netif_rx (skb); |
1574 | dev->last_rx = jiffies; | 1571 | dev->last_rx = jiffies; |
1575 | tp->stats.rx_bytes += pkt_size; | 1572 | dev->stats.rx_bytes += pkt_size; |
1576 | tp->stats.rx_packets++; | 1573 | dev->stats.rx_packets++; |
1577 | } else { | 1574 | } else { |
1578 | printk (KERN_WARNING | 1575 | printk (KERN_WARNING |
1579 | "%s: Memory squeeze, dropping packet.\n", | 1576 | "%s: Memory squeeze, dropping packet.\n", |
1580 | dev->name); | 1577 | dev->name); |
1581 | tp->stats.rx_dropped++; | 1578 | dev->stats.rx_dropped++; |
1582 | } | 1579 | } |
1583 | 1580 | ||
1584 | cur_rx = (cur_rx + rx_size + 4 + 3) & ~3; | 1581 | cur_rx = (cur_rx + rx_size + 4 + 3) & ~3; |
@@ -1607,7 +1604,7 @@ static void netdrv_weird_interrupt (struct net_device *dev, | |||
1607 | assert (ioaddr != NULL); | 1604 | assert (ioaddr != NULL); |
1608 | 1605 | ||
1609 | /* Update the error count. */ | 1606 | /* Update the error count. */ |
1610 | tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); | 1607 | dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); |
1611 | NETDRV_W32 (RxMissed, 0); | 1608 | NETDRV_W32 (RxMissed, 0); |
1612 | 1609 | ||
1613 | if ((status & RxUnderrun) && link_changed && | 1610 | if ((status & RxUnderrun) && link_changed && |
@@ -1628,14 +1625,14 @@ static void netdrv_weird_interrupt (struct net_device *dev, | |||
1628 | /* XXX along with netdrv_rx_err, are we double-counting errors? */ | 1625 | /* XXX along with netdrv_rx_err, are we double-counting errors? */ |
1629 | if (status & | 1626 | if (status & |
1630 | (RxUnderrun | RxOverflow | RxErr | RxFIFOOver)) | 1627 | (RxUnderrun | RxOverflow | RxErr | RxFIFOOver)) |
1631 | tp->stats.rx_errors++; | 1628 | dev->stats.rx_errors++; |
1632 | 1629 | ||
1633 | if (status & (PCSTimeout)) | 1630 | if (status & (PCSTimeout)) |
1634 | tp->stats.rx_length_errors++; | 1631 | dev->stats.rx_length_errors++; |
1635 | if (status & (RxUnderrun | RxFIFOOver)) | 1632 | if (status & (RxUnderrun | RxFIFOOver)) |
1636 | tp->stats.rx_fifo_errors++; | 1633 | dev->stats.rx_fifo_errors++; |
1637 | if (status & RxOverflow) { | 1634 | if (status & RxOverflow) { |
1638 | tp->stats.rx_over_errors++; | 1635 | dev->stats.rx_over_errors++; |
1639 | tp->cur_rx = NETDRV_R16 (RxBufAddr) % RX_BUF_LEN; | 1636 | tp->cur_rx = NETDRV_R16 (RxBufAddr) % RX_BUF_LEN; |
1640 | NETDRV_W16_F (RxBufPtr, tp->cur_rx - 16); | 1637 | NETDRV_W16_F (RxBufPtr, tp->cur_rx - 16); |
1641 | } | 1638 | } |
@@ -1739,7 +1736,7 @@ static int netdrv_close (struct net_device *dev) | |||
1739 | NETDRV_W16 (IntrMask, 0x0000); | 1736 | NETDRV_W16 (IntrMask, 0x0000); |
1740 | 1737 | ||
1741 | /* Update the error counts. */ | 1738 | /* Update the error counts. */ |
1742 | tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); | 1739 | dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); |
1743 | NETDRV_W32 (RxMissed, 0); | 1740 | NETDRV_W32 (RxMissed, 0); |
1744 | 1741 | ||
1745 | spin_unlock_irqrestore (&tp->lock, flags); | 1742 | spin_unlock_irqrestore (&tp->lock, flags); |
@@ -1806,31 +1803,6 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) | |||
1806 | return rc; | 1803 | return rc; |
1807 | } | 1804 | } |
1808 | 1805 | ||
1809 | |||
1810 | static struct net_device_stats *netdrv_get_stats (struct net_device *dev) | ||
1811 | { | ||
1812 | struct netdrv_private *tp = dev->priv; | ||
1813 | void *ioaddr = tp->mmio_addr; | ||
1814 | |||
1815 | DPRINTK ("ENTER\n"); | ||
1816 | |||
1817 | assert (tp != NULL); | ||
1818 | |||
1819 | if (netif_running(dev)) { | ||
1820 | unsigned long flags; | ||
1821 | |||
1822 | spin_lock_irqsave (&tp->lock, flags); | ||
1823 | |||
1824 | tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); | ||
1825 | NETDRV_W32 (RxMissed, 0); | ||
1826 | |||
1827 | spin_unlock_irqrestore (&tp->lock, flags); | ||
1828 | } | ||
1829 | |||
1830 | DPRINTK ("EXIT\n"); | ||
1831 | return &tp->stats; | ||
1832 | } | ||
1833 | |||
1834 | /* Set or clear the multicast filter for this adaptor. | 1806 | /* Set or clear the multicast filter for this adaptor. |
1835 | This routine is not state sensitive and need not be SMP locked. */ | 1807 | This routine is not state sensitive and need not be SMP locked. */ |
1836 | 1808 | ||
@@ -1908,7 +1880,7 @@ static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state) | |||
1908 | NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear)); | 1880 | NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear)); |
1909 | 1881 | ||
1910 | /* Update the error counts. */ | 1882 | /* Update the error counts. */ |
1911 | tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); | 1883 | dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); |
1912 | NETDRV_W32 (RxMissed, 0); | 1884 | NETDRV_W32 (RxMissed, 0); |
1913 | 1885 | ||
1914 | spin_unlock_irqrestore (&tp->lock, flags); | 1886 | spin_unlock_irqrestore (&tp->lock, flags); |
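
The deleted netdrv_get_stats() did one job beyond returning a pointer: it folded the hardware RxMissed register into the counters under the lock. With the accessor gone, that folding must keep happening at the other points the register is touched, which is exactly what the interrupt, close and suspend hunks above do. (Note that the netdrv_tx_clear() hunk switches to dev->stats while its header still shows a tp-only signature, so a net_device pointer has to be available in that scope.) A minimal folding sketch, with hypothetical fred_* names and an illustrative register offset:

#include <linux/io.h>
#include <linux/netdevice.h>

static void fred_fold_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	/* read the hardware missed-packet counter into the standard stats,
	 * then clear it, as the interrupt/close/suspend paths do */
	dev->stats.rx_missed_errors += readl(ioaddr + 0x4c);	/* offset is illustrative */
	writel(0, ioaddr + 0x4c);
}
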
diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 2cfab4b36654..c17d9ac9ff30 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c | |||
@@ -154,7 +154,6 @@ static int plip_hard_header_cache(struct neighbour *neigh, | |||
154 | struct hh_cache *hh); | 154 | struct hh_cache *hh); |
155 | static int plip_open(struct net_device *dev); | 155 | static int plip_open(struct net_device *dev); |
156 | static int plip_close(struct net_device *dev); | 156 | static int plip_close(struct net_device *dev); |
157 | static struct net_device_stats *plip_get_stats(struct net_device *dev); | ||
158 | static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | 157 | static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); |
159 | static int plip_preempt(void *handle); | 158 | static int plip_preempt(void *handle); |
160 | static void plip_wakeup(void *handle); | 159 | static void plip_wakeup(void *handle); |
@@ -206,7 +205,6 @@ struct plip_local { | |||
206 | }; | 205 | }; |
207 | 206 | ||
208 | struct net_local { | 207 | struct net_local { |
209 | struct net_device_stats enet_stats; | ||
210 | struct net_device *dev; | 208 | struct net_device *dev; |
211 | struct work_struct immediate; | 209 | struct work_struct immediate; |
212 | struct delayed_work deferred; | 210 | struct delayed_work deferred; |
@@ -285,7 +283,6 @@ plip_init_netdev(struct net_device *dev) | |||
285 | dev->hard_start_xmit = plip_tx_packet; | 283 | dev->hard_start_xmit = plip_tx_packet; |
286 | dev->open = plip_open; | 284 | dev->open = plip_open; |
287 | dev->stop = plip_close; | 285 | dev->stop = plip_close; |
288 | dev->get_stats = plip_get_stats; | ||
289 | dev->do_ioctl = plip_ioctl; | 286 | dev->do_ioctl = plip_ioctl; |
290 | dev->header_cache_update = NULL; | 287 | dev->header_cache_update = NULL; |
291 | dev->tx_queue_len = 10; | 288 | dev->tx_queue_len = 10; |
@@ -430,8 +427,8 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, | |||
430 | dev->name, snd->state, c0); | 427 | dev->name, snd->state, c0); |
431 | } else | 428 | } else |
432 | error = HS_TIMEOUT; | 429 | error = HS_TIMEOUT; |
433 | nl->enet_stats.tx_errors++; | 430 | dev->stats.tx_errors++; |
434 | nl->enet_stats.tx_aborted_errors++; | 431 | dev->stats.tx_aborted_errors++; |
435 | } else if (nl->connection == PLIP_CN_RECEIVE) { | 432 | } else if (nl->connection == PLIP_CN_RECEIVE) { |
436 | if (rcv->state == PLIP_PK_TRIGGER) { | 433 | if (rcv->state == PLIP_PK_TRIGGER) { |
437 | /* Transmission was interrupted. */ | 434 | /* Transmission was interrupted. */ |
@@ -448,7 +445,7 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, | |||
448 | printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n", | 445 | printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n", |
449 | dev->name, rcv->state, c0); | 446 | dev->name, rcv->state, c0); |
450 | } | 447 | } |
451 | nl->enet_stats.rx_dropped++; | 448 | dev->stats.rx_dropped++; |
452 | } | 449 | } |
453 | rcv->state = PLIP_PK_DONE; | 450 | rcv->state = PLIP_PK_DONE; |
454 | if (rcv->skb) { | 451 | if (rcv->skb) { |
@@ -661,7 +658,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, | |||
661 | &rcv->nibble, &rcv->data)) | 658 | &rcv->nibble, &rcv->data)) |
662 | return TIMEOUT; | 659 | return TIMEOUT; |
663 | if (rcv->data != rcv->checksum) { | 660 | if (rcv->data != rcv->checksum) { |
664 | nl->enet_stats.rx_crc_errors++; | 661 | dev->stats.rx_crc_errors++; |
665 | if (net_debug) | 662 | if (net_debug) |
666 | printk(KERN_DEBUG "%s: checksum error\n", dev->name); | 663 | printk(KERN_DEBUG "%s: checksum error\n", dev->name); |
667 | return ERROR; | 664 | return ERROR; |
@@ -673,8 +670,8 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, | |||
673 | rcv->skb->protocol=plip_type_trans(rcv->skb, dev); | 670 | rcv->skb->protocol=plip_type_trans(rcv->skb, dev); |
674 | netif_rx(rcv->skb); | 671 | netif_rx(rcv->skb); |
675 | dev->last_rx = jiffies; | 672 | dev->last_rx = jiffies; |
676 | nl->enet_stats.rx_bytes += rcv->length.h; | 673 | dev->stats.rx_bytes += rcv->length.h; |
677 | nl->enet_stats.rx_packets++; | 674 | dev->stats.rx_packets++; |
678 | rcv->skb = NULL; | 675 | rcv->skb = NULL; |
679 | if (net_debug > 2) | 676 | if (net_debug > 2) |
680 | printk(KERN_DEBUG "%s: receive end\n", dev->name); | 677 | printk(KERN_DEBUG "%s: receive end\n", dev->name); |
@@ -776,7 +773,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, | |||
776 | if (nl->connection == PLIP_CN_RECEIVE) { | 773 | if (nl->connection == PLIP_CN_RECEIVE) { |
777 | spin_unlock_irq(&nl->lock); | 774 | spin_unlock_irq(&nl->lock); |
778 | /* Interrupted. */ | 775 | /* Interrupted. */ |
779 | nl->enet_stats.collisions++; | 776 | dev->stats.collisions++; |
780 | return OK; | 777 | return OK; |
781 | } | 778 | } |
782 | c0 = read_status(dev); | 779 | c0 = read_status(dev); |
@@ -792,7 +789,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, | |||
792 | {enable,disable}_irq *counts* | 789 | {enable,disable}_irq *counts* |
793 | them. -- AV */ | 790 | them. -- AV */ |
794 | ENABLE(dev->irq); | 791 | ENABLE(dev->irq); |
795 | nl->enet_stats.collisions++; | 792 | dev->stats.collisions++; |
796 | return OK; | 793 | return OK; |
797 | } | 794 | } |
798 | disable_parport_interrupts (dev); | 795 | disable_parport_interrupts (dev); |
@@ -840,9 +837,9 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, | |||
840 | &snd->nibble, snd->checksum)) | 837 | &snd->nibble, snd->checksum)) |
841 | return TIMEOUT; | 838 | return TIMEOUT; |
842 | 839 | ||
843 | nl->enet_stats.tx_bytes += snd->skb->len; | 840 | dev->stats.tx_bytes += snd->skb->len; |
844 | dev_kfree_skb(snd->skb); | 841 | dev_kfree_skb(snd->skb); |
845 | nl->enet_stats.tx_packets++; | 842 | dev->stats.tx_packets++; |
846 | snd->state = PLIP_PK_DONE; | 843 | snd->state = PLIP_PK_DONE; |
847 | 844 | ||
848 | case PLIP_PK_DONE: | 845 | case PLIP_PK_DONE: |
@@ -1199,15 +1196,6 @@ plip_wakeup(void *handle) | |||
1199 | return; | 1196 | return; |
1200 | } | 1197 | } |
1201 | 1198 | ||
1202 | static struct net_device_stats * | ||
1203 | plip_get_stats(struct net_device *dev) | ||
1204 | { | ||
1205 | struct net_local *nl = netdev_priv(dev); | ||
1206 | struct net_device_stats *r = &nl->enet_stats; | ||
1207 | |||
1208 | return r; | ||
1209 | } | ||
1210 | |||
1211 | static int | 1199 | static int |
1212 | plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 1200 | plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1213 | { | 1201 | { |
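
plip drops both the enet_stats member and every trace of plip_get_stats (forward declaration, dev->get_stats wiring, the function itself) without registering a replacement handler. That works because an unset dev->get_stats is assumed to fall back to the embedded counters in the core, roughly like the sketch below; the helper names are illustrative, not the actual net/core/dev.c symbols.

#include <linux/netdevice.h>

/* Approximation of the core-side fallback these removals rely on. */
static struct net_device_stats *demo_default_stats(struct net_device *dev)
{
	return &dev->stats;
}

static void demo_register_fallback(struct net_device *dev)
{
	if (dev->get_stats == NULL)		/* driver did not provide one */
		dev->get_stats = demo_default_stats;
}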
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 309199bb7d12..97c6ed07dd15 100755 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -2053,7 +2053,7 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, | |||
2053 | if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { | 2053 | if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { |
2054 | printk(KERN_ERR "Frame too short to be legal, frame not sent.\n"); | 2054 | printk(KERN_ERR "Frame too short to be legal, frame not sent.\n"); |
2055 | 2055 | ||
2056 | qdev->stats.tx_errors++; | 2056 | qdev->ndev->stats.tx_errors++; |
2057 | retval = -EIO; | 2057 | retval = -EIO; |
2058 | goto frame_not_sent; | 2058 | goto frame_not_sent; |
2059 | } | 2059 | } |
@@ -2061,7 +2061,7 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, | |||
2061 | if(tx_cb->seg_count == 0) { | 2061 | if(tx_cb->seg_count == 0) { |
2062 | printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); | 2062 | printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); |
2063 | 2063 | ||
2064 | qdev->stats.tx_errors++; | 2064 | qdev->ndev->stats.tx_errors++; |
2065 | retval = -EIO; | 2065 | retval = -EIO; |
2066 | goto invalid_seg_count; | 2066 | goto invalid_seg_count; |
2067 | } | 2067 | } |
@@ -2080,8 +2080,8 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, | |||
2080 | PCI_DMA_TODEVICE); | 2080 | PCI_DMA_TODEVICE); |
2081 | } | 2081 | } |
2082 | } | 2082 | } |
2083 | qdev->stats.tx_packets++; | 2083 | qdev->ndev->stats.tx_packets++; |
2084 | qdev->stats.tx_bytes += tx_cb->skb->len; | 2084 | qdev->ndev->stats.tx_bytes += tx_cb->skb->len; |
2085 | 2085 | ||
2086 | frame_not_sent: | 2086 | frame_not_sent: |
2087 | dev_kfree_skb_irq(tx_cb->skb); | 2087 | dev_kfree_skb_irq(tx_cb->skb); |
@@ -2140,8 +2140,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, | |||
2140 | lrg_buf_cb2 = ql_get_lbuf(qdev); | 2140 | lrg_buf_cb2 = ql_get_lbuf(qdev); |
2141 | skb = lrg_buf_cb2->skb; | 2141 | skb = lrg_buf_cb2->skb; |
2142 | 2142 | ||
2143 | qdev->stats.rx_packets++; | 2143 | qdev->ndev->stats.rx_packets++; |
2144 | qdev->stats.rx_bytes += length; | 2144 | qdev->ndev->stats.rx_bytes += length; |
2145 | 2145 | ||
2146 | skb_put(skb, length); | 2146 | skb_put(skb, length); |
2147 | pci_unmap_single(qdev->pdev, | 2147 | pci_unmap_single(qdev->pdev, |
@@ -2225,8 +2225,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, | |||
2225 | skb2->protocol = eth_type_trans(skb2, qdev->ndev); | 2225 | skb2->protocol = eth_type_trans(skb2, qdev->ndev); |
2226 | 2226 | ||
2227 | netif_receive_skb(skb2); | 2227 | netif_receive_skb(skb2); |
2228 | qdev->stats.rx_packets++; | 2228 | ndev->stats.rx_packets++; |
2229 | qdev->stats.rx_bytes += length; | 2229 | ndev->stats.rx_bytes += length; |
2230 | ndev->last_rx = jiffies; | 2230 | ndev->last_rx = jiffies; |
2231 | lrg_buf_cb2->skb = NULL; | 2231 | lrg_buf_cb2->skb = NULL; |
2232 | 2232 | ||
@@ -3753,12 +3753,6 @@ static int ql3xxx_open(struct net_device *ndev) | |||
3753 | return (ql_adapter_up(qdev)); | 3753 | return (ql_adapter_up(qdev)); |
3754 | } | 3754 | } |
3755 | 3755 | ||
3756 | static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev) | ||
3757 | { | ||
3758 | struct ql3_adapter *qdev = (struct ql3_adapter *)dev->priv; | ||
3759 | return &qdev->stats; | ||
3760 | } | ||
3761 | |||
3762 | static void ql3xxx_set_multicast_list(struct net_device *ndev) | 3756 | static void ql3xxx_set_multicast_list(struct net_device *ndev) |
3763 | { | 3757 | { |
3764 | /* | 3758 | /* |
@@ -4048,7 +4042,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
4048 | ndev->open = ql3xxx_open; | 4042 | ndev->open = ql3xxx_open; |
4049 | ndev->hard_start_xmit = ql3xxx_send; | 4043 | ndev->hard_start_xmit = ql3xxx_send; |
4050 | ndev->stop = ql3xxx_close; | 4044 | ndev->stop = ql3xxx_close; |
4051 | ndev->get_stats = ql3xxx_get_stats; | ||
4052 | ndev->set_multicast_list = ql3xxx_set_multicast_list; | 4045 | ndev->set_multicast_list = ql3xxx_set_multicast_list; |
4053 | SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); | 4046 | SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); |
4054 | ndev->set_mac_address = ql3xxx_set_mac_address; | 4047 | ndev->set_mac_address = ql3xxx_set_mac_address; |
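
qla3xxx bumps its counters from interrupt handlers that only see the struct ql3_adapter, so the conversion goes through the adapter's existing net_device back-pointer (qdev->ndev->stats, or the local ndev where one is already in scope); the qla3xxx.h hunk below then deletes the private stats field. The idiom, sketched with hypothetical names:

#include <linux/netdevice.h>

struct demo_adapter {
	struct net_device *ndev;	/* back-pointer filled in at probe time */
};

static void demo_tx_complete(struct demo_adapter *adap, unsigned int len)
{
	/* Reach the embedded counters through the back-pointer. */
	adap->ndev->stats.tx_packets++;
	adap->ndev->stats.tx_bytes += len;
}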
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h index aa2216f0d7b8..483840f7fc10 100755 --- a/drivers/net/qla3xxx.h +++ b/drivers/net/qla3xxx.h | |||
@@ -1283,7 +1283,6 @@ struct ql3_adapter { | |||
1283 | u32 update_ob_opcode; /* Opcode to use for updating NCB */ | 1283 | u32 update_ob_opcode; /* Opcode to use for updating NCB */ |
1284 | u32 mb_bit_mask; /* MA Bits mask to use on transmission */ | 1284 | u32 mb_bit_mask; /* MA Bits mask to use on transmission */ |
1285 | u32 numPorts; | 1285 | u32 numPorts; |
1286 | struct net_device_stats stats; | ||
1287 | struct workqueue_struct *workqueue; | 1286 | struct workqueue_struct *workqueue; |
1288 | struct delayed_work reset_work; | 1287 | struct delayed_work reset_work; |
1289 | struct delayed_work tx_timeout_work; | 1288 | struct delayed_work tx_timeout_work; |
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 25a9dd821aa2..d43dcf3ed5a9 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c | |||
@@ -53,7 +53,6 @@ struct rionet_private { | |||
53 | struct rio_mport *mport; | 53 | struct rio_mport *mport; |
54 | struct sk_buff *rx_skb[RIONET_RX_RING_SIZE]; | 54 | struct sk_buff *rx_skb[RIONET_RX_RING_SIZE]; |
55 | struct sk_buff *tx_skb[RIONET_TX_RING_SIZE]; | 55 | struct sk_buff *tx_skb[RIONET_TX_RING_SIZE]; |
56 | struct net_device_stats stats; | ||
57 | int rx_slot; | 56 | int rx_slot; |
58 | int tx_slot; | 57 | int tx_slot; |
59 | int tx_cnt; | 58 | int tx_cnt; |
@@ -91,12 +90,6 @@ static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES]; | |||
91 | #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) | 90 | #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) |
92 | #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) | 91 | #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) |
93 | 92 | ||
94 | static struct net_device_stats *rionet_stats(struct net_device *ndev) | ||
95 | { | ||
96 | struct rionet_private *rnet = ndev->priv; | ||
97 | return &rnet->stats; | ||
98 | } | ||
99 | |||
100 | static int rionet_rx_clean(struct net_device *ndev) | 93 | static int rionet_rx_clean(struct net_device *ndev) |
101 | { | 94 | { |
102 | int i; | 95 | int i; |
@@ -120,15 +113,15 @@ static int rionet_rx_clean(struct net_device *ndev) | |||
120 | error = netif_rx(rnet->rx_skb[i]); | 113 | error = netif_rx(rnet->rx_skb[i]); |
121 | 114 | ||
122 | if (error == NET_RX_DROP) { | 115 | if (error == NET_RX_DROP) { |
123 | rnet->stats.rx_dropped++; | 116 | ndev->stats.rx_dropped++; |
124 | } else if (error == NET_RX_BAD) { | 117 | } else if (error == NET_RX_BAD) { |
125 | if (netif_msg_rx_err(rnet)) | 118 | if (netif_msg_rx_err(rnet)) |
126 | printk(KERN_WARNING "%s: bad rx packet\n", | 119 | printk(KERN_WARNING "%s: bad rx packet\n", |
127 | DRV_NAME); | 120 | DRV_NAME); |
128 | rnet->stats.rx_errors++; | 121 | ndev->stats.rx_errors++; |
129 | } else { | 122 | } else { |
130 | rnet->stats.rx_packets++; | 123 | ndev->stats.rx_packets++; |
131 | rnet->stats.rx_bytes += RIO_MAX_MSG_SIZE; | 124 | ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE; |
132 | } | 125 | } |
133 | 126 | ||
134 | } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot); | 127 | } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot); |
@@ -163,8 +156,8 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev, | |||
163 | rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len); | 156 | rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len); |
164 | rnet->tx_skb[rnet->tx_slot] = skb; | 157 | rnet->tx_skb[rnet->tx_slot] = skb; |
165 | 158 | ||
166 | rnet->stats.tx_packets++; | 159 | ndev->stats.tx_packets++; |
167 | rnet->stats.tx_bytes += skb->len; | 160 | ndev->stats.tx_bytes += skb->len; |
168 | 161 | ||
169 | if (++rnet->tx_cnt == RIONET_TX_RING_SIZE) | 162 | if (++rnet->tx_cnt == RIONET_TX_RING_SIZE) |
170 | netif_stop_queue(ndev); | 163 | netif_stop_queue(ndev); |
@@ -466,7 +459,6 @@ static int rionet_setup_netdev(struct rio_mport *mport) | |||
466 | ndev->open = &rionet_open; | 459 | ndev->open = &rionet_open; |
467 | ndev->hard_start_xmit = &rionet_start_xmit; | 460 | ndev->hard_start_xmit = &rionet_start_xmit; |
468 | ndev->stop = &rionet_close; | 461 | ndev->stop = &rionet_close; |
469 | ndev->get_stats = &rionet_stats; | ||
470 | ndev->mtu = RIO_MAX_MSG_SIZE - 14; | 462 | ndev->mtu = RIO_MAX_MSG_SIZE - 14; |
471 | ndev->features = NETIF_F_LLTX; | 463 | ndev->features = NETIF_F_LLTX; |
472 | SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops); | 464 | SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops); |
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index 41f877d482c7..03facba05259 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c | |||
@@ -126,7 +126,6 @@ static int __devinit rr_init_one(struct pci_dev *pdev, | |||
126 | dev->open = &rr_open; | 126 | dev->open = &rr_open; |
127 | dev->hard_start_xmit = &rr_start_xmit; | 127 | dev->hard_start_xmit = &rr_start_xmit; |
128 | dev->stop = &rr_close; | 128 | dev->stop = &rr_close; |
129 | dev->get_stats = &rr_get_stats; | ||
130 | dev->do_ioctl = &rr_ioctl; | 129 | dev->do_ioctl = &rr_ioctl; |
131 | 130 | ||
132 | dev->base_addr = pci_resource_start(pdev, 0); | 131 | dev->base_addr = pci_resource_start(pdev, 0); |
@@ -808,7 +807,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) | |||
808 | case E_CON_REJ: | 807 | case E_CON_REJ: |
809 | printk(KERN_WARNING "%s: Connection rejected\n", | 808 | printk(KERN_WARNING "%s: Connection rejected\n", |
810 | dev->name); | 809 | dev->name); |
811 | rrpriv->stats.tx_aborted_errors++; | 810 | dev->stats.tx_aborted_errors++; |
812 | break; | 811 | break; |
813 | case E_CON_TMOUT: | 812 | case E_CON_TMOUT: |
814 | printk(KERN_WARNING "%s: Connection timeout\n", | 813 | printk(KERN_WARNING "%s: Connection timeout\n", |
@@ -817,7 +816,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) | |||
817 | case E_DISC_ERR: | 816 | case E_DISC_ERR: |
818 | printk(KERN_WARNING "%s: HIPPI disconnect error\n", | 817 | printk(KERN_WARNING "%s: HIPPI disconnect error\n", |
819 | dev->name); | 818 | dev->name); |
820 | rrpriv->stats.tx_aborted_errors++; | 819 | dev->stats.tx_aborted_errors++; |
821 | break; | 820 | break; |
822 | case E_INT_PRTY: | 821 | case E_INT_PRTY: |
823 | printk(KERN_ERR "%s: HIPPI Internal Parity error\n", | 822 | printk(KERN_ERR "%s: HIPPI Internal Parity error\n", |
@@ -833,7 +832,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) | |||
833 | case E_TX_LINK_DROP: | 832 | case E_TX_LINK_DROP: |
834 | printk(KERN_WARNING "%s: Link lost during transmit\n", | 833 | printk(KERN_WARNING "%s: Link lost during transmit\n", |
835 | dev->name); | 834 | dev->name); |
836 | rrpriv->stats.tx_aborted_errors++; | 835 | dev->stats.tx_aborted_errors++; |
837 | writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, | 836 | writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, |
838 | &regs->HostCtrl); | 837 | &regs->HostCtrl); |
839 | wmb(); | 838 | wmb(); |
@@ -973,7 +972,7 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) | |||
973 | printk("len %x, mode %x\n", pkt_len, desc->mode); | 972 | printk("len %x, mode %x\n", pkt_len, desc->mode); |
974 | #endif | 973 | #endif |
975 | if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){ | 974 | if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){ |
976 | rrpriv->stats.rx_dropped++; | 975 | dev->stats.rx_dropped++; |
977 | goto defer; | 976 | goto defer; |
978 | } | 977 | } |
979 | 978 | ||
@@ -986,7 +985,7 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) | |||
986 | skb = alloc_skb(pkt_len, GFP_ATOMIC); | 985 | skb = alloc_skb(pkt_len, GFP_ATOMIC); |
987 | if (skb == NULL){ | 986 | if (skb == NULL){ |
988 | printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len); | 987 | printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len); |
989 | rrpriv->stats.rx_dropped++; | 988 | dev->stats.rx_dropped++; |
990 | goto defer; | 989 | goto defer; |
991 | } else { | 990 | } else { |
992 | pci_dma_sync_single_for_cpu(rrpriv->pci_dev, | 991 | pci_dma_sync_single_for_cpu(rrpriv->pci_dev, |
@@ -1024,7 +1023,7 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) | |||
1024 | } else { | 1023 | } else { |
1025 | printk("%s: Out of memory, deferring " | 1024 | printk("%s: Out of memory, deferring " |
1026 | "packet\n", dev->name); | 1025 | "packet\n", dev->name); |
1027 | rrpriv->stats.rx_dropped++; | 1026 | dev->stats.rx_dropped++; |
1028 | goto defer; | 1027 | goto defer; |
1029 | } | 1028 | } |
1030 | } | 1029 | } |
@@ -1033,8 +1032,8 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) | |||
1033 | netif_rx(skb); /* send it up */ | 1032 | netif_rx(skb); /* send it up */ |
1034 | 1033 | ||
1035 | dev->last_rx = jiffies; | 1034 | dev->last_rx = jiffies; |
1036 | rrpriv->stats.rx_packets++; | 1035 | dev->stats.rx_packets++; |
1037 | rrpriv->stats.rx_bytes += pkt_len; | 1036 | dev->stats.rx_bytes += pkt_len; |
1038 | } | 1037 | } |
1039 | defer: | 1038 | defer: |
1040 | desc->mode = 0; | 1039 | desc->mode = 0; |
@@ -1102,8 +1101,8 @@ static irqreturn_t rr_interrupt(int irq, void *dev_id) | |||
1102 | desc = &(rrpriv->tx_ring[txcon]); | 1101 | desc = &(rrpriv->tx_ring[txcon]); |
1103 | skb = rrpriv->tx_skbuff[txcon]; | 1102 | skb = rrpriv->tx_skbuff[txcon]; |
1104 | 1103 | ||
1105 | rrpriv->stats.tx_packets++; | 1104 | dev->stats.tx_packets++; |
1106 | rrpriv->stats.tx_bytes += skb->len; | 1105 | dev->stats.tx_bytes += skb->len; |
1107 | 1106 | ||
1108 | pci_unmap_single(rrpriv->pci_dev, | 1107 | pci_unmap_single(rrpriv->pci_dev, |
1109 | desc->addr.addrlo, skb->len, | 1108 | desc->addr.addrlo, skb->len, |
@@ -1491,16 +1490,6 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1491 | } | 1490 | } |
1492 | 1491 | ||
1493 | 1492 | ||
1494 | static struct net_device_stats *rr_get_stats(struct net_device *dev) | ||
1495 | { | ||
1496 | struct rr_private *rrpriv; | ||
1497 | |||
1498 | rrpriv = netdev_priv(dev); | ||
1499 | |||
1500 | return(&rrpriv->stats); | ||
1501 | } | ||
1502 | |||
1503 | |||
1504 | /* | 1493 | /* |
1505 | * Read the firmware out of the EEPROM and put it into the SRAM | 1494 | * Read the firmware out of the EEPROM and put it into the SRAM |
1506 | * (or from user space - later) | 1495 | * (or from user space - later) |
diff --git a/drivers/net/rrunner.h b/drivers/net/rrunner.h index 9f3e050c4dc6..6a79825bc8cf 100644 --- a/drivers/net/rrunner.h +++ b/drivers/net/rrunner.h | |||
@@ -819,7 +819,6 @@ struct rr_private | |||
819 | u32 tx_full; | 819 | u32 tx_full; |
820 | u32 fw_rev; | 820 | u32 fw_rev; |
821 | volatile short fw_running; | 821 | volatile short fw_running; |
822 | struct net_device_stats stats; | ||
823 | struct pci_dev *pci_dev; | 822 | struct pci_dev *pci_dev; |
824 | }; | 823 | }; |
825 | 824 | ||
@@ -834,7 +833,6 @@ static irqreturn_t rr_interrupt(int irq, void *dev_id); | |||
834 | static int rr_open(struct net_device *dev); | 833 | static int rr_open(struct net_device *dev); |
835 | static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev); | 834 | static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev); |
836 | static int rr_close(struct net_device *dev); | 835 | static int rr_close(struct net_device *dev); |
837 | static struct net_device_stats *rr_get_stats(struct net_device *dev); | ||
838 | static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 836 | static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
839 | static unsigned int rr_read_eeprom(struct rr_private *rrpriv, | 837 | static unsigned int rr_read_eeprom(struct rr_private *rrpriv, |
840 | unsigned long offset, | 838 | unsigned long offset, |
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c index 7dae4d404978..14361e885415 100644 --- a/drivers/net/saa9730.c +++ b/drivers/net/saa9730.c | |||
@@ -151,30 +151,30 @@ static void __attribute_used__ show_saa9730_regs(struct lan_saa9730_private *lp) | |||
151 | printk("lp->lan_saa9730_regs->CamData = %x\n", | 151 | printk("lp->lan_saa9730_regs->CamData = %x\n", |
152 | readl(&lp->lan_saa9730_regs->CamData)); | 152 | readl(&lp->lan_saa9730_regs->CamData)); |
153 | } | 153 | } |
154 | printk("lp->stats.tx_packets = %lx\n", lp->stats.tx_packets); | 154 | printk("dev->stats.tx_packets = %lx\n", dev->stats.tx_packets); |
155 | printk("lp->stats.tx_errors = %lx\n", lp->stats.tx_errors); | 155 | printk("dev->stats.tx_errors = %lx\n", dev->stats.tx_errors); |
156 | printk("lp->stats.tx_aborted_errors = %lx\n", | 156 | printk("dev->stats.tx_aborted_errors = %lx\n", |
157 | lp->stats.tx_aborted_errors); | 157 | dev->stats.tx_aborted_errors); |
158 | printk("lp->stats.tx_window_errors = %lx\n", | 158 | printk("dev->stats.tx_window_errors = %lx\n", |
159 | lp->stats.tx_window_errors); | 159 | dev->stats.tx_window_errors); |
160 | printk("lp->stats.tx_carrier_errors = %lx\n", | 160 | printk("dev->stats.tx_carrier_errors = %lx\n", |
161 | lp->stats.tx_carrier_errors); | 161 | dev->stats.tx_carrier_errors); |
162 | printk("lp->stats.tx_fifo_errors = %lx\n", | 162 | printk("dev->stats.tx_fifo_errors = %lx\n", |
163 | lp->stats.tx_fifo_errors); | 163 | dev->stats.tx_fifo_errors); |
164 | printk("lp->stats.tx_heartbeat_errors = %lx\n", | 164 | printk("dev->stats.tx_heartbeat_errors = %lx\n", |
165 | lp->stats.tx_heartbeat_errors); | 165 | dev->stats.tx_heartbeat_errors); |
166 | printk("lp->stats.collisions = %lx\n", lp->stats.collisions); | 166 | printk("dev->stats.collisions = %lx\n", dev->stats.collisions); |
167 | 167 | ||
168 | printk("lp->stats.rx_packets = %lx\n", lp->stats.rx_packets); | 168 | printk("dev->stats.rx_packets = %lx\n", dev->stats.rx_packets); |
169 | printk("lp->stats.rx_errors = %lx\n", lp->stats.rx_errors); | 169 | printk("dev->stats.rx_errors = %lx\n", dev->stats.rx_errors); |
170 | printk("lp->stats.rx_dropped = %lx\n", lp->stats.rx_dropped); | 170 | printk("dev->stats.rx_dropped = %lx\n", dev->stats.rx_dropped); |
171 | printk("lp->stats.rx_crc_errors = %lx\n", lp->stats.rx_crc_errors); | 171 | printk("dev->stats.rx_crc_errors = %lx\n", dev->stats.rx_crc_errors); |
172 | printk("lp->stats.rx_frame_errors = %lx\n", | 172 | printk("dev->stats.rx_frame_errors = %lx\n", |
173 | lp->stats.rx_frame_errors); | 173 | dev->stats.rx_frame_errors); |
174 | printk("lp->stats.rx_fifo_errors = %lx\n", | 174 | printk("dev->stats.rx_fifo_errors = %lx\n", |
175 | lp->stats.rx_fifo_errors); | 175 | dev->stats.rx_fifo_errors); |
176 | printk("lp->stats.rx_length_errors = %lx\n", | 176 | printk("dev->stats.rx_length_errors = %lx\n", |
177 | lp->stats.rx_length_errors); | 177 | dev->stats.rx_length_errors); |
178 | 178 | ||
179 | printk("lp->lan_saa9730_regs->DebugPCIMasterAddr = %x\n", | 179 | printk("lp->lan_saa9730_regs->DebugPCIMasterAddr = %x\n", |
180 | readl(&lp->lan_saa9730_regs->DebugPCIMasterAddr)); | 180 | readl(&lp->lan_saa9730_regs->DebugPCIMasterAddr)); |
@@ -605,24 +605,24 @@ static int lan_saa9730_tx(struct net_device *dev) | |||
605 | printk("lan_saa9730_tx: tx error = %x\n", | 605 | printk("lan_saa9730_tx: tx error = %x\n", |
606 | tx_status); | 606 | tx_status); |
607 | 607 | ||
608 | lp->stats.tx_errors++; | 608 | dev->stats.tx_errors++; |
609 | if (tx_status & | 609 | if (tx_status & |
610 | (TX_STATUS_EX_COLL << TX_STAT_CTL_STATUS_SHF)) | 610 | (TX_STATUS_EX_COLL << TX_STAT_CTL_STATUS_SHF)) |
611 | lp->stats.tx_aborted_errors++; | 611 | dev->stats.tx_aborted_errors++; |
612 | if (tx_status & | 612 | if (tx_status & |
613 | (TX_STATUS_LATE_COLL << TX_STAT_CTL_STATUS_SHF)) | 613 | (TX_STATUS_LATE_COLL << TX_STAT_CTL_STATUS_SHF)) |
614 | lp->stats.tx_window_errors++; | 614 | dev->stats.tx_window_errors++; |
615 | if (tx_status & | 615 | if (tx_status & |
616 | (TX_STATUS_L_CARR << TX_STAT_CTL_STATUS_SHF)) | 616 | (TX_STATUS_L_CARR << TX_STAT_CTL_STATUS_SHF)) |
617 | lp->stats.tx_carrier_errors++; | 617 | dev->stats.tx_carrier_errors++; |
618 | if (tx_status & | 618 | if (tx_status & |
619 | (TX_STATUS_UNDER << TX_STAT_CTL_STATUS_SHF)) | 619 | (TX_STATUS_UNDER << TX_STAT_CTL_STATUS_SHF)) |
620 | lp->stats.tx_fifo_errors++; | 620 | dev->stats.tx_fifo_errors++; |
621 | if (tx_status & | 621 | if (tx_status & |
622 | (TX_STATUS_SQ_ERR << TX_STAT_CTL_STATUS_SHF)) | 622 | (TX_STATUS_SQ_ERR << TX_STAT_CTL_STATUS_SHF)) |
623 | lp->stats.tx_heartbeat_errors++; | 623 | dev->stats.tx_heartbeat_errors++; |
624 | 624 | ||
625 | lp->stats.collisions += | 625 | dev->stats.collisions += |
626 | tx_status & TX_STATUS_TX_COLL_MSK; | 626 | tx_status & TX_STATUS_TX_COLL_MSK; |
627 | } | 627 | } |
628 | 628 | ||
@@ -684,10 +684,10 @@ static int lan_saa9730_rx(struct net_device *dev) | |||
684 | printk | 684 | printk |
685 | ("%s: Memory squeeze, deferring packet.\n", | 685 | ("%s: Memory squeeze, deferring packet.\n", |
686 | dev->name); | 686 | dev->name); |
687 | lp->stats.rx_dropped++; | 687 | dev->stats.rx_dropped++; |
688 | } else { | 688 | } else { |
689 | lp->stats.rx_bytes += len; | 689 | dev->stats.rx_bytes += len; |
690 | lp->stats.rx_packets++; | 690 | dev->stats.rx_packets++; |
691 | skb_reserve(skb, 2); /* 16 byte align */ | 691 | skb_reserve(skb, 2); /* 16 byte align */ |
692 | skb_put(skb, len); /* make room */ | 692 | skb_put(skb, len); /* make room */ |
693 | skb_copy_to_linear_data(skb, | 693 | skb_copy_to_linear_data(skb, |
@@ -704,19 +704,19 @@ static int lan_saa9730_rx(struct net_device *dev) | |||
704 | ("lan_saa9730_rx: We got an error packet = %x\n", | 704 | ("lan_saa9730_rx: We got an error packet = %x\n", |
705 | rx_status); | 705 | rx_status); |
706 | 706 | ||
707 | lp->stats.rx_errors++; | 707 | dev->stats.rx_errors++; |
708 | if (rx_status & | 708 | if (rx_status & |
709 | (RX_STATUS_CRC_ERR << RX_STAT_CTL_STATUS_SHF)) | 709 | (RX_STATUS_CRC_ERR << RX_STAT_CTL_STATUS_SHF)) |
710 | lp->stats.rx_crc_errors++; | 710 | dev->stats.rx_crc_errors++; |
711 | if (rx_status & | 711 | if (rx_status & |
712 | (RX_STATUS_ALIGN_ERR << RX_STAT_CTL_STATUS_SHF)) | 712 | (RX_STATUS_ALIGN_ERR << RX_STAT_CTL_STATUS_SHF)) |
713 | lp->stats.rx_frame_errors++; | 713 | dev->stats.rx_frame_errors++; |
714 | if (rx_status & | 714 | if (rx_status & |
715 | (RX_STATUS_OVERFLOW << RX_STAT_CTL_STATUS_SHF)) | 715 | (RX_STATUS_OVERFLOW << RX_STAT_CTL_STATUS_SHF)) |
716 | lp->stats.rx_fifo_errors++; | 716 | dev->stats.rx_fifo_errors++; |
717 | if (rx_status & | 717 | if (rx_status & |
718 | (RX_STATUS_LONG_ERR << RX_STAT_CTL_STATUS_SHF)) | 718 | (RX_STATUS_LONG_ERR << RX_STAT_CTL_STATUS_SHF)) |
719 | lp->stats.rx_length_errors++; | 719 | dev->stats.rx_length_errors++; |
720 | } | 720 | } |
721 | 721 | ||
722 | /* Indicate we have processed the buffer. */ | 722 | /* Indicate we have processed the buffer. */ |
@@ -853,7 +853,7 @@ static void lan_saa9730_tx_timeout(struct net_device *dev) | |||
853 | struct lan_saa9730_private *lp = netdev_priv(dev); | 853 | struct lan_saa9730_private *lp = netdev_priv(dev); |
854 | 854 | ||
855 | /* Transmitter timeout, serious problems */ | 855 | /* Transmitter timeout, serious problems */ |
856 | lp->stats.tx_errors++; | 856 | dev->stats.tx_errors++; |
857 | printk("%s: transmit timed out, reset\n", dev->name); | 857 | printk("%s: transmit timed out, reset\n", dev->name); |
858 | /*show_saa9730_regs(lp); */ | 858 | /*show_saa9730_regs(lp); */ |
859 | lan_saa9730_restart(lp); | 859 | lan_saa9730_restart(lp); |
@@ -886,8 +886,8 @@ static int lan_saa9730_start_xmit(struct sk_buff *skb, | |||
886 | return -1; | 886 | return -1; |
887 | } | 887 | } |
888 | 888 | ||
889 | lp->stats.tx_bytes += len; | 889 | dev->stats.tx_bytes += len; |
890 | lp->stats.tx_packets++; | 890 | dev->stats.tx_packets++; |
891 | 891 | ||
892 | dev->trans_start = jiffies; | 892 | dev->trans_start = jiffies; |
893 | netif_wake_queue(dev); | 893 | netif_wake_queue(dev); |
@@ -919,14 +919,6 @@ static int lan_saa9730_close(struct net_device *dev) | |||
919 | return 0; | 919 | return 0; |
920 | } | 920 | } |
921 | 921 | ||
922 | static struct net_device_stats *lan_saa9730_get_stats(struct net_device | ||
923 | *dev) | ||
924 | { | ||
925 | struct lan_saa9730_private *lp = netdev_priv(dev); | ||
926 | |||
927 | return &lp->stats; | ||
928 | } | ||
929 | |||
930 | static void lan_saa9730_set_multicast(struct net_device *dev) | 922 | static void lan_saa9730_set_multicast(struct net_device *dev) |
931 | { | 923 | { |
932 | struct lan_saa9730_private *lp = netdev_priv(dev); | 924 | struct lan_saa9730_private *lp = netdev_priv(dev); |
@@ -1040,7 +1032,6 @@ static int lan_saa9730_init(struct net_device *dev, struct pci_dev *pdev, | |||
1040 | dev->open = lan_saa9730_open; | 1032 | dev->open = lan_saa9730_open; |
1041 | dev->hard_start_xmit = lan_saa9730_start_xmit; | 1033 | dev->hard_start_xmit = lan_saa9730_start_xmit; |
1042 | dev->stop = lan_saa9730_close; | 1034 | dev->stop = lan_saa9730_close; |
1043 | dev->get_stats = lan_saa9730_get_stats; | ||
1044 | dev->set_multicast_list = lan_saa9730_set_multicast; | 1035 | dev->set_multicast_list = lan_saa9730_set_multicast; |
1045 | dev->tx_timeout = lan_saa9730_tx_timeout; | 1036 | dev->tx_timeout = lan_saa9730_tx_timeout; |
1046 | dev->watchdog_timeo = (HZ >> 1); | 1037 | dev->watchdog_timeo = (HZ >> 1); |
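
The saa9730 debug dump now prints dev->stats.* although the hunk context still shows the old lp-only prototype, so the helper is assumed to have gained access to the net_device, for example by taking it as its parameter. A sketch of that assumed shape, not the literal patch:

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Assumed post-conversion form of a stats dump helper. */
static void demo_show_stats(struct net_device *dev)
{
	printk("dev->stats.tx_packets = %lx\n", dev->stats.tx_packets);
	printk("dev->stats.rx_packets = %lx\n", dev->stats.rx_packets);
	printk("dev->stats.collisions = %lx\n", dev->stats.collisions);
}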
diff --git a/drivers/net/saa9730.h b/drivers/net/saa9730.h index f656f2f40bb8..010a120ea938 100644 --- a/drivers/net/saa9730.h +++ b/drivers/net/saa9730.h | |||
@@ -378,7 +378,6 @@ struct lan_saa9730_private { | |||
378 | 378 | ||
379 | unsigned char PhysicalAddress[LAN_SAA9730_CAM_ENTRIES][6]; | 379 | unsigned char PhysicalAddress[LAN_SAA9730_CAM_ENTRIES][6]; |
380 | 380 | ||
381 | struct net_device_stats stats; | ||
382 | spinlock_t lock; | 381 | spinlock_t lock; |
383 | }; | 382 | }; |
384 | 383 | ||
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index aeaa75f549e6..487f9d2ac5b4 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c | |||
@@ -76,7 +76,6 @@ struct sb1000_private { | |||
76 | unsigned char rx_session_id[NPIDS]; | 76 | unsigned char rx_session_id[NPIDS]; |
77 | unsigned char rx_frame_id[NPIDS]; | 77 | unsigned char rx_frame_id[NPIDS]; |
78 | unsigned char rx_pkt_type[NPIDS]; | 78 | unsigned char rx_pkt_type[NPIDS]; |
79 | struct net_device_stats stats; | ||
80 | }; | 79 | }; |
81 | 80 | ||
82 | /* prototypes for Linux interface */ | 81 | /* prototypes for Linux interface */ |
@@ -85,7 +84,6 @@ static int sb1000_open(struct net_device *dev); | |||
85 | static int sb1000_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd); | 84 | static int sb1000_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd); |
86 | static int sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev); | 85 | static int sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev); |
87 | static irqreturn_t sb1000_interrupt(int irq, void *dev_id); | 86 | static irqreturn_t sb1000_interrupt(int irq, void *dev_id); |
88 | static struct net_device_stats *sb1000_stats(struct net_device *dev); | ||
89 | static int sb1000_close(struct net_device *dev); | 87 | static int sb1000_close(struct net_device *dev); |
90 | 88 | ||
91 | 89 | ||
@@ -199,7 +197,6 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id) | |||
199 | dev->do_ioctl = sb1000_dev_ioctl; | 197 | dev->do_ioctl = sb1000_dev_ioctl; |
200 | dev->hard_start_xmit = sb1000_start_xmit; | 198 | dev->hard_start_xmit = sb1000_start_xmit; |
201 | dev->stop = sb1000_close; | 199 | dev->stop = sb1000_close; |
202 | dev->get_stats = sb1000_stats; | ||
203 | 200 | ||
204 | /* hardware address is 0:0:serial_number */ | 201 | /* hardware address is 0:0:serial_number */ |
205 | dev->dev_addr[2] = serial_number >> 24 & 0xff; | 202 | dev->dev_addr[2] = serial_number >> 24 & 0xff; |
@@ -739,7 +736,7 @@ sb1000_rx(struct net_device *dev) | |||
739 | unsigned int skbsize; | 736 | unsigned int skbsize; |
740 | struct sk_buff *skb; | 737 | struct sk_buff *skb; |
741 | struct sb1000_private *lp = netdev_priv(dev); | 738 | struct sb1000_private *lp = netdev_priv(dev); |
742 | struct net_device_stats *stats = &lp->stats; | 739 | struct net_device_stats *stats = &dev->stats; |
743 | 740 | ||
744 | /* SB1000 frame constants */ | 741 | /* SB1000 frame constants */ |
745 | const int FrameSize = FRAMESIZE; | 742 | const int FrameSize = FRAMESIZE; |
@@ -1002,11 +999,11 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1002 | 999 | ||
1003 | switch (cmd) { | 1000 | switch (cmd) { |
1004 | case SIOCGCMSTATS: /* get statistics */ | 1001 | case SIOCGCMSTATS: /* get statistics */ |
1005 | stats[0] = lp->stats.rx_bytes; | 1002 | stats[0] = dev->stats.rx_bytes; |
1006 | stats[1] = lp->rx_frames; | 1003 | stats[1] = lp->rx_frames; |
1007 | stats[2] = lp->stats.rx_packets; | 1004 | stats[2] = dev->stats.rx_packets; |
1008 | stats[3] = lp->stats.rx_errors; | 1005 | stats[3] = dev->stats.rx_errors; |
1009 | stats[4] = lp->stats.rx_dropped; | 1006 | stats[4] = dev->stats.rx_dropped; |
1010 | if(copy_to_user(ifr->ifr_data, stats, sizeof(stats))) | 1007 | if(copy_to_user(ifr->ifr_data, stats, sizeof(stats))) |
1011 | return -EFAULT; | 1008 | return -EFAULT; |
1012 | status = 0; | 1009 | status = 0; |
@@ -1132,12 +1129,6 @@ static irqreturn_t sb1000_interrupt(int irq, void *dev_id) | |||
1132 | return IRQ_HANDLED; | 1129 | return IRQ_HANDLED; |
1133 | } | 1130 | } |
1134 | 1131 | ||
1135 | static struct net_device_stats *sb1000_stats(struct net_device *dev) | ||
1136 | { | ||
1137 | struct sb1000_private *lp = netdev_priv(dev); | ||
1138 | return &lp->stats; | ||
1139 | } | ||
1140 | |||
1141 | static int sb1000_close(struct net_device *dev) | 1132 | static int sb1000_close(struct net_device *dev) |
1142 | { | 1133 | { |
1143 | int i; | 1134 | int i; |
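
sb1000 keeps the long body of sb1000_rx() untouched by re-pointing its local alias at the embedded counters (struct net_device_stats *stats = &dev->stats) and reads dev->stats directly in the ioctl path. The alias idiom in isolation, with a made-up function name:

#include <linux/netdevice.h>

/* Take the alias once so a long receive path can keep using stats->... */
static void demo_rx_account(struct net_device *dev, unsigned int len)
{
	struct net_device_stats *stats = &dev->stats;

	stats->rx_packets++;
	stats->rx_bytes += len;
}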
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index b6cafac97200..76e7ee9a6cbc 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -241,7 +241,6 @@ struct sbmac_softc { | |||
241 | struct napi_struct napi; | 241 | struct napi_struct napi; |
242 | spinlock_t sbm_lock; /* spin lock */ | 242 | spinlock_t sbm_lock; /* spin lock */ |
243 | struct timer_list sbm_timer; /* for monitoring MII */ | 243 | struct timer_list sbm_timer; /* for monitoring MII */ |
244 | struct net_device_stats sbm_stats; | ||
245 | int sbm_devflags; /* current device flags */ | 244 | int sbm_devflags; /* current device flags */ |
246 | 245 | ||
247 | int sbm_phy_oldbmsr; | 246 | int sbm_phy_oldbmsr; |
@@ -317,7 +316,6 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc | |||
317 | static int sbmac_open(struct net_device *dev); | 316 | static int sbmac_open(struct net_device *dev); |
318 | static void sbmac_timer(unsigned long data); | 317 | static void sbmac_timer(unsigned long data); |
319 | static void sbmac_tx_timeout (struct net_device *dev); | 318 | static void sbmac_tx_timeout (struct net_device *dev); |
320 | static struct net_device_stats *sbmac_get_stats(struct net_device *dev); | ||
321 | static void sbmac_set_rx_mode(struct net_device *dev); | 319 | static void sbmac_set_rx_mode(struct net_device *dev); |
322 | static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 320 | static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
323 | static int sbmac_close(struct net_device *dev); | 321 | static int sbmac_close(struct net_device *dev); |
@@ -1190,6 +1188,7 @@ static void sbmac_netpoll(struct net_device *netdev) | |||
1190 | static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, | 1188 | static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, |
1191 | int work_to_do, int poll) | 1189 | int work_to_do, int poll) |
1192 | { | 1190 | { |
1191 | struct net_device *dev = sc->sbm_dev; | ||
1193 | int curidx; | 1192 | int curidx; |
1194 | int hwidx; | 1193 | int hwidx; |
1195 | sbdmadscr_t *dsc; | 1194 | sbdmadscr_t *dsc; |
@@ -1202,7 +1201,7 @@ static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, | |||
1202 | 1201 | ||
1203 | again: | 1202 | again: |
1204 | /* Check if the HW dropped any frames */ | 1203 | /* Check if the HW dropped any frames */ |
1205 | sc->sbm_stats.rx_fifo_errors | 1204 | dev->stats.rx_fifo_errors |
1206 | += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff; | 1205 | += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff; |
1207 | __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost); | 1206 | __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost); |
1208 | 1207 | ||
@@ -1261,7 +1260,7 @@ again: | |||
1261 | 1260 | ||
1262 | if (unlikely (sbdma_add_rcvbuffer(d,NULL) == | 1261 | if (unlikely (sbdma_add_rcvbuffer(d,NULL) == |
1263 | -ENOBUFS)) { | 1262 | -ENOBUFS)) { |
1264 | sc->sbm_stats.rx_dropped++; | 1263 | dev->stats.rx_dropped++; |
1265 | sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ | 1264 | sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ |
1266 | /* No point in continuing at the moment */ | 1265 | /* No point in continuing at the moment */ |
1267 | printk(KERN_ERR "dropped packet (1)\n"); | 1266 | printk(KERN_ERR "dropped packet (1)\n"); |
@@ -1297,13 +1296,13 @@ again: | |||
1297 | dropped = netif_rx(sb); | 1296 | dropped = netif_rx(sb); |
1298 | 1297 | ||
1299 | if (dropped == NET_RX_DROP) { | 1298 | if (dropped == NET_RX_DROP) { |
1300 | sc->sbm_stats.rx_dropped++; | 1299 | dev->stats.rx_dropped++; |
1301 | d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); | 1300 | d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); |
1302 | goto done; | 1301 | goto done; |
1303 | } | 1302 | } |
1304 | else { | 1303 | else { |
1305 | sc->sbm_stats.rx_bytes += len; | 1304 | dev->stats.rx_bytes += len; |
1306 | sc->sbm_stats.rx_packets++; | 1305 | dev->stats.rx_packets++; |
1307 | } | 1306 | } |
1308 | } | 1307 | } |
1309 | } else { | 1308 | } else { |
@@ -1311,7 +1310,7 @@ again: | |||
1311 | * Packet was mangled somehow. Just drop it and | 1310 | * Packet was mangled somehow. Just drop it and |
1312 | * put it back on the receive ring. | 1311 | * put it back on the receive ring. |
1313 | */ | 1312 | */ |
1314 | sc->sbm_stats.rx_errors++; | 1313 | dev->stats.rx_errors++; |
1315 | sbdma_add_rcvbuffer(d,sb); | 1314 | sbdma_add_rcvbuffer(d,sb); |
1316 | } | 1315 | } |
1317 | 1316 | ||
@@ -1351,6 +1350,7 @@ done: | |||
1351 | 1350 | ||
1352 | static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll) | 1351 | static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll) |
1353 | { | 1352 | { |
1353 | struct net_device *dev = sc->sbm_dev; | ||
1354 | int curidx; | 1354 | int curidx; |
1355 | int hwidx; | 1355 | int hwidx; |
1356 | sbdmadscr_t *dsc; | 1356 | sbdmadscr_t *dsc; |
@@ -1401,8 +1401,8 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll) | |||
1401 | * Stats | 1401 | * Stats |
1402 | */ | 1402 | */ |
1403 | 1403 | ||
1404 | sc->sbm_stats.tx_bytes += sb->len; | 1404 | dev->stats.tx_bytes += sb->len; |
1405 | sc->sbm_stats.tx_packets++; | 1405 | dev->stats.tx_packets++; |
1406 | 1406 | ||
1407 | /* | 1407 | /* |
1408 | * for transmits, we just free buffers. | 1408 | * for transmits, we just free buffers. |
@@ -2457,7 +2457,6 @@ static int sbmac_init(struct net_device *dev, int idx) | |||
2457 | dev->open = sbmac_open; | 2457 | dev->open = sbmac_open; |
2458 | dev->hard_start_xmit = sbmac_start_tx; | 2458 | dev->hard_start_xmit = sbmac_start_tx; |
2459 | dev->stop = sbmac_close; | 2459 | dev->stop = sbmac_close; |
2460 | dev->get_stats = sbmac_get_stats; | ||
2461 | dev->set_multicast_list = sbmac_set_rx_mode; | 2460 | dev->set_multicast_list = sbmac_set_rx_mode; |
2462 | dev->do_ioctl = sbmac_mii_ioctl; | 2461 | dev->do_ioctl = sbmac_mii_ioctl; |
2463 | dev->tx_timeout = sbmac_tx_timeout; | 2462 | dev->tx_timeout = sbmac_tx_timeout; |
@@ -2748,7 +2747,7 @@ static void sbmac_tx_timeout (struct net_device *dev) | |||
2748 | 2747 | ||
2749 | 2748 | ||
2750 | dev->trans_start = jiffies; | 2749 | dev->trans_start = jiffies; |
2751 | sc->sbm_stats.tx_errors++; | 2750 | dev->stats.tx_errors++; |
2752 | 2751 | ||
2753 | spin_unlock_irq (&sc->sbm_lock); | 2752 | spin_unlock_irq (&sc->sbm_lock); |
2754 | 2753 | ||
@@ -2758,22 +2757,6 @@ static void sbmac_tx_timeout (struct net_device *dev) | |||
2758 | 2757 | ||
2759 | 2758 | ||
2760 | 2759 | ||
2761 | static struct net_device_stats *sbmac_get_stats(struct net_device *dev) | ||
2762 | { | ||
2763 | struct sbmac_softc *sc = netdev_priv(dev); | ||
2764 | unsigned long flags; | ||
2765 | |||
2766 | spin_lock_irqsave(&sc->sbm_lock, flags); | ||
2767 | |||
2768 | /* XXX update other stats here */ | ||
2769 | |||
2770 | spin_unlock_irqrestore(&sc->sbm_lock, flags); | ||
2771 | |||
2772 | return &sc->sbm_stats; | ||
2773 | } | ||
2774 | |||
2775 | |||
2776 | |||
2777 | static void sbmac_set_rx_mode(struct net_device *dev) | 2760 | static void sbmac_set_rx_mode(struct net_device *dev) |
2778 | { | 2761 | { |
2779 | unsigned long flags; | 2762 | unsigned long flags; |
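
sb1250-mac's DMA helpers receive only the softc, so sbdma_rx_process() and sbdma_tx_process() each gain a local struct net_device *dev = sc->sbm_dev before touching the counters; the deleted sbmac_get_stats() took sbm_lock only to return a pointer, so the unlocked default path loses nothing. The local back-pointer idiom, with a hypothetical softc:

#include <linux/netdevice.h>

struct demo_softc {
	struct net_device *dev;			/* back-pointer, like sbm_dev above */
};

static void demo_dma_process(struct demo_softc *sc, unsigned int len)
{
	struct net_device *dev = sc->dev;	/* fetch once, then use dev->stats */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}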
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index 4bce7c4f373c..8ef94028cba5 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c | |||
@@ -67,7 +67,6 @@ static unsigned int net_debug = NET_DEBUG; | |||
67 | 67 | ||
68 | /* Information that need to be kept for each board. */ | 68 | /* Information that need to be kept for each board. */ |
69 | struct net_local { | 69 | struct net_local { |
70 | struct net_device_stats stats; | ||
71 | unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */ | 70 | unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */ |
72 | long open_time; /* Useless example local info. */ | 71 | long open_time; /* Useless example local info. */ |
73 | }; | 72 | }; |
@@ -86,7 +85,6 @@ static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev); | |||
86 | static irqreturn_t seeq8005_interrupt(int irq, void *dev_id); | 85 | static irqreturn_t seeq8005_interrupt(int irq, void *dev_id); |
87 | static void seeq8005_rx(struct net_device *dev); | 86 | static void seeq8005_rx(struct net_device *dev); |
88 | static int seeq8005_close(struct net_device *dev); | 87 | static int seeq8005_close(struct net_device *dev); |
89 | static struct net_device_stats *seeq8005_get_stats(struct net_device *dev); | ||
90 | static void set_multicast_list(struct net_device *dev); | 88 | static void set_multicast_list(struct net_device *dev); |
91 | 89 | ||
92 | /* Example routines you must write ;->. */ | 90 | /* Example routines you must write ;->. */ |
@@ -338,7 +336,6 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr) | |||
338 | dev->hard_start_xmit = seeq8005_send_packet; | 336 | dev->hard_start_xmit = seeq8005_send_packet; |
339 | dev->tx_timeout = seeq8005_timeout; | 337 | dev->tx_timeout = seeq8005_timeout; |
340 | dev->watchdog_timeo = HZ/20; | 338 | dev->watchdog_timeo = HZ/20; |
341 | dev->get_stats = seeq8005_get_stats; | ||
342 | dev->set_multicast_list = set_multicast_list; | 339 | dev->set_multicast_list = set_multicast_list; |
343 | dev->flags &= ~IFF_MULTICAST; | 340 | dev->flags &= ~IFF_MULTICAST; |
344 | 341 | ||
@@ -391,7 +388,6 @@ static void seeq8005_timeout(struct net_device *dev) | |||
391 | 388 | ||
392 | static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev) | 389 | static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev) |
393 | { | 390 | { |
394 | struct net_local *lp = netdev_priv(dev); | ||
395 | short length = skb->len; | 391 | short length = skb->len; |
396 | unsigned char *buf; | 392 | unsigned char *buf; |
397 | 393 | ||
@@ -407,7 +403,7 @@ static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
407 | 403 | ||
408 | hardware_send_packet(dev, buf, length); | 404 | hardware_send_packet(dev, buf, length); |
409 | dev->trans_start = jiffies; | 405 | dev->trans_start = jiffies; |
410 | lp->stats.tx_bytes += length; | 406 | dev->stats.tx_bytes += length; |
411 | dev_kfree_skb (skb); | 407 | dev_kfree_skb (skb); |
412 | /* You might need to clean up and record Tx statistics here. */ | 408 | /* You might need to clean up and record Tx statistics here. */ |
413 | 409 | ||
@@ -463,7 +459,7 @@ static irqreturn_t seeq8005_interrupt(int irq, void *dev_id) | |||
463 | if (status & SEEQSTAT_TX_INT) { | 459 | if (status & SEEQSTAT_TX_INT) { |
464 | handled = 1; | 460 | handled = 1; |
465 | outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); | 461 | outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); |
466 | lp->stats.tx_packets++; | 462 | dev->stats.tx_packets++; |
467 | netif_wake_queue(dev); /* Inform upper layers. */ | 463 | netif_wake_queue(dev); /* Inform upper layers. */ |
468 | } | 464 | } |
469 | if (status & SEEQSTAT_RX_INT) { | 465 | if (status & SEEQSTAT_RX_INT) { |
@@ -531,11 +527,11 @@ static void seeq8005_rx(struct net_device *dev) | |||
531 | } | 527 | } |
532 | 528 | ||
533 | if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */ | 529 | if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */ |
534 | lp->stats.rx_errors++; | 530 | dev->stats.rx_errors++; |
535 | if (pkt_hdr & SEEQPKTS_SHORT) lp->stats.rx_frame_errors++; | 531 | if (pkt_hdr & SEEQPKTS_SHORT) dev->stats.rx_frame_errors++; |
536 | if (pkt_hdr & SEEQPKTS_DRIB) lp->stats.rx_frame_errors++; | 532 | if (pkt_hdr & SEEQPKTS_DRIB) dev->stats.rx_frame_errors++; |
537 | if (pkt_hdr & SEEQPKTS_OVERSIZE) lp->stats.rx_over_errors++; | 533 | if (pkt_hdr & SEEQPKTS_OVERSIZE) dev->stats.rx_over_errors++; |
538 | if (pkt_hdr & SEEQPKTS_CRC_ERR) lp->stats.rx_crc_errors++; | 534 | if (pkt_hdr & SEEQPKTS_CRC_ERR) dev->stats.rx_crc_errors++; |
539 | /* skip over this packet */ | 535 | /* skip over this packet */ |
540 | outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); | 536 | outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); |
541 | outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA); | 537 | outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA); |
@@ -547,7 +543,7 @@ static void seeq8005_rx(struct net_device *dev) | |||
547 | skb = dev_alloc_skb(pkt_len); | 543 | skb = dev_alloc_skb(pkt_len); |
548 | if (skb == NULL) { | 544 | if (skb == NULL) { |
549 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); | 545 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); |
550 | lp->stats.rx_dropped++; | 546 | dev->stats.rx_dropped++; |
551 | break; | 547 | break; |
552 | } | 548 | } |
553 | skb_reserve(skb, 2); /* align data on 16 byte */ | 549 | skb_reserve(skb, 2); /* align data on 16 byte */ |
@@ -567,8 +563,8 @@ static void seeq8005_rx(struct net_device *dev) | |||
567 | skb->protocol=eth_type_trans(skb,dev); | 563 | skb->protocol=eth_type_trans(skb,dev); |
568 | netif_rx(skb); | 564 | netif_rx(skb); |
569 | dev->last_rx = jiffies; | 565 | dev->last_rx = jiffies; |
570 | lp->stats.rx_packets++; | 566 | dev->stats.rx_packets++; |
571 | lp->stats.rx_bytes += pkt_len; | 567 | dev->stats.rx_bytes += pkt_len; |
572 | } | 568 | } |
573 | } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN)); | 569 | } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN)); |
574 | 570 | ||
@@ -599,15 +595,6 @@ static int seeq8005_close(struct net_device *dev) | |||
599 | 595 | ||
600 | } | 596 | } |
601 | 597 | ||
602 | /* Get the current statistics. This may be called with the card open or | ||
603 | closed. */ | ||
604 | static struct net_device_stats *seeq8005_get_stats(struct net_device *dev) | ||
605 | { | ||
606 | struct net_local *lp = netdev_priv(dev); | ||
607 | |||
608 | return &lp->stats; | ||
609 | } | ||
610 | |||
611 | /* Set or clear the multicast filter for this adaptor. | 598 | /* Set or clear the multicast filter for this adaptor. |
612 | num_addrs == -1 Promiscuous mode, receive all packets | 599 | num_addrs == -1 Promiscuous mode, receive all packets |
613 | num_addrs == 0 Normal mode, clear multicast list | 600 | num_addrs == 0 Normal mode, clear multicast list |
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index eb67b024e413..5189ef066884 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c | |||
@@ -93,8 +93,6 @@ struct sgiseeq_private { | |||
93 | unsigned char control; | 93 | unsigned char control; |
94 | unsigned char mode; | 94 | unsigned char mode; |
95 | 95 | ||
96 | struct net_device_stats stats; | ||
97 | |||
98 | spinlock_t tx_lock; | 96 | spinlock_t tx_lock; |
99 | }; | 97 | }; |
100 | 98 | ||
@@ -267,18 +265,17 @@ static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, | |||
267 | return 0; | 265 | return 0; |
268 | } | 266 | } |
269 | 267 | ||
270 | static inline void record_rx_errors(struct sgiseeq_private *sp, | 268 | static void record_rx_errors(struct net_device *dev, unsigned char status) |
271 | unsigned char status) | ||
272 | { | 269 | { |
273 | if (status & SEEQ_RSTAT_OVERF || | 270 | if (status & SEEQ_RSTAT_OVERF || |
274 | status & SEEQ_RSTAT_SFRAME) | 271 | status & SEEQ_RSTAT_SFRAME) |
275 | sp->stats.rx_over_errors++; | 272 | dev->stats.rx_over_errors++; |
276 | if (status & SEEQ_RSTAT_CERROR) | 273 | if (status & SEEQ_RSTAT_CERROR) |
277 | sp->stats.rx_crc_errors++; | 274 | dev->stats.rx_crc_errors++; |
278 | if (status & SEEQ_RSTAT_DERROR) | 275 | if (status & SEEQ_RSTAT_DERROR) |
279 | sp->stats.rx_frame_errors++; | 276 | dev->stats.rx_frame_errors++; |
280 | if (status & SEEQ_RSTAT_REOF) | 277 | if (status & SEEQ_RSTAT_REOF) |
281 | sp->stats.rx_errors++; | 278 | dev->stats.rx_errors++; |
282 | } | 279 | } |
283 | 280 | ||
284 | static inline void rx_maybe_restart(struct sgiseeq_private *sp, | 281 | static inline void rx_maybe_restart(struct sgiseeq_private *sp, |
@@ -328,8 +325,8 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp | |||
328 | if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) { | 325 | if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) { |
329 | netif_rx(skb); | 326 | netif_rx(skb); |
330 | dev->last_rx = jiffies; | 327 | dev->last_rx = jiffies; |
331 | sp->stats.rx_packets++; | 328 | dev->stats.rx_packets++; |
332 | sp->stats.rx_bytes += len; | 329 | dev->stats.rx_bytes += len; |
333 | } else { | 330 | } else { |
334 | /* Silently drop my own packets */ | 331 | /* Silently drop my own packets */ |
335 | dev_kfree_skb_irq(skb); | 332 | dev_kfree_skb_irq(skb); |
@@ -337,10 +334,10 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp | |||
337 | } else { | 334 | } else { |
338 | printk (KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", | 335 | printk (KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", |
339 | dev->name); | 336 | dev->name); |
340 | sp->stats.rx_dropped++; | 337 | dev->stats.rx_dropped++; |
341 | } | 338 | } |
342 | } else { | 339 | } else { |
343 | record_rx_errors(sp, pkt_status); | 340 | record_rx_errors(dev, pkt_status); |
344 | } | 341 | } |
345 | 342 | ||
346 | /* Return the entry to the ring pool. */ | 343 | /* Return the entry to the ring pool. */ |
@@ -392,11 +389,11 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp | |||
392 | if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) { | 389 | if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) { |
393 | /* Oops, HPC detected some sort of error. */ | 390 | /* Oops, HPC detected some sort of error. */ |
394 | if (status & SEEQ_TSTAT_R16) | 391 | if (status & SEEQ_TSTAT_R16) |
395 | sp->stats.tx_aborted_errors++; | 392 | dev->stats.tx_aborted_errors++; |
396 | if (status & SEEQ_TSTAT_UFLOW) | 393 | if (status & SEEQ_TSTAT_UFLOW) |
397 | sp->stats.tx_fifo_errors++; | 394 | dev->stats.tx_fifo_errors++; |
398 | if (status & SEEQ_TSTAT_LCLS) | 395 | if (status & SEEQ_TSTAT_LCLS) |
399 | sp->stats.collisions++; | 396 | dev->stats.collisions++; |
400 | } | 397 | } |
401 | 398 | ||
402 | /* Ack 'em... */ | 399 | /* Ack 'em... */ |
@@ -412,7 +409,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp | |||
412 | } | 409 | } |
413 | break; | 410 | break; |
414 | } | 411 | } |
415 | sp->stats.tx_packets++; | 412 | dev->stats.tx_packets++; |
416 | sp->tx_old = NEXT_TX(sp->tx_old); | 413 | sp->tx_old = NEXT_TX(sp->tx_old); |
417 | td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE); | 414 | td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE); |
418 | td->tdma.cntinfo |= HPCDMA_EOX; | 415 | td->tdma.cntinfo |= HPCDMA_EOX; |
@@ -516,7 +513,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
516 | /* Setup... */ | 513 | /* Setup... */ |
517 | skblen = skb->len; | 514 | skblen = skb->len; |
518 | len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen; | 515 | len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen; |
519 | sp->stats.tx_bytes += len; | 516 | dev->stats.tx_bytes += len; |
520 | entry = sp->tx_new; | 517 | entry = sp->tx_new; |
521 | td = &sp->tx_desc[entry]; | 518 | td = &sp->tx_desc[entry]; |
522 | 519 | ||
@@ -569,13 +566,6 @@ static void timeout(struct net_device *dev) | |||
569 | netif_wake_queue(dev); | 566 | netif_wake_queue(dev); |
570 | } | 567 | } |
571 | 568 | ||
572 | static struct net_device_stats *sgiseeq_get_stats(struct net_device *dev) | ||
573 | { | ||
574 | struct sgiseeq_private *sp = netdev_priv(dev); | ||
575 | |||
576 | return &sp->stats; | ||
577 | } | ||
578 | |||
579 | static void sgiseeq_set_multicast(struct net_device *dev) | 569 | static void sgiseeq_set_multicast(struct net_device *dev) |
580 | { | 570 | { |
581 | struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv; | 571 | struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv; |
@@ -694,7 +684,6 @@ static int __init sgiseeq_probe(struct platform_device *pdev) | |||
694 | dev->hard_start_xmit = sgiseeq_start_xmit; | 684 | dev->hard_start_xmit = sgiseeq_start_xmit; |
695 | dev->tx_timeout = timeout; | 685 | dev->tx_timeout = timeout; |
696 | dev->watchdog_timeo = (200 * HZ) / 1000; | 686 | dev->watchdog_timeo = (200 * HZ) / 1000; |
697 | dev->get_stats = sgiseeq_get_stats; | ||
698 | dev->set_multicast_list = sgiseeq_set_multicast; | 687 | dev->set_multicast_list = sgiseeq_set_multicast; |
699 | dev->set_mac_address = sgiseeq_set_mac_address; | 688 | dev->set_mac_address = sgiseeq_set_mac_address; |
700 | dev->irq = irq; | 689 | dev->irq = irq; |
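
sgiseeq shows the companion cleanup for counter-only helpers: record_rx_errors() used to take the private struct purely to reach sp->stats, so its parameter becomes the net_device itself and the inline hint is dropped along the way. Once counters live in the net_device, that is the natural signature; a condensed sketch with a hypothetical status layout:

#include <linux/netdevice.h>

#define DEMO_RSTAT_CRC		0x01	/* hypothetical status bits */
#define DEMO_RSTAT_FRAME	0x02

/* Counter-only helpers take the net_device directly. */
static void demo_record_rx_errors(struct net_device *dev, unsigned char status)
{
	if (status & DEMO_RSTAT_CRC)
		dev->stats.rx_crc_errors++;
	if (status & DEMO_RSTAT_FRAME)
		dev->stats.rx_frame_errors++;
}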
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c index b56721a68a85..315feba7dacc 100644 --- a/drivers/net/shaper.c +++ b/drivers/net/shaper.c | |||
@@ -171,7 +171,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
171 | */ | 171 | */ |
172 | if(time_after(SHAPERCB(skb)->shapeclock,jiffies + SHAPER_LATENCY)) { | 172 | if(time_after(SHAPERCB(skb)->shapeclock,jiffies + SHAPER_LATENCY)) { |
173 | dev_kfree_skb(skb); | 173 | dev_kfree_skb(skb); |
174 | shaper->stats.tx_dropped++; | 174 | dev->stats.tx_dropped++; |
175 | } else | 175 | } else |
176 | skb_queue_tail(&shaper->sendq, skb); | 176 | skb_queue_tail(&shaper->sendq, skb); |
177 | } | 177 | } |
@@ -182,7 +182,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
182 | { | 182 | { |
183 | ptr=skb_dequeue(&shaper->sendq); | 183 | ptr=skb_dequeue(&shaper->sendq); |
184 | dev_kfree_skb(ptr); | 184 | dev_kfree_skb(ptr); |
185 | shaper->stats.collisions++; | 185 | dev->stats.collisions++; |
186 | } | 186 | } |
187 | shaper_kick(shaper); | 187 | shaper_kick(shaper); |
188 | spin_unlock(&shaper->lock); | 188 | spin_unlock(&shaper->lock); |
@@ -207,8 +207,8 @@ static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb) | |||
207 | shaper->dev->name,newskb->priority); | 207 | shaper->dev->name,newskb->priority); |
208 | dev_queue_xmit(newskb); | 208 | dev_queue_xmit(newskb); |
209 | 209 | ||
210 | shaper->stats.tx_bytes += skb->len; | 210 | shaper->dev->stats.tx_bytes += skb->len; |
211 | shaper->stats.tx_packets++; | 211 | shaper->dev->stats.tx_packets++; |
212 | 212 | ||
213 | if(sh_debug) | 213 | if(sh_debug) |
214 | printk("Kicked new frame out.\n"); | 214 | printk("Kicked new frame out.\n"); |
@@ -330,12 +330,6 @@ static int shaper_close(struct net_device *dev) | |||
330 | * ARP and other resolutions and not before. | 330 | * ARP and other resolutions and not before. |
331 | */ | 331 | */ |
332 | 332 | ||
333 | static struct net_device_stats *shaper_get_stats(struct net_device *dev) | ||
334 | { | ||
335 | struct shaper *sh=dev->priv; | ||
336 | return &sh->stats; | ||
337 | } | ||
338 | |||
339 | static int shaper_header(struct sk_buff *skb, struct net_device *dev, | 333 | static int shaper_header(struct sk_buff *skb, struct net_device *dev, |
340 | unsigned short type, void *daddr, void *saddr, unsigned len) | 334 | unsigned short type, void *daddr, void *saddr, unsigned len) |
341 | { | 335 | { |
@@ -538,7 +532,6 @@ static void __init shaper_setup(struct net_device *dev) | |||
538 | dev->open = shaper_open; | 532 | dev->open = shaper_open; |
539 | dev->stop = shaper_close; | 533 | dev->stop = shaper_close; |
540 | dev->hard_start_xmit = shaper_start_xmit; | 534 | dev->hard_start_xmit = shaper_start_xmit; |
541 | dev->get_stats = shaper_get_stats; | ||
542 | dev->set_multicast_list = NULL; | 535 | dev->set_multicast_list = NULL; |
543 | 536 | ||
544 | /* | 537 | /* |
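shaper.c shows the variant where the counting code only has the private struct in hand: the shaper keeps a back-pointer to its net_device, so the counters are reached as shaper->dev->stats. A small sketch of that shape (toy_shaper and toy_queue_xmit are illustrative names, not the shaper API):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Illustrative private struct with a back-pointer, as the shaper keeps. */
    struct toy_shaper {
            struct net_device *dev;
            struct sk_buff_head sendq;
    };

    static void toy_queue_xmit(struct toy_shaper *sh, struct sk_buff *skb)
    {
            unsigned int len = skb->len;    /* skb may be gone after xmit */

            dev_queue_xmit(skb);
            /* was: sh->stats.tx_bytes / sh->stats.tx_packets */
            sh->dev->stats.tx_bytes += len;
            sh->dev->stats.tx_packets++;
    }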
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index e810ae942cd6..808141b46585 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -270,7 +270,6 @@ struct sis190_private { | |||
270 | void __iomem *mmio_addr; | 270 | void __iomem *mmio_addr; |
271 | struct pci_dev *pci_dev; | 271 | struct pci_dev *pci_dev; |
272 | struct net_device *dev; | 272 | struct net_device *dev; |
273 | struct net_device_stats stats; | ||
274 | spinlock_t lock; | 273 | spinlock_t lock; |
275 | u32 rx_buf_sz; | 274 | u32 rx_buf_sz; |
276 | u32 cur_rx; | 275 | u32 cur_rx; |
@@ -569,7 +568,7 @@ static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) | |||
569 | static int sis190_rx_interrupt(struct net_device *dev, | 568 | static int sis190_rx_interrupt(struct net_device *dev, |
570 | struct sis190_private *tp, void __iomem *ioaddr) | 569 | struct sis190_private *tp, void __iomem *ioaddr) |
571 | { | 570 | { |
572 | struct net_device_stats *stats = &tp->stats; | 571 | struct net_device_stats *stats = &dev->stats; |
573 | u32 rx_left, cur_rx = tp->cur_rx; | 572 | u32 rx_left, cur_rx = tp->cur_rx; |
574 | u32 delta, count; | 573 | u32 delta, count; |
575 | 574 | ||
@@ -683,8 +682,8 @@ static void sis190_tx_interrupt(struct net_device *dev, | |||
683 | 682 | ||
684 | skb = tp->Tx_skbuff[entry]; | 683 | skb = tp->Tx_skbuff[entry]; |
685 | 684 | ||
686 | tp->stats.tx_packets++; | 685 | dev->stats.tx_packets++; |
687 | tp->stats.tx_bytes += skb->len; | 686 | dev->stats.tx_bytes += skb->len; |
688 | 687 | ||
689 | sis190_unmap_tx_skb(tp->pci_dev, skb, txd); | 688 | sis190_unmap_tx_skb(tp->pci_dev, skb, txd); |
690 | tp->Tx_skbuff[entry] = NULL; | 689 | tp->Tx_skbuff[entry] = NULL; |
@@ -1080,7 +1079,7 @@ static void sis190_tx_clear(struct sis190_private *tp) | |||
1080 | tp->Tx_skbuff[i] = NULL; | 1079 | tp->Tx_skbuff[i] = NULL; |
1081 | dev_kfree_skb(skb); | 1080 | dev_kfree_skb(skb); |
1082 | 1081 | ||
1083 | tp->stats.tx_dropped++; | 1082 | tp->dev->stats.tx_dropped++; |
1084 | } | 1083 | } |
1085 | tp->cur_tx = tp->dirty_tx = 0; | 1084 | tp->cur_tx = tp->dirty_tx = 0; |
1086 | } | 1085 | } |
@@ -1143,7 +1142,7 @@ static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1143 | 1142 | ||
1144 | if (unlikely(skb->len < ETH_ZLEN)) { | 1143 | if (unlikely(skb->len < ETH_ZLEN)) { |
1145 | if (skb_padto(skb, ETH_ZLEN)) { | 1144 | if (skb_padto(skb, ETH_ZLEN)) { |
1146 | tp->stats.tx_dropped++; | 1145 | dev->stats.tx_dropped++; |
1147 | goto out; | 1146 | goto out; |
1148 | } | 1147 | } |
1149 | len = ETH_ZLEN; | 1148 | len = ETH_ZLEN; |
@@ -1196,13 +1195,6 @@ out: | |||
1196 | return NETDEV_TX_OK; | 1195 | return NETDEV_TX_OK; |
1197 | } | 1196 | } |
1198 | 1197 | ||
1199 | static struct net_device_stats *sis190_get_stats(struct net_device *dev) | ||
1200 | { | ||
1201 | struct sis190_private *tp = netdev_priv(dev); | ||
1202 | |||
1203 | return &tp->stats; | ||
1204 | } | ||
1205 | |||
1206 | static void sis190_free_phy(struct list_head *first_phy) | 1198 | static void sis190_free_phy(struct list_head *first_phy) |
1207 | { | 1199 | { |
1208 | struct sis190_phy *cur, *next; | 1200 | struct sis190_phy *cur, *next; |
@@ -1795,7 +1787,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, | |||
1795 | dev->open = sis190_open; | 1787 | dev->open = sis190_open; |
1796 | dev->stop = sis190_close; | 1788 | dev->stop = sis190_close; |
1797 | dev->do_ioctl = sis190_ioctl; | 1789 | dev->do_ioctl = sis190_ioctl; |
1798 | dev->get_stats = sis190_get_stats; | ||
1799 | dev->tx_timeout = sis190_tx_timeout; | 1790 | dev->tx_timeout = sis190_tx_timeout; |
1800 | dev->watchdog_timeo = SIS190_TX_TIMEOUT; | 1791 | dev->watchdog_timeo = SIS190_TX_TIMEOUT; |
1801 | dev->hard_start_xmit = sis190_start_xmit; | 1792 | dev->hard_start_xmit = sis190_start_xmit; |
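In sis190 the receive routine already worked through a local struct net_device_stats pointer, so the conversion there is a one-line change of where that pointer aims. A sketch of the same trick (toy_rx_poll and the status bit are invented for illustration):

    #include <linux/netdevice.h>
    #include <linux/types.h>

    #define TOY_RX_ERR 0x01         /* placeholder status bit */

    static int toy_rx_poll(struct net_device *dev, u32 status)
    {
            /* One-line switch: aim the helper pointer at the core counters
             * instead of &tp->stats; every "stats->" below stays as it was. */
            struct net_device_stats *stats = &dev->stats;

            if (status & TOY_RX_ERR) {
                    stats->rx_errors++;
                    return 0;
            }
            stats->rx_packets++;
            return 1;
    }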
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index e1930c3ee75d..5da8e671324d 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -158,7 +158,6 @@ typedef struct _BufferDesc { | |||
158 | } BufferDesc; | 158 | } BufferDesc; |
159 | 159 | ||
160 | struct sis900_private { | 160 | struct sis900_private { |
161 | struct net_device_stats stats; | ||
162 | struct pci_dev * pci_dev; | 161 | struct pci_dev * pci_dev; |
163 | 162 | ||
164 | spinlock_t lock; | 163 | spinlock_t lock; |
@@ -221,7 +220,6 @@ static void sis900_finish_xmit (struct net_device *net_dev); | |||
221 | static irqreturn_t sis900_interrupt(int irq, void *dev_instance); | 220 | static irqreturn_t sis900_interrupt(int irq, void *dev_instance); |
222 | static int sis900_close(struct net_device *net_dev); | 221 | static int sis900_close(struct net_device *net_dev); |
223 | static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd); | 222 | static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd); |
224 | static struct net_device_stats *sis900_get_stats(struct net_device *net_dev); | ||
225 | static u16 sis900_mcast_bitnr(u8 *addr, u8 revision); | 223 | static u16 sis900_mcast_bitnr(u8 *addr, u8 revision); |
226 | static void set_rx_mode(struct net_device *net_dev); | 224 | static void set_rx_mode(struct net_device *net_dev); |
227 | static void sis900_reset(struct net_device *net_dev); | 225 | static void sis900_reset(struct net_device *net_dev); |
@@ -466,7 +464,6 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, | |||
466 | net_dev->open = &sis900_open; | 464 | net_dev->open = &sis900_open; |
467 | net_dev->hard_start_xmit = &sis900_start_xmit; | 465 | net_dev->hard_start_xmit = &sis900_start_xmit; |
468 | net_dev->stop = &sis900_close; | 466 | net_dev->stop = &sis900_close; |
469 | net_dev->get_stats = &sis900_get_stats; | ||
470 | net_dev->set_config = &sis900_set_config; | 467 | net_dev->set_config = &sis900_set_config; |
471 | net_dev->set_multicast_list = &set_rx_mode; | 468 | net_dev->set_multicast_list = &set_rx_mode; |
472 | net_dev->do_ioctl = &mii_ioctl; | 469 | net_dev->do_ioctl = &mii_ioctl; |
@@ -1542,7 +1539,7 @@ static void sis900_tx_timeout(struct net_device *net_dev) | |||
1542 | sis_priv->tx_skbuff[i] = NULL; | 1539 | sis_priv->tx_skbuff[i] = NULL; |
1543 | sis_priv->tx_ring[i].cmdsts = 0; | 1540 | sis_priv->tx_ring[i].cmdsts = 0; |
1544 | sis_priv->tx_ring[i].bufptr = 0; | 1541 | sis_priv->tx_ring[i].bufptr = 0; |
1545 | sis_priv->stats.tx_dropped++; | 1542 | net_dev->stats.tx_dropped++; |
1546 | } | 1543 | } |
1547 | } | 1544 | } |
1548 | sis_priv->tx_full = 0; | 1545 | sis_priv->tx_full = 0; |
@@ -1739,15 +1736,15 @@ static int sis900_rx(struct net_device *net_dev) | |||
1739 | printk(KERN_DEBUG "%s: Corrupted packet " | 1736 | printk(KERN_DEBUG "%s: Corrupted packet " |
1740 | "received, buffer status = 0x%8.8x/%d.\n", | 1737 | "received, buffer status = 0x%8.8x/%d.\n", |
1741 | net_dev->name, rx_status, data_size); | 1738 | net_dev->name, rx_status, data_size); |
1742 | sis_priv->stats.rx_errors++; | 1739 | net_dev->stats.rx_errors++; |
1743 | if (rx_status & OVERRUN) | 1740 | if (rx_status & OVERRUN) |
1744 | sis_priv->stats.rx_over_errors++; | 1741 | net_dev->stats.rx_over_errors++; |
1745 | if (rx_status & (TOOLONG|RUNT)) | 1742 | if (rx_status & (TOOLONG|RUNT)) |
1746 | sis_priv->stats.rx_length_errors++; | 1743 | net_dev->stats.rx_length_errors++; |
1747 | if (rx_status & (RXISERR | FAERR)) | 1744 | if (rx_status & (RXISERR | FAERR)) |
1748 | sis_priv->stats.rx_frame_errors++; | 1745 | net_dev->stats.rx_frame_errors++; |
1749 | if (rx_status & CRCERR) | 1746 | if (rx_status & CRCERR) |
1750 | sis_priv->stats.rx_crc_errors++; | 1747 | net_dev->stats.rx_crc_errors++; |
1751 | /* reset buffer descriptor state */ | 1748 | /* reset buffer descriptor state */ |
1752 | sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; | 1749 | sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; |
1753 | } else { | 1750 | } else { |
@@ -1768,7 +1765,7 @@ static int sis900_rx(struct net_device *net_dev) | |||
1768 | * in the rx ring | 1765 | * in the rx ring |
1769 | */ | 1766 | */ |
1770 | skb = sis_priv->rx_skbuff[entry]; | 1767 | skb = sis_priv->rx_skbuff[entry]; |
1771 | sis_priv->stats.rx_dropped++; | 1768 | net_dev->stats.rx_dropped++; |
1772 | goto refill_rx_ring; | 1769 | goto refill_rx_ring; |
1773 | } | 1770 | } |
1774 | 1771 | ||
@@ -1793,10 +1790,10 @@ static int sis900_rx(struct net_device *net_dev) | |||
1793 | 1790 | ||
1794 | /* some network statistics */ | 1791 | /* some network statistics */ |
1795 | if ((rx_status & BCAST) == MCAST) | 1792 | if ((rx_status & BCAST) == MCAST) |
1796 | sis_priv->stats.multicast++; | 1793 | net_dev->stats.multicast++; |
1797 | net_dev->last_rx = jiffies; | 1794 | net_dev->last_rx = jiffies; |
1798 | sis_priv->stats.rx_bytes += rx_size; | 1795 | net_dev->stats.rx_bytes += rx_size; |
1799 | sis_priv->stats.rx_packets++; | 1796 | net_dev->stats.rx_packets++; |
1800 | sis_priv->dirty_rx++; | 1797 | sis_priv->dirty_rx++; |
1801 | refill_rx_ring: | 1798 | refill_rx_ring: |
1802 | sis_priv->rx_skbuff[entry] = skb; | 1799 | sis_priv->rx_skbuff[entry] = skb; |
@@ -1827,7 +1824,7 @@ refill_rx_ring: | |||
1827 | printk(KERN_INFO "%s: Memory squeeze," | 1824 | printk(KERN_INFO "%s: Memory squeeze," |
1828 | "deferring packet.\n", | 1825 | "deferring packet.\n", |
1829 | net_dev->name); | 1826 | net_dev->name); |
1830 | sis_priv->stats.rx_dropped++; | 1827 | net_dev->stats.rx_dropped++; |
1831 | break; | 1828 | break; |
1832 | } | 1829 | } |
1833 | sis_priv->rx_skbuff[entry] = skb; | 1830 | sis_priv->rx_skbuff[entry] = skb; |
@@ -1878,20 +1875,20 @@ static void sis900_finish_xmit (struct net_device *net_dev) | |||
1878 | printk(KERN_DEBUG "%s: Transmit " | 1875 | printk(KERN_DEBUG "%s: Transmit " |
1879 | "error, Tx status %8.8x.\n", | 1876 | "error, Tx status %8.8x.\n", |
1880 | net_dev->name, tx_status); | 1877 | net_dev->name, tx_status); |
1881 | sis_priv->stats.tx_errors++; | 1878 | net_dev->stats.tx_errors++; |
1882 | if (tx_status & UNDERRUN) | 1879 | if (tx_status & UNDERRUN) |
1883 | sis_priv->stats.tx_fifo_errors++; | 1880 | net_dev->stats.tx_fifo_errors++; |
1884 | if (tx_status & ABORT) | 1881 | if (tx_status & ABORT) |
1885 | sis_priv->stats.tx_aborted_errors++; | 1882 | net_dev->stats.tx_aborted_errors++; |
1886 | if (tx_status & NOCARRIER) | 1883 | if (tx_status & NOCARRIER) |
1887 | sis_priv->stats.tx_carrier_errors++; | 1884 | net_dev->stats.tx_carrier_errors++; |
1888 | if (tx_status & OWCOLL) | 1885 | if (tx_status & OWCOLL) |
1889 | sis_priv->stats.tx_window_errors++; | 1886 | net_dev->stats.tx_window_errors++; |
1890 | } else { | 1887 | } else { |
1891 | /* packet successfully transmitted */ | 1888 | /* packet successfully transmitted */ |
1892 | sis_priv->stats.collisions += (tx_status & COLCNT) >> 16; | 1889 | net_dev->stats.collisions += (tx_status & COLCNT) >> 16; |
1893 | sis_priv->stats.tx_bytes += tx_status & DSIZE; | 1890 | net_dev->stats.tx_bytes += tx_status & DSIZE; |
1894 | sis_priv->stats.tx_packets++; | 1891 | net_dev->stats.tx_packets++; |
1895 | } | 1892 | } |
1896 | /* Free the original skb. */ | 1893 | /* Free the original skb. */ |
1897 | skb = sis_priv->tx_skbuff[entry]; | 1894 | skb = sis_priv->tx_skbuff[entry]; |
@@ -2138,21 +2135,6 @@ static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) | |||
2138 | } | 2135 | } |
2139 | 2136 | ||
2140 | /** | 2137 | /** |
2141 | * sis900_get_stats - Get sis900 read/write statistics | ||
2142 | * @net_dev: the net device to get statistics for | ||
2143 | * | ||
2144 | * get tx/rx statistics for sis900 | ||
2145 | */ | ||
2146 | |||
2147 | static struct net_device_stats * | ||
2148 | sis900_get_stats(struct net_device *net_dev) | ||
2149 | { | ||
2150 | struct sis900_private *sis_priv = net_dev->priv; | ||
2151 | |||
2152 | return &sis_priv->stats; | ||
2153 | } | ||
2154 | |||
2155 | /** | ||
2156 | * sis900_set_config - Set media type by net_device.set_config | 2138 | * sis900_set_config - Set media type by net_device.set_config |
2157 | * @dev: the net device for media type change | 2139 | * @dev: the net device for media type change |
2158 | * @map: ifmap passed by ifconfig | 2140 | * @map: ifmap passed by ifconfig |
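For sis900 the change also deletes the stats member, the forward declaration and the kernel-doc block for sis900_get_stats; the error paths keep classifying hardware status bits into the standard counters, just on dev->stats now. A compressed sketch of that classification (the TOY_* bit values are placeholders, not the SiS 900 descriptor layout):

    #include <linux/netdevice.h>
    #include <linux/types.h>

    #define TOY_OVERRUN 0x01
    #define TOY_TOOLONG 0x02
    #define TOY_CRCERR  0x04

    static void toy_rx_error(struct net_device *dev, u32 rx_status)
    {
            dev->stats.rx_errors++;
            if (rx_status & TOY_OVERRUN)
                    dev->stats.rx_over_errors++;
            if (rx_status & TOY_TOOLONG)
                    dev->stats.rx_length_errors++;
            if (rx_status & TOY_CRCERR)
                    dev->stats.rx_crc_errors++;
    }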
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index 5f03e44ad135..c0276c04dece 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c | |||
@@ -115,13 +115,6 @@ struct smc911x_local { | |||
115 | */ | 115 | */ |
116 | struct sk_buff *pending_tx_skb; | 116 | struct sk_buff *pending_tx_skb; |
117 | 117 | ||
118 | /* | ||
119 | * these are things that the kernel wants me to keep, so users | ||
120 | * can find out semi-useless statistics of how well the card is | ||
121 | * performing | ||
122 | */ | ||
123 | struct net_device_stats stats; | ||
124 | |||
125 | /* version/revision of the SMC911x chip */ | 118 | /* version/revision of the SMC911x chip */ |
126 | u16 version; | 119 | u16 version; |
127 | u16 revision; | 120 | u16 revision; |
@@ -315,8 +308,8 @@ static void smc911x_reset(struct net_device *dev) | |||
315 | if (lp->pending_tx_skb != NULL) { | 308 | if (lp->pending_tx_skb != NULL) { |
316 | dev_kfree_skb (lp->pending_tx_skb); | 309 | dev_kfree_skb (lp->pending_tx_skb); |
317 | lp->pending_tx_skb = NULL; | 310 | lp->pending_tx_skb = NULL; |
318 | lp->stats.tx_errors++; | 311 | dev->stats.tx_errors++; |
319 | lp->stats.tx_aborted_errors++; | 312 | dev->stats.tx_aborted_errors++; |
320 | } | 313 | } |
321 | } | 314 | } |
322 | 315 | ||
@@ -449,14 +442,14 @@ static inline void smc911x_rcv(struct net_device *dev) | |||
449 | pkt_len = (status & RX_STS_PKT_LEN_) >> 16; | 442 | pkt_len = (status & RX_STS_PKT_LEN_) >> 16; |
450 | if (status & RX_STS_ES_) { | 443 | if (status & RX_STS_ES_) { |
451 | /* Deal with a bad packet */ | 444 | /* Deal with a bad packet */ |
452 | lp->stats.rx_errors++; | 445 | dev->stats.rx_errors++; |
453 | if (status & RX_STS_CRC_ERR_) | 446 | if (status & RX_STS_CRC_ERR_) |
454 | lp->stats.rx_crc_errors++; | 447 | dev->stats.rx_crc_errors++; |
455 | else { | 448 | else { |
456 | if (status & RX_STS_LEN_ERR_) | 449 | if (status & RX_STS_LEN_ERR_) |
457 | lp->stats.rx_length_errors++; | 450 | dev->stats.rx_length_errors++; |
458 | if (status & RX_STS_MCAST_) | 451 | if (status & RX_STS_MCAST_) |
459 | lp->stats.multicast++; | 452 | dev->stats.multicast++; |
460 | } | 453 | } |
461 | /* Remove the bad packet data from the RX FIFO */ | 454 | /* Remove the bad packet data from the RX FIFO */ |
462 | smc911x_drop_pkt(dev); | 455 | smc911x_drop_pkt(dev); |
@@ -467,7 +460,7 @@ static inline void smc911x_rcv(struct net_device *dev) | |||
467 | if (unlikely(skb == NULL)) { | 460 | if (unlikely(skb == NULL)) { |
468 | PRINTK( "%s: Low memory, rcvd packet dropped.\n", | 461 | PRINTK( "%s: Low memory, rcvd packet dropped.\n", |
469 | dev->name); | 462 | dev->name); |
470 | lp->stats.rx_dropped++; | 463 | dev->stats.rx_dropped++; |
471 | smc911x_drop_pkt(dev); | 464 | smc911x_drop_pkt(dev); |
472 | return; | 465 | return; |
473 | } | 466 | } |
@@ -503,8 +496,8 @@ static inline void smc911x_rcv(struct net_device *dev) | |||
503 | dev->last_rx = jiffies; | 496 | dev->last_rx = jiffies; |
504 | skb->protocol = eth_type_trans(skb, dev); | 497 | skb->protocol = eth_type_trans(skb, dev); |
505 | netif_rx(skb); | 498 | netif_rx(skb); |
506 | lp->stats.rx_packets++; | 499 | dev->stats.rx_packets++; |
507 | lp->stats.rx_bytes += pkt_len-4; | 500 | dev->stats.rx_bytes += pkt_len-4; |
508 | #endif | 501 | #endif |
509 | } | 502 | } |
510 | } | 503 | } |
@@ -616,8 +609,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
616 | printk("%s: No Tx free space %d < %d\n", | 609 | printk("%s: No Tx free space %d < %d\n", |
617 | dev->name, free, skb->len); | 610 | dev->name, free, skb->len); |
618 | lp->pending_tx_skb = NULL; | 611 | lp->pending_tx_skb = NULL; |
619 | lp->stats.tx_errors++; | 612 | dev->stats.tx_errors++; |
620 | lp->stats.tx_dropped++; | 613 | dev->stats.tx_dropped++; |
621 | dev_kfree_skb(skb); | 614 | dev_kfree_skb(skb); |
622 | return 0; | 615 | return 0; |
623 | } | 616 | } |
@@ -667,8 +660,8 @@ static void smc911x_tx(struct net_device *dev) | |||
667 | dev->name, | 660 | dev->name, |
668 | (SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16); | 661 | (SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16); |
669 | tx_status = SMC_GET_TX_STS_FIFO(); | 662 | tx_status = SMC_GET_TX_STS_FIFO(); |
670 | lp->stats.tx_packets++; | 663 | dev->stats.tx_packets++; |
671 | lp->stats.tx_bytes+=tx_status>>16; | 664 | dev->stats.tx_bytes+=tx_status>>16; |
672 | DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n", | 665 | DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n", |
673 | dev->name, (tx_status & 0xffff0000) >> 16, | 666 | dev->name, (tx_status & 0xffff0000) >> 16, |
674 | tx_status & 0x0000ffff); | 667 | tx_status & 0x0000ffff); |
@@ -676,22 +669,22 @@ static void smc911x_tx(struct net_device *dev) | |||
676 | * full-duplex mode */ | 669 | * full-duplex mode */ |
677 | if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx && | 670 | if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx && |
678 | !(tx_status & 0x00000306))) { | 671 | !(tx_status & 0x00000306))) { |
679 | lp->stats.tx_errors++; | 672 | dev->stats.tx_errors++; |
680 | } | 673 | } |
681 | if (tx_status & TX_STS_MANY_COLL_) { | 674 | if (tx_status & TX_STS_MANY_COLL_) { |
682 | lp->stats.collisions+=16; | 675 | dev->stats.collisions+=16; |
683 | lp->stats.tx_aborted_errors++; | 676 | dev->stats.tx_aborted_errors++; |
684 | } else { | 677 | } else { |
685 | lp->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; | 678 | dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; |
686 | } | 679 | } |
687 | /* carrier error only has meaning for half-duplex communication */ | 680 | /* carrier error only has meaning for half-duplex communication */ |
688 | if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) && | 681 | if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) && |
689 | !lp->ctl_rfduplx) { | 682 | !lp->ctl_rfduplx) { |
690 | lp->stats.tx_carrier_errors++; | 683 | dev->stats.tx_carrier_errors++; |
691 | } | 684 | } |
692 | if (tx_status & TX_STS_LATE_COLL_) { | 685 | if (tx_status & TX_STS_LATE_COLL_) { |
693 | lp->stats.collisions++; | 686 | dev->stats.collisions++; |
694 | lp->stats.tx_aborted_errors++; | 687 | dev->stats.tx_aborted_errors++; |
695 | } | 688 | } |
696 | } | 689 | } |
697 | } | 690 | } |
@@ -1121,11 +1114,11 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) | |||
1121 | /* Handle various error conditions */ | 1114 | /* Handle various error conditions */ |
1122 | if (status & INT_STS_RXE_) { | 1115 | if (status & INT_STS_RXE_) { |
1123 | SMC_ACK_INT(INT_STS_RXE_); | 1116 | SMC_ACK_INT(INT_STS_RXE_); |
1124 | lp->stats.rx_errors++; | 1117 | dev->stats.rx_errors++; |
1125 | } | 1118 | } |
1126 | if (status & INT_STS_RXDFH_INT_) { | 1119 | if (status & INT_STS_RXDFH_INT_) { |
1127 | SMC_ACK_INT(INT_STS_RXDFH_INT_); | 1120 | SMC_ACK_INT(INT_STS_RXDFH_INT_); |
1128 | lp->stats.rx_dropped+=SMC_GET_RX_DROP(); | 1121 | dev->stats.rx_dropped+=SMC_GET_RX_DROP(); |
1129 | } | 1122 | } |
1130 | /* Undocumented interrupt-what is the right thing to do here? */ | 1123 | /* Undocumented interrupt-what is the right thing to do here? */ |
1131 | if (status & INT_STS_RXDF_INT_) { | 1124 | if (status & INT_STS_RXDF_INT_) { |
@@ -1140,8 +1133,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) | |||
1140 | cr &= ~MAC_CR_RXEN_; | 1133 | cr &= ~MAC_CR_RXEN_; |
1141 | SMC_SET_MAC_CR(cr); | 1134 | SMC_SET_MAC_CR(cr); |
1142 | DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); | 1135 | DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); |
1143 | lp->stats.rx_errors++; | 1136 | dev->stats.rx_errors++; |
1144 | lp->stats.rx_fifo_errors++; | 1137 | dev->stats.rx_fifo_errors++; |
1145 | } | 1138 | } |
1146 | SMC_ACK_INT(INT_STS_RDFL_); | 1139 | SMC_ACK_INT(INT_STS_RDFL_); |
1147 | } | 1140 | } |
@@ -1152,8 +1145,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) | |||
1152 | SMC_SET_MAC_CR(cr); | 1145 | SMC_SET_MAC_CR(cr); |
1153 | rx_overrun=1; | 1146 | rx_overrun=1; |
1154 | DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); | 1147 | DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); |
1155 | lp->stats.rx_errors++; | 1148 | dev->stats.rx_errors++; |
1156 | lp->stats.rx_fifo_errors++; | 1149 | dev->stats.rx_fifo_errors++; |
1157 | } | 1150 | } |
1158 | SMC_ACK_INT(INT_STS_RDFO_); | 1151 | SMC_ACK_INT(INT_STS_RDFO_); |
1159 | } | 1152 | } |
@@ -1307,8 +1300,8 @@ smc911x_rx_dma_irq(int dma, void *data) | |||
1307 | dev->last_rx = jiffies; | 1300 | dev->last_rx = jiffies; |
1308 | skb->protocol = eth_type_trans(skb, dev); | 1301 | skb->protocol = eth_type_trans(skb, dev); |
1309 | netif_rx(skb); | 1302 | netif_rx(skb); |
1310 | lp->stats.rx_packets++; | 1303 | dev->stats.rx_packets++; |
1311 | lp->stats.rx_bytes += skb->len; | 1304 | dev->stats.rx_bytes += skb->len; |
1312 | 1305 | ||
1313 | spin_lock_irqsave(&lp->lock, flags); | 1306 | spin_lock_irqsave(&lp->lock, flags); |
1314 | pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16; | 1307 | pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16; |
@@ -1568,19 +1561,6 @@ static int smc911x_close(struct net_device *dev) | |||
1568 | } | 1561 | } |
1569 | 1562 | ||
1570 | /* | 1563 | /* |
1571 | * Get the current statistics. | ||
1572 | * This may be called with the card open or closed. | ||
1573 | */ | ||
1574 | static struct net_device_stats *smc911x_query_statistics(struct net_device *dev) | ||
1575 | { | ||
1576 | struct smc911x_local *lp = netdev_priv(dev); | ||
1577 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1578 | |||
1579 | |||
1580 | return &lp->stats; | ||
1581 | } | ||
1582 | |||
1583 | /* | ||
1584 | * Ethtool support | 1564 | * Ethtool support |
1585 | */ | 1565 | */ |
1586 | static int | 1566 | static int |
@@ -2056,7 +2036,6 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr) | |||
2056 | dev->hard_start_xmit = smc911x_hard_start_xmit; | 2036 | dev->hard_start_xmit = smc911x_hard_start_xmit; |
2057 | dev->tx_timeout = smc911x_timeout; | 2037 | dev->tx_timeout = smc911x_timeout; |
2058 | dev->watchdog_timeo = msecs_to_jiffies(watchdog); | 2038 | dev->watchdog_timeo = msecs_to_jiffies(watchdog); |
2059 | dev->get_stats = smc911x_query_statistics; | ||
2060 | dev->set_multicast_list = smc911x_set_multicast_list; | 2039 | dev->set_multicast_list = smc911x_set_multicast_list; |
2061 | dev->ethtool_ops = &smc911x_ethtool_ops; | 2040 | dev->ethtool_ops = &smc911x_ethtool_ops; |
2062 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2041 | #ifdef CONFIG_NET_POLL_CONTROLLER |
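The smc911x transmit-completion path is a good example of counters derived from a status word rather than simply incremented: the collision count is extracted from a bit field and added to dev->stats.collisions. A simplified sketch (the TOY_TX_* values are invented; the real bit layout is in the driver above):

    #include <linux/netdevice.h>
    #include <linux/types.h>

    #define TOY_TX_ERR        0x8000
    #define TOY_TX_MANY_COLL  0x0400
    #define TOY_TX_COLL_MASK  0x0078   /* collision count in bits 3..6 */

    static void toy_tx_status(struct net_device *dev, u32 tx_status)
    {
            if (tx_status & TOY_TX_ERR)
                    dev->stats.tx_errors++;

            if (tx_status & TOY_TX_MANY_COLL) {
                    dev->stats.collisions += 16;
                    dev->stats.tx_aborted_errors++;
            } else {
                    dev->stats.collisions += (tx_status & TOY_TX_COLL_MASK) >> 3;
            }
    }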
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c index 0a79516d196e..5b6748e3ea0e 100644 --- a/drivers/net/smc9194.c +++ b/drivers/net/smc9194.c | |||
@@ -191,13 +191,6 @@ static struct devlist smc_devlist[] __initdata = { | |||
191 | /* store this information for the driver.. */ | 191 | /* store this information for the driver.. */ |
192 | struct smc_local { | 192 | struct smc_local { |
193 | /* | 193 | /* |
194 | these are things that the kernel wants me to keep, so users | ||
195 | can find out semi-useless statistics of how well the card is | ||
196 | performing | ||
197 | */ | ||
198 | struct net_device_stats stats; | ||
199 | |||
200 | /* | ||
201 | If I have to wait until memory is available to send | 194 | If I have to wait until memory is available to send |
202 | a packet, I will store the skbuff here, until I get the | 195 | a packet, I will store the skbuff here, until I get the |
203 | desired memory. Then, I'll send it out and free it. | 196 | desired memory. Then, I'll send it out and free it. |
@@ -249,12 +242,6 @@ static void smc_timeout(struct net_device *dev); | |||
249 | static int smc_close(struct net_device *dev); | 242 | static int smc_close(struct net_device *dev); |
250 | 243 | ||
251 | /* | 244 | /* |
252 | . This routine allows the proc file system to query the driver's | ||
253 | . statistics. | ||
254 | */ | ||
255 | static struct net_device_stats * smc_query_statistics( struct net_device *dev); | ||
256 | |||
257 | /* | ||
258 | . Finally, a call to set promiscuous mode ( for TCPDUMP and related | 245 | . Finally, a call to set promiscuous mode ( for TCPDUMP and related |
259 | . programs ) and multicast modes. | 246 | . programs ) and multicast modes. |
260 | */ | 247 | */ |
@@ -514,7 +501,7 @@ static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * de | |||
514 | 501 | ||
515 | if ( lp->saved_skb) { | 502 | if ( lp->saved_skb) { |
516 | /* THIS SHOULD NEVER HAPPEN. */ | 503 | /* THIS SHOULD NEVER HAPPEN. */ |
517 | lp->stats.tx_aborted_errors++; | 504 | dev->stats.tx_aborted_errors++; |
518 | printk(CARDNAME": Bad Craziness - sent packet while busy.\n" ); | 505 | printk(CARDNAME": Bad Craziness - sent packet while busy.\n" ); |
519 | return 1; | 506 | return 1; |
520 | } | 507 | } |
@@ -1065,7 +1052,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) | |||
1065 | dev->hard_start_xmit = smc_wait_to_send_packet; | 1052 | dev->hard_start_xmit = smc_wait_to_send_packet; |
1066 | dev->tx_timeout = smc_timeout; | 1053 | dev->tx_timeout = smc_timeout; |
1067 | dev->watchdog_timeo = HZ/20; | 1054 | dev->watchdog_timeo = HZ/20; |
1068 | dev->get_stats = smc_query_statistics; | ||
1069 | dev->set_multicast_list = smc_set_multicast_list; | 1055 | dev->set_multicast_list = smc_set_multicast_list; |
1070 | 1056 | ||
1071 | return 0; | 1057 | return 0; |
@@ -1199,7 +1185,6 @@ static void smc_timeout(struct net_device *dev) | |||
1199 | */ | 1185 | */ |
1200 | static void smc_rcv(struct net_device *dev) | 1186 | static void smc_rcv(struct net_device *dev) |
1201 | { | 1187 | { |
1202 | struct smc_local *lp = netdev_priv(dev); | ||
1203 | int ioaddr = dev->base_addr; | 1188 | int ioaddr = dev->base_addr; |
1204 | int packet_number; | 1189 | int packet_number; |
1205 | word status; | 1190 | word status; |
@@ -1243,13 +1228,13 @@ static void smc_rcv(struct net_device *dev) | |||
1243 | 1228 | ||
1244 | /* set multicast stats */ | 1229 | /* set multicast stats */ |
1245 | if ( status & RS_MULTICAST ) | 1230 | if ( status & RS_MULTICAST ) |
1246 | lp->stats.multicast++; | 1231 | dev->stats.multicast++; |
1247 | 1232 | ||
1248 | skb = dev_alloc_skb( packet_length + 5); | 1233 | skb = dev_alloc_skb( packet_length + 5); |
1249 | 1234 | ||
1250 | if ( skb == NULL ) { | 1235 | if ( skb == NULL ) { |
1251 | printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n"); | 1236 | printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n"); |
1252 | lp->stats.rx_dropped++; | 1237 | dev->stats.rx_dropped++; |
1253 | goto done; | 1238 | goto done; |
1254 | } | 1239 | } |
1255 | 1240 | ||
@@ -1289,16 +1274,16 @@ static void smc_rcv(struct net_device *dev) | |||
1289 | skb->protocol = eth_type_trans(skb, dev ); | 1274 | skb->protocol = eth_type_trans(skb, dev ); |
1290 | netif_rx(skb); | 1275 | netif_rx(skb); |
1291 | dev->last_rx = jiffies; | 1276 | dev->last_rx = jiffies; |
1292 | lp->stats.rx_packets++; | 1277 | dev->stats.rx_packets++; |
1293 | lp->stats.rx_bytes += packet_length; | 1278 | dev->stats.rx_bytes += packet_length; |
1294 | } else { | 1279 | } else { |
1295 | /* error ... */ | 1280 | /* error ... */ |
1296 | lp->stats.rx_errors++; | 1281 | dev->stats.rx_errors++; |
1297 | 1282 | ||
1298 | if ( status & RS_ALGNERR ) lp->stats.rx_frame_errors++; | 1283 | if ( status & RS_ALGNERR ) dev->stats.rx_frame_errors++; |
1299 | if ( status & (RS_TOOSHORT | RS_TOOLONG ) ) | 1284 | if ( status & (RS_TOOSHORT | RS_TOOLONG ) ) |
1300 | lp->stats.rx_length_errors++; | 1285 | dev->stats.rx_length_errors++; |
1301 | if ( status & RS_BADCRC) lp->stats.rx_crc_errors++; | 1286 | if ( status & RS_BADCRC) dev->stats.rx_crc_errors++; |
1302 | } | 1287 | } |
1303 | 1288 | ||
1304 | done: | 1289 | done: |
@@ -1346,12 +1331,12 @@ static void smc_tx( struct net_device * dev ) | |||
1346 | tx_status = inw( ioaddr + DATA_1 ); | 1331 | tx_status = inw( ioaddr + DATA_1 ); |
1347 | PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status )); | 1332 | PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status )); |
1348 | 1333 | ||
1349 | lp->stats.tx_errors++; | 1334 | dev->stats.tx_errors++; |
1350 | if ( tx_status & TS_LOSTCAR ) lp->stats.tx_carrier_errors++; | 1335 | if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++; |
1351 | if ( tx_status & TS_LATCOL ) { | 1336 | if ( tx_status & TS_LATCOL ) { |
1352 | printk(KERN_DEBUG CARDNAME | 1337 | printk(KERN_DEBUG CARDNAME |
1353 | ": Late collision occurred on last xmit.\n"); | 1338 | ": Late collision occurred on last xmit.\n"); |
1354 | lp->stats.tx_window_errors++; | 1339 | dev->stats.tx_window_errors++; |
1355 | } | 1340 | } |
1356 | #if 0 | 1341 | #if 0 |
1357 | if ( tx_status & TS_16COL ) { ... } | 1342 | if ( tx_status & TS_16COL ) { ... } |
@@ -1446,10 +1431,10 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) | |||
1446 | SMC_SELECT_BANK( 0 ); | 1431 | SMC_SELECT_BANK( 0 ); |
1447 | card_stats = inw( ioaddr + COUNTER ); | 1432 | card_stats = inw( ioaddr + COUNTER ); |
1448 | /* single collisions */ | 1433 | /* single collisions */ |
1449 | lp->stats.collisions += card_stats & 0xF; | 1434 | dev->stats.collisions += card_stats & 0xF; |
1450 | card_stats >>= 4; | 1435 | card_stats >>= 4; |
1451 | /* multiple collisions */ | 1436 | /* multiple collisions */ |
1452 | lp->stats.collisions += card_stats & 0xF; | 1437 | dev->stats.collisions += card_stats & 0xF; |
1453 | 1438 | ||
1454 | /* these are for when linux supports these statistics */ | 1439 | /* these are for when linux supports these statistics */ |
1455 | 1440 | ||
@@ -1458,7 +1443,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) | |||
1458 | ": TX_BUFFER_EMPTY handled\n")); | 1443 | ": TX_BUFFER_EMPTY handled\n")); |
1459 | outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT ); | 1444 | outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT ); |
1460 | mask &= ~IM_TX_EMPTY_INT; | 1445 | mask &= ~IM_TX_EMPTY_INT; |
1461 | lp->stats.tx_packets += lp->packets_waiting; | 1446 | dev->stats.tx_packets += lp->packets_waiting; |
1462 | lp->packets_waiting = 0; | 1447 | lp->packets_waiting = 0; |
1463 | 1448 | ||
1464 | } else if (status & IM_ALLOC_INT ) { | 1449 | } else if (status & IM_ALLOC_INT ) { |
@@ -1477,8 +1462,8 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) | |||
1477 | 1462 | ||
1478 | PRINTK2((CARDNAME": Handoff done successfully.\n")); | 1463 | PRINTK2((CARDNAME": Handoff done successfully.\n")); |
1479 | } else if (status & IM_RX_OVRN_INT ) { | 1464 | } else if (status & IM_RX_OVRN_INT ) { |
1480 | lp->stats.rx_errors++; | 1465 | dev->stats.rx_errors++; |
1481 | lp->stats.rx_fifo_errors++; | 1466 | dev->stats.rx_fifo_errors++; |
1482 | outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT ); | 1467 | outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT ); |
1483 | } else if (status & IM_EPH_INT ) { | 1468 | } else if (status & IM_EPH_INT ) { |
1484 | PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n")); | 1469 | PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n")); |
@@ -1521,16 +1506,6 @@ static int smc_close(struct net_device *dev) | |||
1521 | return 0; | 1506 | return 0; |
1522 | } | 1507 | } |
1523 | 1508 | ||
1524 | /*------------------------------------------------------------ | ||
1525 | . Get the current statistics. | ||
1526 | . This may be called with the card open or closed. | ||
1527 | .-------------------------------------------------------------*/ | ||
1528 | static struct net_device_stats* smc_query_statistics(struct net_device *dev) { | ||
1529 | struct smc_local *lp = netdev_priv(dev); | ||
1530 | |||
1531 | return &lp->stats; | ||
1532 | } | ||
1533 | |||
1534 | /*----------------------------------------------------------- | 1509 | /*----------------------------------------------------------- |
1535 | . smc_set_multicast_list | 1510 | . smc_set_multicast_list |
1536 | . | 1511 | . |
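A side effect visible in smc9194's smc_rcv() is that once the counters live on the net_device, some functions no longer need their netdev_priv() local at all, so the now-unused lp variable is removed along with the stats accesses. Sketch of the resulting shape (toy_rcv is illustrative and skips the real FIFO handling):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void toy_rcv(struct net_device *dev)
    {
            /*
             * Before the conversion this function needed
             *     struct smc_local *lp = netdev_priv(dev);
             * solely to reach lp->stats; with the counters on the netdev the
             * local variable and the netdev_priv() call disappear.
             */
            struct sk_buff *skb = dev_alloc_skb(64 + 2);

            if (!skb) {
                    dev->stats.rx_dropped++;
                    return;
            }
            dev->stats.rx_packets++;
            dev_kfree_skb(skb);     /* a real driver would fill it and netif_rx() it */
    }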
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index c5837ab5c96b..fe28d277f21a 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -183,13 +183,6 @@ struct smc_local { | |||
183 | struct sk_buff *pending_tx_skb; | 183 | struct sk_buff *pending_tx_skb; |
184 | struct tasklet_struct tx_task; | 184 | struct tasklet_struct tx_task; |
185 | 185 | ||
186 | /* | ||
187 | * these are things that the kernel wants me to keep, so users | ||
188 | * can find out semi-useless statistics of how well the card is | ||
189 | * performing | ||
190 | */ | ||
191 | struct net_device_stats stats; | ||
192 | |||
193 | /* version/revision of the SMC91x chip */ | 186 | /* version/revision of the SMC91x chip */ |
194 | int version; | 187 | int version; |
195 | 188 | ||
@@ -332,8 +325,8 @@ static void smc_reset(struct net_device *dev) | |||
332 | /* free any pending tx skb */ | 325 | /* free any pending tx skb */ |
333 | if (pending_skb) { | 326 | if (pending_skb) { |
334 | dev_kfree_skb(pending_skb); | 327 | dev_kfree_skb(pending_skb); |
335 | lp->stats.tx_errors++; | 328 | dev->stats.tx_errors++; |
336 | lp->stats.tx_aborted_errors++; | 329 | dev->stats.tx_aborted_errors++; |
337 | } | 330 | } |
338 | 331 | ||
339 | /* | 332 | /* |
@@ -512,13 +505,13 @@ static inline void smc_rcv(struct net_device *dev) | |||
512 | } | 505 | } |
513 | SMC_WAIT_MMU_BUSY(); | 506 | SMC_WAIT_MMU_BUSY(); |
514 | SMC_SET_MMU_CMD(MC_RELEASE); | 507 | SMC_SET_MMU_CMD(MC_RELEASE); |
515 | lp->stats.rx_errors++; | 508 | dev->stats.rx_errors++; |
516 | if (status & RS_ALGNERR) | 509 | if (status & RS_ALGNERR) |
517 | lp->stats.rx_frame_errors++; | 510 | dev->stats.rx_frame_errors++; |
518 | if (status & (RS_TOOSHORT | RS_TOOLONG)) | 511 | if (status & (RS_TOOSHORT | RS_TOOLONG)) |
519 | lp->stats.rx_length_errors++; | 512 | dev->stats.rx_length_errors++; |
520 | if (status & RS_BADCRC) | 513 | if (status & RS_BADCRC) |
521 | lp->stats.rx_crc_errors++; | 514 | dev->stats.rx_crc_errors++; |
522 | } else { | 515 | } else { |
523 | struct sk_buff *skb; | 516 | struct sk_buff *skb; |
524 | unsigned char *data; | 517 | unsigned char *data; |
@@ -526,7 +519,7 @@ static inline void smc_rcv(struct net_device *dev) | |||
526 | 519 | ||
527 | /* set multicast stats */ | 520 | /* set multicast stats */ |
528 | if (status & RS_MULTICAST) | 521 | if (status & RS_MULTICAST) |
529 | lp->stats.multicast++; | 522 | dev->stats.multicast++; |
530 | 523 | ||
531 | /* | 524 | /* |
532 | * Actual payload is packet_len - 6 (or 5 if odd byte). | 525 | * Actual payload is packet_len - 6 (or 5 if odd byte). |
@@ -542,7 +535,7 @@ static inline void smc_rcv(struct net_device *dev) | |||
542 | dev->name); | 535 | dev->name); |
543 | SMC_WAIT_MMU_BUSY(); | 536 | SMC_WAIT_MMU_BUSY(); |
544 | SMC_SET_MMU_CMD(MC_RELEASE); | 537 | SMC_SET_MMU_CMD(MC_RELEASE); |
545 | lp->stats.rx_dropped++; | 538 | dev->stats.rx_dropped++; |
546 | return; | 539 | return; |
547 | } | 540 | } |
548 | 541 | ||
@@ -570,8 +563,8 @@ static inline void smc_rcv(struct net_device *dev) | |||
570 | dev->last_rx = jiffies; | 563 | dev->last_rx = jiffies; |
571 | skb->protocol = eth_type_trans(skb, dev); | 564 | skb->protocol = eth_type_trans(skb, dev); |
572 | netif_rx(skb); | 565 | netif_rx(skb); |
573 | lp->stats.rx_packets++; | 566 | dev->stats.rx_packets++; |
574 | lp->stats.rx_bytes += data_len; | 567 | dev->stats.rx_bytes += data_len; |
575 | } | 568 | } |
576 | } | 569 | } |
577 | 570 | ||
@@ -644,8 +637,8 @@ static void smc_hardware_send_pkt(unsigned long data) | |||
644 | packet_no = SMC_GET_AR(); | 637 | packet_no = SMC_GET_AR(); |
645 | if (unlikely(packet_no & AR_FAILED)) { | 638 | if (unlikely(packet_no & AR_FAILED)) { |
646 | printk("%s: Memory allocation failed.\n", dev->name); | 639 | printk("%s: Memory allocation failed.\n", dev->name); |
647 | lp->stats.tx_errors++; | 640 | dev->stats.tx_errors++; |
648 | lp->stats.tx_fifo_errors++; | 641 | dev->stats.tx_fifo_errors++; |
649 | smc_special_unlock(&lp->lock); | 642 | smc_special_unlock(&lp->lock); |
650 | goto done; | 643 | goto done; |
651 | } | 644 | } |
@@ -688,8 +681,8 @@ static void smc_hardware_send_pkt(unsigned long data) | |||
688 | smc_special_unlock(&lp->lock); | 681 | smc_special_unlock(&lp->lock); |
689 | 682 | ||
690 | dev->trans_start = jiffies; | 683 | dev->trans_start = jiffies; |
691 | lp->stats.tx_packets++; | 684 | dev->stats.tx_packets++; |
692 | lp->stats.tx_bytes += len; | 685 | dev->stats.tx_bytes += len; |
693 | 686 | ||
694 | SMC_ENABLE_INT(IM_TX_INT | IM_TX_EMPTY_INT); | 687 | SMC_ENABLE_INT(IM_TX_INT | IM_TX_EMPTY_INT); |
695 | 688 | ||
@@ -729,8 +722,8 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
729 | numPages = ((skb->len & ~1) + (6 - 1)) >> 8; | 722 | numPages = ((skb->len & ~1) + (6 - 1)) >> 8; |
730 | if (unlikely(numPages > 7)) { | 723 | if (unlikely(numPages > 7)) { |
731 | printk("%s: Far too big packet error.\n", dev->name); | 724 | printk("%s: Far too big packet error.\n", dev->name); |
732 | lp->stats.tx_errors++; | 725 | dev->stats.tx_errors++; |
733 | lp->stats.tx_dropped++; | 726 | dev->stats.tx_dropped++; |
734 | dev_kfree_skb(skb); | 727 | dev_kfree_skb(skb); |
735 | return 0; | 728 | return 0; |
736 | } | 729 | } |
@@ -803,17 +796,17 @@ static void smc_tx(struct net_device *dev) | |||
803 | dev->name, tx_status, packet_no); | 796 | dev->name, tx_status, packet_no); |
804 | 797 | ||
805 | if (!(tx_status & ES_TX_SUC)) | 798 | if (!(tx_status & ES_TX_SUC)) |
806 | lp->stats.tx_errors++; | 799 | dev->stats.tx_errors++; |
807 | 800 | ||
808 | if (tx_status & ES_LOSTCARR) | 801 | if (tx_status & ES_LOSTCARR) |
809 | lp->stats.tx_carrier_errors++; | 802 | dev->stats.tx_carrier_errors++; |
810 | 803 | ||
811 | if (tx_status & (ES_LATCOL | ES_16COL)) { | 804 | if (tx_status & (ES_LATCOL | ES_16COL)) { |
812 | PRINTK("%s: %s occurred on last xmit\n", dev->name, | 805 | PRINTK("%s: %s occurred on last xmit\n", dev->name, |
813 | (tx_status & ES_LATCOL) ? | 806 | (tx_status & ES_LATCOL) ? |
814 | "late collision" : "too many collisions"); | 807 | "late collision" : "too many collisions"); |
815 | lp->stats.tx_window_errors++; | 808 | dev->stats.tx_window_errors++; |
816 | if (!(lp->stats.tx_window_errors & 63) && net_ratelimit()) { | 809 | if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) { |
817 | printk(KERN_INFO "%s: unexpectedly large number of " | 810 | printk(KERN_INFO "%s: unexpectedly large number of " |
818 | "bad collisions. Please check duplex " | 811 | "bad collisions. Please check duplex " |
819 | "setting.\n", dev->name); | 812 | "setting.\n", dev->name); |
@@ -1347,19 +1340,19 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) | |||
1347 | SMC_SELECT_BANK(2); | 1340 | SMC_SELECT_BANK(2); |
1348 | 1341 | ||
1349 | /* single collisions */ | 1342 | /* single collisions */ |
1350 | lp->stats.collisions += card_stats & 0xF; | 1343 | dev->stats.collisions += card_stats & 0xF; |
1351 | card_stats >>= 4; | 1344 | card_stats >>= 4; |
1352 | 1345 | ||
1353 | /* multiple collisions */ | 1346 | /* multiple collisions */ |
1354 | lp->stats.collisions += card_stats & 0xF; | 1347 | dev->stats.collisions += card_stats & 0xF; |
1355 | } else if (status & IM_RX_OVRN_INT) { | 1348 | } else if (status & IM_RX_OVRN_INT) { |
1356 | DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name, | 1349 | DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name, |
1357 | ({ int eph_st; SMC_SELECT_BANK(0); | 1350 | ({ int eph_st; SMC_SELECT_BANK(0); |
1358 | eph_st = SMC_GET_EPH_STATUS(); | 1351 | eph_st = SMC_GET_EPH_STATUS(); |
1359 | SMC_SELECT_BANK(2); eph_st; }) ); | 1352 | SMC_SELECT_BANK(2); eph_st; }) ); |
1360 | SMC_ACK_INT(IM_RX_OVRN_INT); | 1353 | SMC_ACK_INT(IM_RX_OVRN_INT); |
1361 | lp->stats.rx_errors++; | 1354 | dev->stats.rx_errors++; |
1362 | lp->stats.rx_fifo_errors++; | 1355 | dev->stats.rx_fifo_errors++; |
1363 | } else if (status & IM_EPH_INT) { | 1356 | } else if (status & IM_EPH_INT) { |
1364 | smc_eph_interrupt(dev); | 1357 | smc_eph_interrupt(dev); |
1365 | } else if (status & IM_MDINT) { | 1358 | } else if (status & IM_MDINT) { |
@@ -1628,19 +1621,6 @@ static int smc_close(struct net_device *dev) | |||
1628 | } | 1621 | } |
1629 | 1622 | ||
1630 | /* | 1623 | /* |
1631 | * Get the current statistics. | ||
1632 | * This may be called with the card open or closed. | ||
1633 | */ | ||
1634 | static struct net_device_stats *smc_query_statistics(struct net_device *dev) | ||
1635 | { | ||
1636 | struct smc_local *lp = netdev_priv(dev); | ||
1637 | |||
1638 | DBG(2, "%s: %s\n", dev->name, __FUNCTION__); | ||
1639 | |||
1640 | return &lp->stats; | ||
1641 | } | ||
1642 | |||
1643 | /* | ||
1644 | * Ethtool support | 1624 | * Ethtool support |
1645 | */ | 1625 | */ |
1646 | static int | 1626 | static int |
@@ -1965,7 +1945,6 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr) | |||
1965 | dev->hard_start_xmit = smc_hard_start_xmit; | 1945 | dev->hard_start_xmit = smc_hard_start_xmit; |
1966 | dev->tx_timeout = smc_timeout; | 1946 | dev->tx_timeout = smc_timeout; |
1967 | dev->watchdog_timeo = msecs_to_jiffies(watchdog); | 1947 | dev->watchdog_timeo = msecs_to_jiffies(watchdog); |
1968 | dev->get_stats = smc_query_statistics; | ||
1969 | dev->set_multicast_list = smc_set_multicast_list; | 1948 | dev->set_multicast_list = smc_set_multicast_list; |
1970 | dev->ethtool_ops = &smc_ethtool_ops; | 1949 | dev->ethtool_ops = &smc_ethtool_ops; |
1971 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1950 | #ifdef CONFIG_NET_POLL_CONTROLLER |
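smc91x also reads a counter back: the rate-limited duplex warning now tests dev->stats.tx_window_errors instead of the private copy. The same pattern in miniature (toy_tx_collision is a made-up helper; the header providing net_ratelimit() may differ between kernel versions):

    #include <linux/kernel.h>
    #include <linux/net.h>          /* net_ratelimit() */
    #include <linux/netdevice.h>

    static void toy_tx_collision(struct net_device *dev)
    {
            dev->stats.tx_window_errors++;

            /* Reads work the same as writes: the counter that used to be
             * lp->stats.tx_window_errors is now shared state on the netdev. */
            if (!(dev->stats.tx_window_errors & 63) && net_ratelimit())
                    printk(KERN_INFO "%s: many late collisions, check duplex\n",
                           dev->name);
    }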
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index edc736eb3b86..fab055ffcc90 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -795,6 +795,7 @@ spider_net_set_low_watermark(struct spider_net_card *card) | |||
795 | static int | 795 | static int |
796 | spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | 796 | spider_net_release_tx_chain(struct spider_net_card *card, int brutal) |
797 | { | 797 | { |
798 | struct net_device *dev = card->netdev; | ||
798 | struct spider_net_descr_chain *chain = &card->tx_chain; | 799 | struct spider_net_descr_chain *chain = &card->tx_chain; |
799 | struct spider_net_descr *descr; | 800 | struct spider_net_descr *descr; |
800 | struct spider_net_hw_descr *hwdescr; | 801 | struct spider_net_hw_descr *hwdescr; |
@@ -815,8 +816,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | |||
815 | status = spider_net_get_descr_status(hwdescr); | 816 | status = spider_net_get_descr_status(hwdescr); |
816 | switch (status) { | 817 | switch (status) { |
817 | case SPIDER_NET_DESCR_COMPLETE: | 818 | case SPIDER_NET_DESCR_COMPLETE: |
818 | card->netdev_stats.tx_packets++; | 819 | dev->stats.tx_packets++; |
819 | card->netdev_stats.tx_bytes += descr->skb->len; | 820 | dev->stats.tx_bytes += descr->skb->len; |
820 | break; | 821 | break; |
821 | 822 | ||
822 | case SPIDER_NET_DESCR_CARDOWNED: | 823 | case SPIDER_NET_DESCR_CARDOWNED: |
@@ -835,11 +836,11 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | |||
835 | if (netif_msg_tx_err(card)) | 836 | if (netif_msg_tx_err(card)) |
836 | dev_err(&card->netdev->dev, "forcing end of tx descriptor " | 837 | dev_err(&card->netdev->dev, "forcing end of tx descriptor " |
837 | "with status x%02x\n", status); | 838 | "with status x%02x\n", status); |
838 | card->netdev_stats.tx_errors++; | 839 | dev->stats.tx_errors++; |
839 | break; | 840 | break; |
840 | 841 | ||
841 | default: | 842 | default: |
842 | card->netdev_stats.tx_dropped++; | 843 | dev->stats.tx_dropped++; |
843 | if (!brutal) { | 844 | if (!brutal) { |
844 | spin_unlock_irqrestore(&chain->lock, flags); | 845 | spin_unlock_irqrestore(&chain->lock, flags); |
845 | return 1; | 846 | return 1; |
@@ -919,7 +920,7 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
919 | spider_net_release_tx_chain(card, 0); | 920 | spider_net_release_tx_chain(card, 0); |
920 | 921 | ||
921 | if (spider_net_prepare_tx_descr(card, skb) != 0) { | 922 | if (spider_net_prepare_tx_descr(card, skb) != 0) { |
922 | card->netdev_stats.tx_dropped++; | 923 | netdev->stats.tx_dropped++; |
923 | netif_stop_queue(netdev); | 924 | netif_stop_queue(netdev); |
924 | return NETDEV_TX_BUSY; | 925 | return NETDEV_TX_BUSY; |
925 | } | 926 | } |
@@ -979,16 +980,12 @@ static void | |||
979 | spider_net_pass_skb_up(struct spider_net_descr *descr, | 980 | spider_net_pass_skb_up(struct spider_net_descr *descr, |
980 | struct spider_net_card *card) | 981 | struct spider_net_card *card) |
981 | { | 982 | { |
982 | struct spider_net_hw_descr *hwdescr= descr->hwdescr; | 983 | struct spider_net_hw_descr *hwdescr = descr->hwdescr; |
983 | struct sk_buff *skb; | 984 | struct sk_buff *skb = descr->skb; |
984 | struct net_device *netdev; | 985 | struct net_device *netdev = card->netdev; |
985 | u32 data_status, data_error; | 986 | u32 data_status = hwdescr->data_status; |
986 | 987 | u32 data_error = hwdescr->data_error; | |
987 | data_status = hwdescr->data_status; | ||
988 | data_error = hwdescr->data_error; | ||
989 | netdev = card->netdev; | ||
990 | 988 | ||
991 | skb = descr->skb; | ||
992 | skb_put(skb, hwdescr->valid_size); | 989 | skb_put(skb, hwdescr->valid_size); |
993 | 990 | ||
994 | /* the card seems to add 2 bytes of junk in front | 991 | /* the card seems to add 2 bytes of junk in front |
@@ -1015,8 +1012,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, | |||
1015 | } | 1012 | } |
1016 | 1013 | ||
1017 | /* update netdevice statistics */ | 1014 | /* update netdevice statistics */ |
1018 | card->netdev_stats.rx_packets++; | 1015 | netdev->stats.rx_packets++; |
1019 | card->netdev_stats.rx_bytes += skb->len; | 1016 | netdev->stats.rx_bytes += skb->len; |
1020 | 1017 | ||
1021 | /* pass skb up to stack */ | 1018 | /* pass skb up to stack */ |
1022 | netif_receive_skb(skb); | 1019 | netif_receive_skb(skb); |
@@ -1184,6 +1181,7 @@ static int spider_net_resync_tail_ptr(struct spider_net_card *card) | |||
1184 | static int | 1181 | static int |
1185 | spider_net_decode_one_descr(struct spider_net_card *card) | 1182 | spider_net_decode_one_descr(struct spider_net_card *card) |
1186 | { | 1183 | { |
1184 | struct net_device *dev = card->netdev; | ||
1187 | struct spider_net_descr_chain *chain = &card->rx_chain; | 1185 | struct spider_net_descr_chain *chain = &card->rx_chain; |
1188 | struct spider_net_descr *descr = chain->tail; | 1186 | struct spider_net_descr *descr = chain->tail; |
1189 | struct spider_net_hw_descr *hwdescr = descr->hwdescr; | 1187 | struct spider_net_hw_descr *hwdescr = descr->hwdescr; |
@@ -1210,9 +1208,9 @@ spider_net_decode_one_descr(struct spider_net_card *card) | |||
1210 | (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || | 1208 | (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || |
1211 | (status == SPIDER_NET_DESCR_FORCE_END) ) { | 1209 | (status == SPIDER_NET_DESCR_FORCE_END) ) { |
1212 | if (netif_msg_rx_err(card)) | 1210 | if (netif_msg_rx_err(card)) |
1213 | dev_err(&card->netdev->dev, | 1211 | dev_err(&dev->dev, |
1214 | "dropping RX descriptor with state %d\n", status); | 1212 | "dropping RX descriptor with state %d\n", status); |
1215 | card->netdev_stats.rx_dropped++; | 1213 | dev->stats.rx_dropped++; |
1216 | goto bad_desc; | 1214 | goto bad_desc; |
1217 | } | 1215 | } |
1218 | 1216 | ||
@@ -1315,20 +1313,6 @@ static int spider_net_poll(struct napi_struct *napi, int budget) | |||
1315 | } | 1313 | } |
1316 | 1314 | ||
1317 | /** | 1315 | /** |
1318 | * spider_net_get_stats - get interface statistics | ||
1319 | * @netdev: interface device structure | ||
1320 | * | ||
1321 | * returns the interface statistics residing in the spider_net_card struct | ||
1322 | */ | ||
1323 | static struct net_device_stats * | ||
1324 | spider_net_get_stats(struct net_device *netdev) | ||
1325 | { | ||
1326 | struct spider_net_card *card = netdev_priv(netdev); | ||
1327 | struct net_device_stats *stats = &card->netdev_stats; | ||
1328 | return stats; | ||
1329 | } | ||
1330 | |||
1331 | /** | ||
1332 | * spider_net_change_mtu - changes the MTU of an interface | 1316 | * spider_net_change_mtu - changes the MTU of an interface |
1333 | * @netdev: interface device structure | 1317 | * @netdev: interface device structure |
1334 | * @new_mtu: new MTU value | 1318 | * @new_mtu: new MTU value |
@@ -2290,7 +2274,6 @@ spider_net_setup_netdev_ops(struct net_device *netdev) | |||
2290 | netdev->open = &spider_net_open; | 2274 | netdev->open = &spider_net_open; |
2291 | netdev->stop = &spider_net_stop; | 2275 | netdev->stop = &spider_net_stop; |
2292 | netdev->hard_start_xmit = &spider_net_xmit; | 2276 | netdev->hard_start_xmit = &spider_net_xmit; |
2293 | netdev->get_stats = &spider_net_get_stats; | ||
2294 | netdev->set_multicast_list = &spider_net_set_multi; | 2277 | netdev->set_multicast_list = &spider_net_set_multi; |
2295 | netdev->set_mac_address = &spider_net_set_mac; | 2278 | netdev->set_mac_address = &spider_net_set_mac; |
2296 | netdev->change_mtu = &spider_net_change_mtu; | 2279 | netdev->change_mtu = &spider_net_change_mtu; |
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index a2fcdebc3790..a897beee7d5d 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h | |||
@@ -487,7 +487,6 @@ struct spider_net_card { | |||
487 | 487 | ||
488 | /* for ethtool */ | 488 | /* for ethtool */ |
489 | int msg_enable; | 489 | int msg_enable; |
490 | struct net_device_stats netdev_stats; | ||
491 | struct spider_net_extra_stats spider_stats; | 490 | struct spider_net_extra_stats spider_stats; |
492 | struct spider_net_options options; | 491 | struct spider_net_options options; |
493 | 492 | ||
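spider_net touches both the .c and .h file: the netdev_stats member leaves struct spider_net_card, and the longer functions pick up a local struct net_device *dev = card->netdev so the per-descriptor accounting stays terse. A toy version of that arrangement (toy_card/toy_release_tx are stand-ins, not the spider_net structures):

    #include <linux/netdevice.h>

    /* Toy stand-in: only the back-pointer remains, no netdev_stats member. */
    struct toy_card {
            struct net_device *netdev;
    };

    static int toy_release_tx(struct toy_card *card, int completed, int dropped)
    {
            struct net_device *dev = card->netdev; /* keeps the loop body terse */

            dev->stats.tx_packets += completed;
            dev->stats.tx_dropped += dropped;
            return completed;
    }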
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c index c67632dcac49..f8fbc0492706 100644 --- a/drivers/net/sun3lance.c +++ b/drivers/net/sun3lance.c | |||
@@ -152,7 +152,6 @@ struct lance_private { | |||
152 | struct lance_memory *mem; | 152 | struct lance_memory *mem; |
153 | int new_rx, new_tx; /* The next free ring entry */ | 153 | int new_rx, new_tx; /* The next free ring entry */ |
154 | int old_tx, old_rx; /* ring entry to be processed */ | 154 | int old_tx, old_rx; /* ring entry to be processed */ |
155 | struct net_device_stats stats; | ||
156 | /* These two must be longs for set_bit() */ | 155 | /* These two must be longs for set_bit() */ |
157 | long tx_full; | 156 | long tx_full; |
158 | long lock; | 157 | long lock; |
@@ -241,7 +240,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); | |||
241 | static irqreturn_t lance_interrupt( int irq, void *dev_id); | 240 | static irqreturn_t lance_interrupt( int irq, void *dev_id); |
242 | static int lance_rx( struct net_device *dev ); | 241 | static int lance_rx( struct net_device *dev ); |
243 | static int lance_close( struct net_device *dev ); | 242 | static int lance_close( struct net_device *dev ); |
244 | static struct net_device_stats *lance_get_stats( struct net_device *dev ); | ||
245 | static void set_multicast_list( struct net_device *dev ); | 243 | static void set_multicast_list( struct net_device *dev ); |
246 | 244 | ||
247 | /************************* End of Prototypes **************************/ | 245 | /************************* End of Prototypes **************************/ |
@@ -401,15 +399,12 @@ static int __init lance_probe( struct net_device *dev) | |||
401 | dev->open = &lance_open; | 399 | dev->open = &lance_open; |
402 | dev->hard_start_xmit = &lance_start_xmit; | 400 | dev->hard_start_xmit = &lance_start_xmit; |
403 | dev->stop = &lance_close; | 401 | dev->stop = &lance_close; |
404 | dev->get_stats = &lance_get_stats; | ||
405 | dev->set_multicast_list = &set_multicast_list; | 402 | dev->set_multicast_list = &set_multicast_list; |
406 | dev->set_mac_address = NULL; | 403 | dev->set_mac_address = NULL; |
407 | // KLUDGE -- REMOVE ME | 404 | // KLUDGE -- REMOVE ME |
408 | set_bit(__LINK_STATE_PRESENT, &dev->state); | 405 | set_bit(__LINK_STATE_PRESENT, &dev->state); |
409 | 406 | ||
410 | 407 | ||
411 | memset( &lp->stats, 0, sizeof(lp->stats) ); | ||
412 | |||
413 | return 1; | 408 | return 1; |
414 | } | 409 | } |
415 | 410 | ||
@@ -534,7 +529,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) | |||
534 | * little endian mode. | 529 | * little endian mode. |
535 | */ | 530 | */ |
536 | REGA(CSR3) = CSR3_BSWP; | 531 | REGA(CSR3) = CSR3_BSWP; |
537 | lp->stats.tx_errors++; | 532 | dev->stats.tx_errors++; |
538 | 533 | ||
539 | if(lance_debug >= 2) { | 534 | if(lance_debug >= 2) { |
540 | int i; | 535 | int i; |
@@ -634,7 +629,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) | |||
634 | 629 | ||
635 | head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; | 630 | head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; |
636 | lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK; | 631 | lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK; |
637 | lp->stats.tx_bytes += skb->len; | 632 | dev->stats.tx_bytes += skb->len; |
638 | 633 | ||
639 | /* Trigger an immediate send poll. */ | 634 | /* Trigger an immediate send poll. */ |
640 | REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT; | 635 | REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT; |
@@ -712,12 +707,12 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id) | |||
712 | 707 | ||
713 | if (head->flag & TMD1_ERR) { | 708 | if (head->flag & TMD1_ERR) { |
714 | int status = head->misc; | 709 | int status = head->misc; |
715 | lp->stats.tx_errors++; | 710 | dev->stats.tx_errors++; |
716 | if (status & TMD3_RTRY) lp->stats.tx_aborted_errors++; | 711 | if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++; |
717 | if (status & TMD3_LCAR) lp->stats.tx_carrier_errors++; | 712 | if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++; |
718 | if (status & TMD3_LCOL) lp->stats.tx_window_errors++; | 713 | if (status & TMD3_LCOL) dev->stats.tx_window_errors++; |
719 | if (status & (TMD3_UFLO | TMD3_BUFF)) { | 714 | if (status & (TMD3_UFLO | TMD3_BUFF)) { |
720 | lp->stats.tx_fifo_errors++; | 715 | dev->stats.tx_fifo_errors++; |
721 | printk("%s: Tx FIFO error\n", | 716 | printk("%s: Tx FIFO error\n", |
722 | dev->name); | 717 | dev->name); |
723 | REGA(CSR0) = CSR0_STOP; | 718 | REGA(CSR0) = CSR0_STOP; |
@@ -730,9 +725,9 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id) | |||
730 | 725 | ||
731 | head->flag &= ~(TMD1_ENP | TMD1_STP); | 726 | head->flag &= ~(TMD1_ENP | TMD1_STP); |
732 | if(head->flag & (TMD1_ONE | TMD1_MORE)) | 727 | if(head->flag & (TMD1_ONE | TMD1_MORE)) |
733 | lp->stats.collisions++; | 728 | dev->stats.collisions++; |
734 | 729 | ||
735 | lp->stats.tx_packets++; | 730 | dev->stats.tx_packets++; |
736 | DPRINTK(3, ("cleared tx ring %d\n", old_tx)); | 731 | DPRINTK(3, ("cleared tx ring %d\n", old_tx)); |
737 | } | 732 | } |
738 | old_tx = (old_tx +1) & TX_RING_MOD_MASK; | 733 | old_tx = (old_tx +1) & TX_RING_MOD_MASK; |
@@ -752,8 +747,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id) | |||
752 | lance_rx( dev ); | 747 | lance_rx( dev ); |
753 | 748 | ||
754 | /* Log misc errors. */ | 749 | /* Log misc errors. */ |
755 | if (csr0 & CSR0_BABL) lp->stats.tx_errors++; /* Tx babble. */ | 750 | if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ |
756 | if (csr0 & CSR0_MISS) lp->stats.rx_errors++; /* Missed a Rx frame. */ | 751 | if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ |
757 | if (csr0 & CSR0_MERR) { | 752 | if (csr0 & CSR0_MERR) { |
758 | DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " | 753 | DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " |
759 | "status %04x.\n", dev->name, csr0 )); | 754 | "status %04x.\n", dev->name, csr0 )); |
@@ -799,11 +794,11 @@ static int lance_rx( struct net_device *dev ) | |||
799 | full-sized buffers it's possible for a jabber packet to use two | 794 | full-sized buffers it's possible for a jabber packet to use two |
800 | buffers, with only the last correctly noting the error. */ | 795 | buffers, with only the last correctly noting the error. */ |
801 | if (status & RMD1_ENP) /* Only count a general error at the */ | 796 | if (status & RMD1_ENP) /* Only count a general error at the */ |
802 | lp->stats.rx_errors++; /* end of a packet.*/ | 797 | dev->stats.rx_errors++; /* end of a packet.*/ |
803 | if (status & RMD1_FRAM) lp->stats.rx_frame_errors++; | 798 | if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; |
804 | if (status & RMD1_OFLO) lp->stats.rx_over_errors++; | 799 | if (status & RMD1_OFLO) dev->stats.rx_over_errors++; |
805 | if (status & RMD1_CRC) lp->stats.rx_crc_errors++; | 800 | if (status & RMD1_CRC) dev->stats.rx_crc_errors++; |
806 | if (status & RMD1_BUFF) lp->stats.rx_fifo_errors++; | 801 | if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; |
807 | head->flag &= (RMD1_ENP|RMD1_STP); | 802 | head->flag &= (RMD1_ENP|RMD1_STP); |
808 | } else { | 803 | } else { |
809 | /* Malloc up new buffer, compatible with net-3. */ | 804 | /* Malloc up new buffer, compatible with net-3. */ |
@@ -813,7 +808,7 @@ static int lance_rx( struct net_device *dev ) | |||
813 | 808 | ||
814 | if (pkt_len < 60) { | 809 | if (pkt_len < 60) { |
815 | printk( "%s: Runt packet!\n", dev->name ); | 810 | printk( "%s: Runt packet!\n", dev->name ); |
816 | lp->stats.rx_errors++; | 811 | dev->stats.rx_errors++; |
817 | } | 812 | } |
818 | else { | 813 | else { |
819 | skb = dev_alloc_skb( pkt_len+2 ); | 814 | skb = dev_alloc_skb( pkt_len+2 ); |
@@ -821,7 +816,7 @@ static int lance_rx( struct net_device *dev ) | |||
821 | DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", | 816 | DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", |
822 | dev->name )); | 817 | dev->name )); |
823 | 818 | ||
824 | lp->stats.rx_dropped++; | 819 | dev->stats.rx_dropped++; |
825 | head->msg_length = 0; | 820 | head->msg_length = 0; |
826 | head->flag |= RMD1_OWN_CHIP; | 821 | head->flag |= RMD1_OWN_CHIP; |
827 | lp->new_rx = (lp->new_rx+1) & | 822 | lp->new_rx = (lp->new_rx+1) & |
@@ -859,8 +854,8 @@ static int lance_rx( struct net_device *dev ) | |||
859 | skb->protocol = eth_type_trans( skb, dev ); | 854 | skb->protocol = eth_type_trans( skb, dev ); |
860 | netif_rx( skb ); | 855 | netif_rx( skb ); |
861 | dev->last_rx = jiffies; | 856 | dev->last_rx = jiffies; |
862 | lp->stats.rx_packets++; | 857 | dev->stats.rx_packets++; |
863 | lp->stats.rx_bytes += pkt_len; | 858 | dev->stats.rx_bytes += pkt_len; |
864 | } | 859 | } |
865 | } | 860 | } |
866 | 861 | ||
@@ -897,14 +892,6 @@ static int lance_close( struct net_device *dev ) | |||
897 | } | 892 | } |
898 | 893 | ||
899 | 894 | ||
900 | static struct net_device_stats *lance_get_stats( struct net_device *dev ) | ||
901 | { | ||
902 | struct lance_private *lp = netdev_priv(dev); | ||
903 | |||
904 | return &lp->stats; | ||
905 | } | ||
906 | |||
907 | |||
908 | /* Set or clear the multicast filter for this adaptor. | 895 | /* Set or clear the multicast filter for this adaptor. |
909 | num_addrs == -1 Promiscuous mode, receive all packets | 896 | num_addrs == -1 Promiscuous mode, receive all packets |
910 | num_addrs == 0 Normal mode, clear multicast list | 897 | num_addrs == 0 Normal mode, clear multicast list |
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 17d66c1185cd..7bf5c90b7749 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c | |||
@@ -248,7 +248,6 @@ struct lance_private { | |||
248 | int rx_new, tx_new; | 248 | int rx_new, tx_new; |
249 | int rx_old, tx_old; | 249 | int rx_old, tx_old; |
250 | 250 | ||
251 | struct net_device_stats stats; | ||
252 | struct sbus_dma *ledma; /* If set this points to ledma */ | 251 | struct sbus_dma *ledma; /* If set this points to ledma */ |
253 | char tpe; /* cable-selection is TPE */ | 252 | char tpe; /* cable-selection is TPE */ |
254 | char auto_select; /* cable-selection by carrier */ | 253 | char auto_select; /* cable-selection by carrier */ |
@@ -519,17 +518,17 @@ static void lance_rx_dvma(struct net_device *dev) | |||
519 | 518 | ||
520 | /* We got an incomplete frame? */ | 519 | /* We got an incomplete frame? */ |
521 | if ((bits & LE_R1_POK) != LE_R1_POK) { | 520 | if ((bits & LE_R1_POK) != LE_R1_POK) { |
522 | lp->stats.rx_over_errors++; | 521 | dev->stats.rx_over_errors++; |
523 | lp->stats.rx_errors++; | 522 | dev->stats.rx_errors++; |
524 | } else if (bits & LE_R1_ERR) { | 523 | } else if (bits & LE_R1_ERR) { |
525 | /* Count only the end frame as a rx error, | 524 | /* Count only the end frame as a rx error, |
526 | * not the beginning | 525 | * not the beginning |
527 | */ | 526 | */ |
528 | if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++; | 527 | if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; |
529 | if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++; | 528 | if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; |
530 | if (bits & LE_R1_OFL) lp->stats.rx_over_errors++; | 529 | if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; |
531 | if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++; | 530 | if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; |
532 | if (bits & LE_R1_EOP) lp->stats.rx_errors++; | 531 | if (bits & LE_R1_EOP) dev->stats.rx_errors++; |
533 | } else { | 532 | } else { |
534 | len = (rd->mblength & 0xfff) - 4; | 533 | len = (rd->mblength & 0xfff) - 4; |
535 | skb = dev_alloc_skb(len + 2); | 534 | skb = dev_alloc_skb(len + 2); |
@@ -537,14 +536,14 @@ static void lance_rx_dvma(struct net_device *dev) | |||
537 | if (skb == NULL) { | 536 | if (skb == NULL) { |
538 | printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", | 537 | printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", |
539 | dev->name); | 538 | dev->name); |
540 | lp->stats.rx_dropped++; | 539 | dev->stats.rx_dropped++; |
541 | rd->mblength = 0; | 540 | rd->mblength = 0; |
542 | rd->rmd1_bits = LE_R1_OWN; | 541 | rd->rmd1_bits = LE_R1_OWN; |
543 | lp->rx_new = RX_NEXT(entry); | 542 | lp->rx_new = RX_NEXT(entry); |
544 | return; | 543 | return; |
545 | } | 544 | } |
546 | 545 | ||
547 | lp->stats.rx_bytes += len; | 546 | dev->stats.rx_bytes += len; |
548 | 547 | ||
549 | skb_reserve(skb, 2); /* 16 byte align */ | 548 | skb_reserve(skb, 2); /* 16 byte align */ |
550 | skb_put(skb, len); /* make room */ | 549 | skb_put(skb, len); /* make room */ |
@@ -554,7 +553,7 @@ static void lance_rx_dvma(struct net_device *dev) | |||
554 | skb->protocol = eth_type_trans(skb, dev); | 553 | skb->protocol = eth_type_trans(skb, dev); |
555 | netif_rx(skb); | 554 | netif_rx(skb); |
556 | dev->last_rx = jiffies; | 555 | dev->last_rx = jiffies; |
557 | lp->stats.rx_packets++; | 556 | dev->stats.rx_packets++; |
558 | } | 557 | } |
559 | 558 | ||
560 | /* Return the packet to the pool */ | 559 | /* Return the packet to the pool */ |
@@ -586,12 +585,12 @@ static void lance_tx_dvma(struct net_device *dev) | |||
586 | if (bits & LE_T1_ERR) { | 585 | if (bits & LE_T1_ERR) { |
587 | u16 status = td->misc; | 586 | u16 status = td->misc; |
588 | 587 | ||
589 | lp->stats.tx_errors++; | 588 | dev->stats.tx_errors++; |
590 | if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; | 589 | if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; |
591 | if (status & LE_T3_LCOL) lp->stats.tx_window_errors++; | 590 | if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; |
592 | 591 | ||
593 | if (status & LE_T3_CLOS) { | 592 | if (status & LE_T3_CLOS) { |
594 | lp->stats.tx_carrier_errors++; | 593 | dev->stats.tx_carrier_errors++; |
595 | if (lp->auto_select) { | 594 | if (lp->auto_select) { |
596 | lp->tpe = 1 - lp->tpe; | 595 | lp->tpe = 1 - lp->tpe; |
597 | printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", | 596 | printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", |
@@ -608,7 +607,7 @@ static void lance_tx_dvma(struct net_device *dev) | |||
608 | * transmitter, restart the adapter. | 607 | * transmitter, restart the adapter. |
609 | */ | 608 | */ |
610 | if (status & (LE_T3_BUF|LE_T3_UFL)) { | 609 | if (status & (LE_T3_BUF|LE_T3_UFL)) { |
611 | lp->stats.tx_fifo_errors++; | 610 | dev->stats.tx_fifo_errors++; |
612 | 611 | ||
613 | printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", | 612 | printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", |
614 | dev->name); | 613 | dev->name); |
@@ -626,13 +625,13 @@ static void lance_tx_dvma(struct net_device *dev) | |||
626 | 625 | ||
627 | /* One collision before packet was sent. */ | 626 | /* One collision before packet was sent. */ |
628 | if (bits & LE_T1_EONE) | 627 | if (bits & LE_T1_EONE) |
629 | lp->stats.collisions++; | 628 | dev->stats.collisions++; |
630 | 629 | ||
631 | /* More than one collision, be optimistic. */ | 630 | /* More than one collision, be optimistic. */ |
632 | if (bits & LE_T1_EMORE) | 631 | if (bits & LE_T1_EMORE) |
633 | lp->stats.collisions += 2; | 632 | dev->stats.collisions += 2; |
634 | 633 | ||
635 | lp->stats.tx_packets++; | 634 | dev->stats.tx_packets++; |
636 | } | 635 | } |
637 | 636 | ||
638 | j = TX_NEXT(j); | 637 | j = TX_NEXT(j); |
@@ -692,17 +691,17 @@ static void lance_rx_pio(struct net_device *dev) | |||
692 | 691 | ||
693 | /* We got an incomplete frame? */ | 692 | /* We got an incomplete frame? */ |
694 | if ((bits & LE_R1_POK) != LE_R1_POK) { | 693 | if ((bits & LE_R1_POK) != LE_R1_POK) { |
695 | lp->stats.rx_over_errors++; | 694 | dev->stats.rx_over_errors++; |
696 | lp->stats.rx_errors++; | 695 | dev->stats.rx_errors++; |
697 | } else if (bits & LE_R1_ERR) { | 696 | } else if (bits & LE_R1_ERR) { |
698 | /* Count only the end frame as a rx error, | 697 | /* Count only the end frame as a rx error, |
699 | * not the beginning | 698 | * not the beginning |
700 | */ | 699 | */ |
701 | if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++; | 700 | if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; |
702 | if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++; | 701 | if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; |
703 | if (bits & LE_R1_OFL) lp->stats.rx_over_errors++; | 702 | if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; |
704 | if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++; | 703 | if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; |
705 | if (bits & LE_R1_EOP) lp->stats.rx_errors++; | 704 | if (bits & LE_R1_EOP) dev->stats.rx_errors++; |
706 | } else { | 705 | } else { |
707 | len = (sbus_readw(&rd->mblength) & 0xfff) - 4; | 706 | len = (sbus_readw(&rd->mblength) & 0xfff) - 4; |
708 | skb = dev_alloc_skb(len + 2); | 707 | skb = dev_alloc_skb(len + 2); |
@@ -710,14 +709,14 @@ static void lance_rx_pio(struct net_device *dev) | |||
710 | if (skb == NULL) { | 709 | if (skb == NULL) { |
711 | printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", | 710 | printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", |
712 | dev->name); | 711 | dev->name); |
713 | lp->stats.rx_dropped++; | 712 | dev->stats.rx_dropped++; |
714 | sbus_writew(0, &rd->mblength); | 713 | sbus_writew(0, &rd->mblength); |
715 | sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); | 714 | sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); |
716 | lp->rx_new = RX_NEXT(entry); | 715 | lp->rx_new = RX_NEXT(entry); |
717 | return; | 716 | return; |
718 | } | 717 | } |
719 | 718 | ||
720 | lp->stats.rx_bytes += len; | 719 | dev->stats.rx_bytes += len; |
721 | 720 | ||
722 | skb_reserve (skb, 2); /* 16 byte align */ | 721 | skb_reserve (skb, 2); /* 16 byte align */ |
723 | skb_put(skb, len); /* make room */ | 722 | skb_put(skb, len); /* make room */ |
@@ -725,7 +724,7 @@ static void lance_rx_pio(struct net_device *dev) | |||
725 | skb->protocol = eth_type_trans(skb, dev); | 724 | skb->protocol = eth_type_trans(skb, dev); |
726 | netif_rx(skb); | 725 | netif_rx(skb); |
727 | dev->last_rx = jiffies; | 726 | dev->last_rx = jiffies; |
728 | lp->stats.rx_packets++; | 727 | dev->stats.rx_packets++; |
729 | } | 728 | } |
730 | 729 | ||
731 | /* Return the packet to the pool */ | 730 | /* Return the packet to the pool */ |
@@ -757,12 +756,12 @@ static void lance_tx_pio(struct net_device *dev) | |||
757 | if (bits & LE_T1_ERR) { | 756 | if (bits & LE_T1_ERR) { |
758 | u16 status = sbus_readw(&td->misc); | 757 | u16 status = sbus_readw(&td->misc); |
759 | 758 | ||
760 | lp->stats.tx_errors++; | 759 | dev->stats.tx_errors++; |
761 | if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; | 760 | if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; |
762 | if (status & LE_T3_LCOL) lp->stats.tx_window_errors++; | 761 | if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; |
763 | 762 | ||
764 | if (status & LE_T3_CLOS) { | 763 | if (status & LE_T3_CLOS) { |
765 | lp->stats.tx_carrier_errors++; | 764 | dev->stats.tx_carrier_errors++; |
766 | if (lp->auto_select) { | 765 | if (lp->auto_select) { |
767 | lp->tpe = 1 - lp->tpe; | 766 | lp->tpe = 1 - lp->tpe; |
768 | printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", | 767 | printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", |
@@ -779,7 +778,7 @@ static void lance_tx_pio(struct net_device *dev) | |||
779 | * transmitter, restart the adapter. | 778 | * transmitter, restart the adapter. |
780 | */ | 779 | */ |
781 | if (status & (LE_T3_BUF|LE_T3_UFL)) { | 780 | if (status & (LE_T3_BUF|LE_T3_UFL)) { |
782 | lp->stats.tx_fifo_errors++; | 781 | dev->stats.tx_fifo_errors++; |
783 | 782 | ||
784 | printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", | 783 | printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", |
785 | dev->name); | 784 | dev->name); |
@@ -797,13 +796,13 @@ static void lance_tx_pio(struct net_device *dev) | |||
797 | 796 | ||
798 | /* One collision before packet was sent. */ | 797 | /* One collision before packet was sent. */ |
799 | if (bits & LE_T1_EONE) | 798 | if (bits & LE_T1_EONE) |
800 | lp->stats.collisions++; | 799 | dev->stats.collisions++; |
801 | 800 | ||
802 | /* More than one collision, be optimistic. */ | 801 | /* More than one collision, be optimistic. */ |
803 | if (bits & LE_T1_EMORE) | 802 | if (bits & LE_T1_EMORE) |
804 | lp->stats.collisions += 2; | 803 | dev->stats.collisions += 2; |
805 | 804 | ||
806 | lp->stats.tx_packets++; | 805 | dev->stats.tx_packets++; |
807 | } | 806 | } |
808 | 807 | ||
809 | j = TX_NEXT(j); | 808 | j = TX_NEXT(j); |
@@ -844,10 +843,10 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id) | |||
844 | lp->tx(dev); | 843 | lp->tx(dev); |
845 | 844 | ||
846 | if (csr0 & LE_C0_BABL) | 845 | if (csr0 & LE_C0_BABL) |
847 | lp->stats.tx_errors++; | 846 | dev->stats.tx_errors++; |
848 | 847 | ||
849 | if (csr0 & LE_C0_MISS) | 848 | if (csr0 & LE_C0_MISS) |
850 | lp->stats.rx_errors++; | 849 | dev->stats.rx_errors++; |
851 | 850 | ||
852 | if (csr0 & LE_C0_MERR) { | 851 | if (csr0 & LE_C0_MERR) { |
853 | if (lp->dregs) { | 852 | if (lp->dregs) { |
@@ -1127,7 +1126,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1127 | 1126 | ||
1128 | spin_lock_irq(&lp->lock); | 1127 | spin_lock_irq(&lp->lock); |
1129 | 1128 | ||
1130 | lp->stats.tx_bytes += len; | 1129 | dev->stats.tx_bytes += len; |
1131 | 1130 | ||
1132 | entry = lp->tx_new & TX_RING_MOD_MASK; | 1131 | entry = lp->tx_new & TX_RING_MOD_MASK; |
1133 | if (lp->pio_buffer) { | 1132 | if (lp->pio_buffer) { |
@@ -1170,13 +1169,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1170 | return 0; | 1169 | return 0; |
1171 | } | 1170 | } |
1172 | 1171 | ||
1173 | static struct net_device_stats *lance_get_stats(struct net_device *dev) | ||
1174 | { | ||
1175 | struct lance_private *lp = netdev_priv(dev); | ||
1176 | |||
1177 | return &lp->stats; | ||
1178 | } | ||
1179 | |||
1180 | /* taken from the depca driver */ | 1172 | /* taken from the depca driver */ |
1181 | static void lance_load_multicast(struct net_device *dev) | 1173 | static void lance_load_multicast(struct net_device *dev) |
1182 | { | 1174 | { |
@@ -1463,7 +1455,6 @@ no_link_test: | |||
1463 | dev->hard_start_xmit = &lance_start_xmit; | 1455 | dev->hard_start_xmit = &lance_start_xmit; |
1464 | dev->tx_timeout = &lance_tx_timeout; | 1456 | dev->tx_timeout = &lance_tx_timeout; |
1465 | dev->watchdog_timeo = 5*HZ; | 1457 | dev->watchdog_timeo = 5*HZ; |
1466 | dev->get_stats = &lance_get_stats; | ||
1467 | dev->set_multicast_list = &lance_set_multicast; | 1458 | dev->set_multicast_list = &lance_set_multicast; |
1468 | dev->ethtool_ops = &sparc_lance_ethtool_ops; | 1459 | dev->ethtool_ops = &sparc_lance_ethtool_ops; |
1469 | 1460 | ||
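The hunks above (and those that follow) all apply the same conversion: the driver-private copy of struct net_device_stats and its get_stats hook go away, and the hot paths increment the counters embedded in struct net_device directly. A minimal sketch of the resulting shape, using hypothetical names rather than code from this patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Private data no longer carries its own net_device_stats copy. */
struct mylance_private {
	spinlock_t lock;
	int tx_new, tx_old;
};

static int mylance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mylance_private *lp = netdev_priv(dev);

	spin_lock_irq(&lp->lock);
	/* Counters now live in the net_device itself. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	spin_unlock_irq(&lp->lock);

	dev_kfree_skb(skb);
	return 0;
}

Because the counters are reached through dev, no dev->get_stats assignment is needed in the probe routine, which is why those setup lines are dropped as well.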
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index b5c2974fd625..ff23c6489efd 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c | |||
@@ -260,31 +260,31 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) | |||
260 | 260 | ||
261 | if (qe_status & CREG_STAT_EDEFER) { | 261 | if (qe_status & CREG_STAT_EDEFER) { |
262 | printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name); | 262 | printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name); |
263 | qep->net_stats.tx_errors++; | 263 | dev->stats.tx_errors++; |
264 | } | 264 | } |
265 | 265 | ||
266 | if (qe_status & CREG_STAT_CLOSS) { | 266 | if (qe_status & CREG_STAT_CLOSS) { |
267 | printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name); | 267 | printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name); |
268 | qep->net_stats.tx_errors++; | 268 | dev->stats.tx_errors++; |
269 | qep->net_stats.tx_carrier_errors++; | 269 | dev->stats.tx_carrier_errors++; |
270 | } | 270 | } |
271 | 271 | ||
272 | if (qe_status & CREG_STAT_ERETRIES) { | 272 | if (qe_status & CREG_STAT_ERETRIES) { |
273 | printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name); | 273 | printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name); |
274 | qep->net_stats.tx_errors++; | 274 | dev->stats.tx_errors++; |
275 | mace_hwbug_workaround = 1; | 275 | mace_hwbug_workaround = 1; |
276 | } | 276 | } |
277 | 277 | ||
278 | if (qe_status & CREG_STAT_LCOLL) { | 278 | if (qe_status & CREG_STAT_LCOLL) { |
279 | printk(KERN_ERR "%s: Late transmit collision.\n", dev->name); | 279 | printk(KERN_ERR "%s: Late transmit collision.\n", dev->name); |
280 | qep->net_stats.tx_errors++; | 280 | dev->stats.tx_errors++; |
281 | qep->net_stats.collisions++; | 281 | dev->stats.collisions++; |
282 | mace_hwbug_workaround = 1; | 282 | mace_hwbug_workaround = 1; |
283 | } | 283 | } |
284 | 284 | ||
285 | if (qe_status & CREG_STAT_FUFLOW) { | 285 | if (qe_status & CREG_STAT_FUFLOW) { |
286 | printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name); | 286 | printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name); |
287 | qep->net_stats.tx_errors++; | 287 | dev->stats.tx_errors++; |
288 | mace_hwbug_workaround = 1; | 288 | mace_hwbug_workaround = 1; |
289 | } | 289 | } |
290 | 290 | ||
@@ -297,104 +297,104 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) | |||
297 | } | 297 | } |
298 | 298 | ||
299 | if (qe_status & CREG_STAT_CCOFLOW) { | 299 | if (qe_status & CREG_STAT_CCOFLOW) { |
300 | qep->net_stats.tx_errors += 256; | 300 | dev->stats.tx_errors += 256; |
301 | qep->net_stats.collisions += 256; | 301 | dev->stats.collisions += 256; |
302 | } | 302 | } |
303 | 303 | ||
304 | if (qe_status & CREG_STAT_TXDERROR) { | 304 | if (qe_status & CREG_STAT_TXDERROR) { |
305 | printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name); | 305 | printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name); |
306 | qep->net_stats.tx_errors++; | 306 | dev->stats.tx_errors++; |
307 | qep->net_stats.tx_aborted_errors++; | 307 | dev->stats.tx_aborted_errors++; |
308 | mace_hwbug_workaround = 1; | 308 | mace_hwbug_workaround = 1; |
309 | } | 309 | } |
310 | 310 | ||
311 | if (qe_status & CREG_STAT_TXLERR) { | 311 | if (qe_status & CREG_STAT_TXLERR) { |
312 | printk(KERN_ERR "%s: Transmit late error.\n", dev->name); | 312 | printk(KERN_ERR "%s: Transmit late error.\n", dev->name); |
313 | qep->net_stats.tx_errors++; | 313 | dev->stats.tx_errors++; |
314 | mace_hwbug_workaround = 1; | 314 | mace_hwbug_workaround = 1; |
315 | } | 315 | } |
316 | 316 | ||
317 | if (qe_status & CREG_STAT_TXPERR) { | 317 | if (qe_status & CREG_STAT_TXPERR) { |
318 | printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name); | 318 | printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name); |
319 | qep->net_stats.tx_errors++; | 319 | dev->stats.tx_errors++; |
320 | qep->net_stats.tx_aborted_errors++; | 320 | dev->stats.tx_aborted_errors++; |
321 | mace_hwbug_workaround = 1; | 321 | mace_hwbug_workaround = 1; |
322 | } | 322 | } |
323 | 323 | ||
324 | if (qe_status & CREG_STAT_TXSERR) { | 324 | if (qe_status & CREG_STAT_TXSERR) { |
325 | printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name); | 325 | printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name); |
326 | qep->net_stats.tx_errors++; | 326 | dev->stats.tx_errors++; |
327 | qep->net_stats.tx_aborted_errors++; | 327 | dev->stats.tx_aborted_errors++; |
328 | mace_hwbug_workaround = 1; | 328 | mace_hwbug_workaround = 1; |
329 | } | 329 | } |
330 | 330 | ||
331 | if (qe_status & CREG_STAT_RCCOFLOW) { | 331 | if (qe_status & CREG_STAT_RCCOFLOW) { |
332 | qep->net_stats.rx_errors += 256; | 332 | dev->stats.rx_errors += 256; |
333 | qep->net_stats.collisions += 256; | 333 | dev->stats.collisions += 256; |
334 | } | 334 | } |
335 | 335 | ||
336 | if (qe_status & CREG_STAT_RUOFLOW) { | 336 | if (qe_status & CREG_STAT_RUOFLOW) { |
337 | qep->net_stats.rx_errors += 256; | 337 | dev->stats.rx_errors += 256; |
338 | qep->net_stats.rx_over_errors += 256; | 338 | dev->stats.rx_over_errors += 256; |
339 | } | 339 | } |
340 | 340 | ||
341 | if (qe_status & CREG_STAT_MCOFLOW) { | 341 | if (qe_status & CREG_STAT_MCOFLOW) { |
342 | qep->net_stats.rx_errors += 256; | 342 | dev->stats.rx_errors += 256; |
343 | qep->net_stats.rx_missed_errors += 256; | 343 | dev->stats.rx_missed_errors += 256; |
344 | } | 344 | } |
345 | 345 | ||
346 | if (qe_status & CREG_STAT_RXFOFLOW) { | 346 | if (qe_status & CREG_STAT_RXFOFLOW) { |
347 | printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name); | 347 | printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name); |
348 | qep->net_stats.rx_errors++; | 348 | dev->stats.rx_errors++; |
349 | qep->net_stats.rx_over_errors++; | 349 | dev->stats.rx_over_errors++; |
350 | } | 350 | } |
351 | 351 | ||
352 | if (qe_status & CREG_STAT_RLCOLL) { | 352 | if (qe_status & CREG_STAT_RLCOLL) { |
353 | printk(KERN_ERR "%s: Late receive collision.\n", dev->name); | 353 | printk(KERN_ERR "%s: Late receive collision.\n", dev->name); |
354 | qep->net_stats.rx_errors++; | 354 | dev->stats.rx_errors++; |
355 | qep->net_stats.collisions++; | 355 | dev->stats.collisions++; |
356 | } | 356 | } |
357 | 357 | ||
358 | if (qe_status & CREG_STAT_FCOFLOW) { | 358 | if (qe_status & CREG_STAT_FCOFLOW) { |
359 | qep->net_stats.rx_errors += 256; | 359 | dev->stats.rx_errors += 256; |
360 | qep->net_stats.rx_frame_errors += 256; | 360 | dev->stats.rx_frame_errors += 256; |
361 | } | 361 | } |
362 | 362 | ||
363 | if (qe_status & CREG_STAT_CECOFLOW) { | 363 | if (qe_status & CREG_STAT_CECOFLOW) { |
364 | qep->net_stats.rx_errors += 256; | 364 | dev->stats.rx_errors += 256; |
365 | qep->net_stats.rx_crc_errors += 256; | 365 | dev->stats.rx_crc_errors += 256; |
366 | } | 366 | } |
367 | 367 | ||
368 | if (qe_status & CREG_STAT_RXDROP) { | 368 | if (qe_status & CREG_STAT_RXDROP) { |
369 | printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name); | 369 | printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name); |
370 | qep->net_stats.rx_errors++; | 370 | dev->stats.rx_errors++; |
371 | qep->net_stats.rx_dropped++; | 371 | dev->stats.rx_dropped++; |
372 | qep->net_stats.rx_missed_errors++; | 372 | dev->stats.rx_missed_errors++; |
373 | } | 373 | } |
374 | 374 | ||
375 | if (qe_status & CREG_STAT_RXSMALL) { | 375 | if (qe_status & CREG_STAT_RXSMALL) { |
376 | printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name); | 376 | printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name); |
377 | qep->net_stats.rx_errors++; | 377 | dev->stats.rx_errors++; |
378 | qep->net_stats.rx_length_errors++; | 378 | dev->stats.rx_length_errors++; |
379 | } | 379 | } |
380 | 380 | ||
381 | if (qe_status & CREG_STAT_RXLERR) { | 381 | if (qe_status & CREG_STAT_RXLERR) { |
382 | printk(KERN_ERR "%s: Receive late error.\n", dev->name); | 382 | printk(KERN_ERR "%s: Receive late error.\n", dev->name); |
383 | qep->net_stats.rx_errors++; | 383 | dev->stats.rx_errors++; |
384 | mace_hwbug_workaround = 1; | 384 | mace_hwbug_workaround = 1; |
385 | } | 385 | } |
386 | 386 | ||
387 | if (qe_status & CREG_STAT_RXPERR) { | 387 | if (qe_status & CREG_STAT_RXPERR) { |
388 | printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name); | 388 | printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name); |
389 | qep->net_stats.rx_errors++; | 389 | dev->stats.rx_errors++; |
390 | qep->net_stats.rx_missed_errors++; | 390 | dev->stats.rx_missed_errors++; |
391 | mace_hwbug_workaround = 1; | 391 | mace_hwbug_workaround = 1; |
392 | } | 392 | } |
393 | 393 | ||
394 | if (qe_status & CREG_STAT_RXSERR) { | 394 | if (qe_status & CREG_STAT_RXSERR) { |
395 | printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name); | 395 | printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name); |
396 | qep->net_stats.rx_errors++; | 396 | dev->stats.rx_errors++; |
397 | qep->net_stats.rx_missed_errors++; | 397 | dev->stats.rx_missed_errors++; |
398 | mace_hwbug_workaround = 1; | 398 | mace_hwbug_workaround = 1; |
399 | } | 399 | } |
400 | 400 | ||
@@ -409,6 +409,7 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) | |||
409 | static void qe_rx(struct sunqe *qep) | 409 | static void qe_rx(struct sunqe *qep) |
410 | { | 410 | { |
411 | struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0]; | 411 | struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0]; |
412 | struct net_device *dev = qep->dev; | ||
412 | struct qe_rxd *this; | 413 | struct qe_rxd *this; |
413 | struct sunqe_buffers *qbufs = qep->buffers; | 414 | struct sunqe_buffers *qbufs = qep->buffers; |
414 | __u32 qbufs_dvma = qep->buffers_dvma; | 415 | __u32 qbufs_dvma = qep->buffers_dvma; |
@@ -428,14 +429,14 @@ static void qe_rx(struct sunqe *qep) | |||
428 | 429 | ||
429 | /* Check for errors. */ | 430 | /* Check for errors. */ |
430 | if (len < ETH_ZLEN) { | 431 | if (len < ETH_ZLEN) { |
431 | qep->net_stats.rx_errors++; | 432 | dev->stats.rx_errors++; |
432 | qep->net_stats.rx_length_errors++; | 433 | dev->stats.rx_length_errors++; |
433 | qep->net_stats.rx_dropped++; | 434 | dev->stats.rx_dropped++; |
434 | } else { | 435 | } else { |
435 | skb = dev_alloc_skb(len + 2); | 436 | skb = dev_alloc_skb(len + 2); |
436 | if (skb == NULL) { | 437 | if (skb == NULL) { |
437 | drops++; | 438 | drops++; |
438 | qep->net_stats.rx_dropped++; | 439 | dev->stats.rx_dropped++; |
439 | } else { | 440 | } else { |
440 | skb_reserve(skb, 2); | 441 | skb_reserve(skb, 2); |
441 | skb_put(skb, len); | 442 | skb_put(skb, len); |
@@ -444,8 +445,8 @@ static void qe_rx(struct sunqe *qep) | |||
444 | skb->protocol = eth_type_trans(skb, qep->dev); | 445 | skb->protocol = eth_type_trans(skb, qep->dev); |
445 | netif_rx(skb); | 446 | netif_rx(skb); |
446 | qep->dev->last_rx = jiffies; | 447 | qep->dev->last_rx = jiffies; |
447 | qep->net_stats.rx_packets++; | 448 | dev->stats.rx_packets++; |
448 | qep->net_stats.rx_bytes += len; | 449 | dev->stats.rx_bytes += len; |
449 | } | 450 | } |
450 | } | 451 | } |
451 | end_rxd->rx_addr = this_qbuf_dvma; | 452 | end_rxd->rx_addr = this_qbuf_dvma; |
@@ -603,8 +604,8 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
603 | dev->trans_start = jiffies; | 604 | dev->trans_start = jiffies; |
604 | sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); | 605 | sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); |
605 | 606 | ||
606 | qep->net_stats.tx_packets++; | 607 | dev->stats.tx_packets++; |
607 | qep->net_stats.tx_bytes += len; | 608 | dev->stats.tx_bytes += len; |
608 | 609 | ||
609 | if (TX_BUFFS_AVAIL(qep) <= 0) { | 610 | if (TX_BUFFS_AVAIL(qep) <= 0) { |
610 | /* Halt the net queue and enable tx interrupts. | 611 | /* Halt the net queue and enable tx interrupts. |
@@ -622,13 +623,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
622 | return 0; | 623 | return 0; |
623 | } | 624 | } |
624 | 625 | ||
625 | static struct net_device_stats *qe_get_stats(struct net_device *dev) | ||
626 | { | ||
627 | struct sunqe *qep = (struct sunqe *) dev->priv; | ||
628 | |||
629 | return &qep->net_stats; | ||
630 | } | ||
631 | |||
632 | static void qe_set_multicast(struct net_device *dev) | 626 | static void qe_set_multicast(struct net_device *dev) |
633 | { | 627 | { |
634 | struct sunqe *qep = (struct sunqe *) dev->priv; | 628 | struct sunqe *qep = (struct sunqe *) dev->priv; |
@@ -903,7 +897,6 @@ static int __init qec_ether_init(struct sbus_dev *sdev) | |||
903 | dev->open = qe_open; | 897 | dev->open = qe_open; |
904 | dev->stop = qe_close; | 898 | dev->stop = qe_close; |
905 | dev->hard_start_xmit = qe_start_xmit; | 899 | dev->hard_start_xmit = qe_start_xmit; |
906 | dev->get_stats = qe_get_stats; | ||
907 | dev->set_multicast_list = qe_set_multicast; | 900 | dev->set_multicast_list = qe_set_multicast; |
908 | dev->tx_timeout = qe_tx_timeout; | 901 | dev->tx_timeout = qe_tx_timeout; |
909 | dev->watchdog_timeo = 5*HZ; | 902 | dev->watchdog_timeo = 5*HZ; |
diff --git a/drivers/net/sunqe.h b/drivers/net/sunqe.h index af34f36111ed..347c8ddc1592 100644 --- a/drivers/net/sunqe.h +++ b/drivers/net/sunqe.h | |||
@@ -342,7 +342,6 @@ struct sunqe { | |||
342 | __u32 buffers_dvma; /* DVMA visible address. */ | 342 | __u32 buffers_dvma; /* DVMA visible address. */ |
343 | struct sunqec *parent; | 343 | struct sunqec *parent; |
344 | u8 mconfig; /* Base MACE mconfig value */ | 344 | u8 mconfig; /* Base MACE mconfig value */ |
345 | struct net_device_stats net_stats; /* Statistical counters */ | ||
346 | struct sbus_dev *qe_sdev; /* QE's SBUS device struct */ | 345 | struct sbus_dev *qe_sdev; /* QE's SBUS device struct */ |
347 | struct net_device *dev; /* QE's netdevice struct */ | 346 | struct net_device *dev; /* QE's netdevice struct */ |
348 | int channel; /* Who am I? */ | 347 | int channel; /* Who am I? */ |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8b3ec335385c..d279151f065d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -110,7 +110,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
110 | 110 | ||
111 | /* We won't see all dropped packets individually, so overrun | 111 | /* We won't see all dropped packets individually, so overrun |
112 | * error is more appropriate. */ | 112 | * error is more appropriate. */ |
113 | tun->stats.tx_fifo_errors++; | 113 | dev->stats.tx_fifo_errors++; |
114 | } else { | 114 | } else { |
115 | /* Single queue mode. | 115 | /* Single queue mode. |
116 | * Driver handles dropping of all packets itself. */ | 116 | * Driver handles dropping of all packets itself. */ |
@@ -129,7 +129,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
129 | return 0; | 129 | return 0; |
130 | 130 | ||
131 | drop: | 131 | drop: |
132 | tun->stats.tx_dropped++; | 132 | dev->stats.tx_dropped++; |
133 | kfree_skb(skb); | 133 | kfree_skb(skb); |
134 | return 0; | 134 | return 0; |
135 | } | 135 | } |
@@ -172,12 +172,6 @@ tun_net_mclist(struct net_device *dev) | |||
172 | } | 172 | } |
173 | } | 173 | } |
174 | 174 | ||
175 | static struct net_device_stats *tun_net_stats(struct net_device *dev) | ||
176 | { | ||
177 | struct tun_struct *tun = netdev_priv(dev); | ||
178 | return &tun->stats; | ||
179 | } | ||
180 | |||
181 | /* Initialize net device. */ | 175 | /* Initialize net device. */ |
182 | static void tun_net_init(struct net_device *dev) | 176 | static void tun_net_init(struct net_device *dev) |
183 | { | 177 | { |
@@ -250,14 +244,14 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, | |||
250 | align = NET_IP_ALIGN; | 244 | align = NET_IP_ALIGN; |
251 | 245 | ||
252 | if (!(skb = alloc_skb(len + align, GFP_KERNEL))) { | 246 | if (!(skb = alloc_skb(len + align, GFP_KERNEL))) { |
253 | tun->stats.rx_dropped++; | 247 | tun->dev->stats.rx_dropped++; |
254 | return -ENOMEM; | 248 | return -ENOMEM; |
255 | } | 249 | } |
256 | 250 | ||
257 | if (align) | 251 | if (align) |
258 | skb_reserve(skb, align); | 252 | skb_reserve(skb, align); |
259 | if (memcpy_fromiovec(skb_put(skb, len), iv, len)) { | 253 | if (memcpy_fromiovec(skb_put(skb, len), iv, len)) { |
260 | tun->stats.rx_dropped++; | 254 | tun->dev->stats.rx_dropped++; |
261 | kfree_skb(skb); | 255 | kfree_skb(skb); |
262 | return -EFAULT; | 256 | return -EFAULT; |
263 | } | 257 | } |
@@ -279,8 +273,8 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, | |||
279 | netif_rx_ni(skb); | 273 | netif_rx_ni(skb); |
280 | tun->dev->last_rx = jiffies; | 274 | tun->dev->last_rx = jiffies; |
281 | 275 | ||
282 | tun->stats.rx_packets++; | 276 | tun->dev->stats.rx_packets++; |
283 | tun->stats.rx_bytes += len; | 277 | tun->dev->stats.rx_bytes += len; |
284 | 278 | ||
285 | return count; | 279 | return count; |
286 | } | 280 | } |
@@ -336,8 +330,8 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun, | |||
336 | skb_copy_datagram_iovec(skb, 0, iv, len); | 330 | skb_copy_datagram_iovec(skb, 0, iv, len); |
337 | total += len; | 331 | total += len; |
338 | 332 | ||
339 | tun->stats.tx_packets++; | 333 | tun->dev->stats.tx_packets++; |
340 | tun->stats.tx_bytes += len; | 334 | tun->dev->stats.tx_bytes += len; |
341 | 335 | ||
342 | return total; | 336 | return total; |
343 | } | 337 | } |
@@ -438,7 +432,6 @@ static void tun_setup(struct net_device *dev) | |||
438 | dev->open = tun_net_open; | 432 | dev->open = tun_net_open; |
439 | dev->hard_start_xmit = tun_net_xmit; | 433 | dev->hard_start_xmit = tun_net_xmit; |
440 | dev->stop = tun_net_close; | 434 | dev->stop = tun_net_close; |
441 | dev->get_stats = tun_net_stats; | ||
442 | dev->ethtool_ops = &tun_ethtool_ops; | 435 | dev->ethtool_ops = &tun_ethtool_ops; |
443 | dev->destructor = free_netdev; | 436 | dev->destructor = free_netdev; |
444 | } | 437 | } |
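tun is the odd one out in that its counters are also bumped from the character-device read/write paths, which only have the tun_struct at hand; after the conversion those sites reach the embedded counters through the saved dev pointer. Roughly, as a sketch with a hypothetical helper rather than the driver's own code:

/* Hypothetical helper mirroring the accounting in tun_get_user(). */
static void tun_count_rx(struct tun_struct *tun, unsigned int len)
{
	struct net_device *dev = tun->dev;	/* stored when the device is created */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}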
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 8da41229594a..9667dac383f0 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -3350,14 +3350,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) | |||
3350 | return 0; | 3350 | return 0; |
3351 | } | 3351 | } |
3352 | 3352 | ||
3353 | /* returns a net_device_stats structure pointer */ | ||
3354 | static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev) | ||
3355 | { | ||
3356 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3357 | |||
3358 | return &(ugeth->stats); | ||
3359 | } | ||
3360 | |||
3361 | /* ucc_geth_timeout gets called when a packet has not been | 3353 | /* ucc_geth_timeout gets called when a packet has not been |
3362 | * transmitted after a set amount of time. | 3354 | * transmitted after a set amount of time. |
3363 | * For now, assume that clearing out all the structures, and | 3355 | * For now, assume that clearing out all the structures, and |
@@ -3368,7 +3360,7 @@ static void ucc_geth_timeout(struct net_device *dev) | |||
3368 | 3360 | ||
3369 | ugeth_vdbg("%s: IN", __FUNCTION__); | 3361 | ugeth_vdbg("%s: IN", __FUNCTION__); |
3370 | 3362 | ||
3371 | ugeth->stats.tx_errors++; | 3363 | dev->stats.tx_errors++; |
3372 | 3364 | ||
3373 | ugeth_dump_regs(ugeth); | 3365 | ugeth_dump_regs(ugeth); |
3374 | 3366 | ||
@@ -3396,7 +3388,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3396 | 3388 | ||
3397 | spin_lock_irq(&ugeth->lock); | 3389 | spin_lock_irq(&ugeth->lock); |
3398 | 3390 | ||
3399 | ugeth->stats.tx_bytes += skb->len; | 3391 | dev->stats.tx_bytes += skb->len; |
3400 | 3392 | ||
3401 | /* Start from the next BD that should be filled */ | 3393 | /* Start from the next BD that should be filled */ |
3402 | bd = ugeth->txBd[txQ]; | 3394 | bd = ugeth->txBd[txQ]; |
@@ -3488,9 +3480,9 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit | |||
3488 | dev_kfree_skb_any(skb); | 3480 | dev_kfree_skb_any(skb); |
3489 | 3481 | ||
3490 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; | 3482 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; |
3491 | ugeth->stats.rx_dropped++; | 3483 | dev->stats.rx_dropped++; |
3492 | } else { | 3484 | } else { |
3493 | ugeth->stats.rx_packets++; | 3485 | dev->stats.rx_packets++; |
3494 | howmany++; | 3486 | howmany++; |
3495 | 3487 | ||
3496 | /* Prep the skb for the packet */ | 3488 | /* Prep the skb for the packet */ |
@@ -3499,7 +3491,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit | |||
3499 | /* Tell the skb what kind of packet this is */ | 3491 | /* Tell the skb what kind of packet this is */ |
3500 | skb->protocol = eth_type_trans(skb, ugeth->dev); | 3492 | skb->protocol = eth_type_trans(skb, ugeth->dev); |
3501 | 3493 | ||
3502 | ugeth->stats.rx_bytes += length; | 3494 | dev->stats.rx_bytes += length; |
3503 | /* Send the packet up the stack */ | 3495 | /* Send the packet up the stack */ |
3504 | #ifdef CONFIG_UGETH_NAPI | 3496 | #ifdef CONFIG_UGETH_NAPI |
3505 | netif_receive_skb(skb); | 3497 | netif_receive_skb(skb); |
@@ -3514,7 +3506,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit | |||
3514 | if (!skb) { | 3506 | if (!skb) { |
3515 | if (netif_msg_rx_err(ugeth)) | 3507 | if (netif_msg_rx_err(ugeth)) |
3516 | ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); | 3508 | ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); |
3517 | ugeth->stats.rx_dropped++; | 3509 | dev->stats.rx_dropped++; |
3518 | break; | 3510 | break; |
3519 | } | 3511 | } |
3520 | 3512 | ||
@@ -3556,7 +3548,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |||
3556 | if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) | 3548 | if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) |
3557 | break; | 3549 | break; |
3558 | 3550 | ||
3559 | ugeth->stats.tx_packets++; | 3551 | dev->stats.tx_packets++; |
3560 | 3552 | ||
3561 | /* Free the sk buffer associated with this TxBD */ | 3553 | /* Free the sk buffer associated with this TxBD */ |
3562 | dev_kfree_skb_irq(ugeth-> | 3554 | dev_kfree_skb_irq(ugeth-> |
@@ -3673,10 +3665,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) | |||
3673 | /* Errors and other events */ | 3665 | /* Errors and other events */ |
3674 | if (ucce & UCCE_OTHER) { | 3666 | if (ucce & UCCE_OTHER) { |
3675 | if (ucce & UCCE_BSY) { | 3667 | if (ucce & UCCE_BSY) { |
3676 | ugeth->stats.rx_errors++; | 3668 | dev->stats.rx_errors++; |
3677 | } | 3669 | } |
3678 | if (ucce & UCCE_TXE) { | 3670 | if (ucce & UCCE_TXE) { |
3679 | ugeth->stats.tx_errors++; | 3671 | dev->stats.tx_errors++; |
3680 | } | 3672 | } |
3681 | } | 3673 | } |
3682 | 3674 | ||
@@ -3969,7 +3961,6 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
3969 | netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); | 3961 | netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); |
3970 | #endif /* CONFIG_UGETH_NAPI */ | 3962 | #endif /* CONFIG_UGETH_NAPI */ |
3971 | dev->stop = ucc_geth_close; | 3963 | dev->stop = ucc_geth_close; |
3972 | dev->get_stats = ucc_geth_get_stats; | ||
3973 | // dev->change_mtu = ucc_geth_change_mtu; | 3964 | // dev->change_mtu = ucc_geth_change_mtu; |
3974 | dev->mtu = 1500; | 3965 | dev->mtu = 1500; |
3975 | dev->set_multicast_list = ucc_geth_set_multi; | 3966 | dev->set_multicast_list = ucc_geth_set_multi; |
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index 0579ba081aa5..aaeb94877987 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h | |||
@@ -1185,7 +1185,6 @@ struct ucc_geth_private { | |||
1185 | struct ucc_fast_private *uccf; | 1185 | struct ucc_fast_private *uccf; |
1186 | struct net_device *dev; | 1186 | struct net_device *dev; |
1187 | struct napi_struct napi; | 1187 | struct napi_struct napi; |
1188 | struct net_device_stats stats; /* linux network statistics */ | ||
1189 | struct ucc_geth *ug_regs; | 1188 | struct ucc_geth *ug_regs; |
1190 | struct ucc_geth_init_pram *p_init_enet_param_shadow; | 1189 | struct ucc_geth_init_pram *p_init_enet_param_shadow; |
1191 | struct ucc_geth_exf_global_pram *p_exf_glbl_param; | 1190 | struct ucc_geth_exf_global_pram *p_exf_glbl_param; |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8eeb068dc4a6..78e344ae7051 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -73,7 +73,6 @@ struct netfront_info { | |||
73 | struct net_device *netdev; | 73 | struct net_device *netdev; |
74 | 74 | ||
75 | struct napi_struct napi; | 75 | struct napi_struct napi; |
76 | struct net_device_stats stats; | ||
77 | 76 | ||
78 | struct xen_netif_tx_front_ring tx; | 77 | struct xen_netif_tx_front_ring tx; |
79 | struct xen_netif_rx_front_ring rx; | 78 | struct xen_netif_rx_front_ring rx; |
@@ -309,8 +308,6 @@ static int xennet_open(struct net_device *dev) | |||
309 | { | 308 | { |
310 | struct netfront_info *np = netdev_priv(dev); | 309 | struct netfront_info *np = netdev_priv(dev); |
311 | 310 | ||
312 | memset(&np->stats, 0, sizeof(np->stats)); | ||
313 | |||
314 | napi_enable(&np->napi); | 311 | napi_enable(&np->napi); |
315 | 312 | ||
316 | spin_lock_bh(&np->rx_lock); | 313 | spin_lock_bh(&np->rx_lock); |
@@ -537,8 +534,8 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
537 | if (notify) | 534 | if (notify) |
538 | notify_remote_via_irq(np->netdev->irq); | 535 | notify_remote_via_irq(np->netdev->irq); |
539 | 536 | ||
540 | np->stats.tx_bytes += skb->len; | 537 | dev->stats.tx_bytes += skb->len; |
541 | np->stats.tx_packets++; | 538 | dev->stats.tx_packets++; |
542 | 539 | ||
543 | /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ | 540 | /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ |
544 | xennet_tx_buf_gc(dev); | 541 | xennet_tx_buf_gc(dev); |
@@ -551,7 +548,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
551 | return 0; | 548 | return 0; |
552 | 549 | ||
553 | drop: | 550 | drop: |
554 | np->stats.tx_dropped++; | 551 | dev->stats.tx_dropped++; |
555 | dev_kfree_skb(skb); | 552 | dev_kfree_skb(skb); |
556 | return 0; | 553 | return 0; |
557 | } | 554 | } |
@@ -564,12 +561,6 @@ static int xennet_close(struct net_device *dev) | |||
564 | return 0; | 561 | return 0; |
565 | } | 562 | } |
566 | 563 | ||
567 | static struct net_device_stats *xennet_get_stats(struct net_device *dev) | ||
568 | { | ||
569 | struct netfront_info *np = netdev_priv(dev); | ||
570 | return &np->stats; | ||
571 | } | ||
572 | |||
573 | static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, | 564 | static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, |
574 | grant_ref_t ref) | 565 | grant_ref_t ref) |
575 | { | 566 | { |
@@ -804,9 +795,8 @@ out: | |||
804 | } | 795 | } |
805 | 796 | ||
806 | static int handle_incoming_queue(struct net_device *dev, | 797 | static int handle_incoming_queue(struct net_device *dev, |
807 | struct sk_buff_head *rxq) | 798 | struct sk_buff_head *rxq) |
808 | { | 799 | { |
809 | struct netfront_info *np = netdev_priv(dev); | ||
810 | int packets_dropped = 0; | 800 | int packets_dropped = 0; |
811 | struct sk_buff *skb; | 801 | struct sk_buff *skb; |
812 | 802 | ||
@@ -828,13 +818,13 @@ static int handle_incoming_queue(struct net_device *dev, | |||
828 | if (skb_checksum_setup(skb)) { | 818 | if (skb_checksum_setup(skb)) { |
829 | kfree_skb(skb); | 819 | kfree_skb(skb); |
830 | packets_dropped++; | 820 | packets_dropped++; |
831 | np->stats.rx_errors++; | 821 | dev->stats.rx_errors++; |
832 | continue; | 822 | continue; |
833 | } | 823 | } |
834 | } | 824 | } |
835 | 825 | ||
836 | np->stats.rx_packets++; | 826 | dev->stats.rx_packets++; |
837 | np->stats.rx_bytes += skb->len; | 827 | dev->stats.rx_bytes += skb->len; |
838 | 828 | ||
839 | /* Pass it up. */ | 829 | /* Pass it up. */ |
840 | netif_receive_skb(skb); | 830 | netif_receive_skb(skb); |
@@ -887,7 +877,7 @@ static int xennet_poll(struct napi_struct *napi, int budget) | |||
887 | err: | 877 | err: |
888 | while ((skb = __skb_dequeue(&tmpq))) | 878 | while ((skb = __skb_dequeue(&tmpq))) |
889 | __skb_queue_tail(&errq, skb); | 879 | __skb_queue_tail(&errq, skb); |
890 | np->stats.rx_errors++; | 880 | dev->stats.rx_errors++; |
891 | i = np->rx.rsp_cons; | 881 | i = np->rx.rsp_cons; |
892 | continue; | 882 | continue; |
893 | } | 883 | } |
@@ -1169,7 +1159,6 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev | |||
1169 | netdev->open = xennet_open; | 1159 | netdev->open = xennet_open; |
1170 | netdev->hard_start_xmit = xennet_start_xmit; | 1160 | netdev->hard_start_xmit = xennet_start_xmit; |
1171 | netdev->stop = xennet_close; | 1161 | netdev->stop = xennet_close; |
1172 | netdev->get_stats = xennet_get_stats; | ||
1173 | netif_napi_add(netdev, &np->napi, xennet_poll, 64); | 1162 | netif_napi_add(netdev, &np->napi, xennet_poll, 64); |
1174 | netdev->uninit = xennet_uninit; | 1163 | netdev->uninit = xennet_uninit; |
1175 | netdev->change_mtu = xennet_change_mtu; | 1164 | netdev->change_mtu = xennet_change_mtu; |
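Removing the get_stats hooks outright is safe only because the core supplies a default: in kernels of this vintage, if a driver leaves dev->get_stats unset, registration points it at a stub that simply returns the counters embedded in net_device, so ifconfig and /proc/net/dev keep working unchanged. Conceptually, every per-driver getter deleted in this patch collapses into something like the following (a paraphrase of the presumed fallback, not the actual net/core code):

static struct net_device_stats *default_get_stats(struct net_device *dev)
{
	/* Each getter removed in this patch was equivalent to this. */
	return &dev->stats;
}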
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 29e96950be65..709623e1c611 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -318,7 +318,6 @@ struct yellowfin_private { | |||
318 | dma_addr_t tx_status_dma; | 318 | dma_addr_t tx_status_dma; |
319 | 319 | ||
320 | struct timer_list timer; /* Media selection timer. */ | 320 | struct timer_list timer; /* Media selection timer. */ |
321 | struct net_device_stats stats; | ||
322 | /* Frequently used and paired value: keep adjacent for cache effect. */ | 321 | /* Frequently used and paired value: keep adjacent for cache effect. */ |
323 | int chip_id, drv_flags; | 322 | int chip_id, drv_flags; |
324 | struct pci_dev *pci_dev; | 323 | struct pci_dev *pci_dev; |
@@ -353,7 +352,6 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance); | |||
353 | static int yellowfin_rx(struct net_device *dev); | 352 | static int yellowfin_rx(struct net_device *dev); |
354 | static void yellowfin_error(struct net_device *dev, int intr_status); | 353 | static void yellowfin_error(struct net_device *dev, int intr_status); |
355 | static int yellowfin_close(struct net_device *dev); | 354 | static int yellowfin_close(struct net_device *dev); |
356 | static struct net_device_stats *yellowfin_get_stats(struct net_device *dev); | ||
357 | static void set_rx_mode(struct net_device *dev); | 355 | static void set_rx_mode(struct net_device *dev); |
358 | static const struct ethtool_ops ethtool_ops; | 356 | static const struct ethtool_ops ethtool_ops; |
359 | 357 | ||
@@ -469,7 +467,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev, | |||
469 | dev->open = &yellowfin_open; | 467 | dev->open = &yellowfin_open; |
470 | dev->hard_start_xmit = &yellowfin_start_xmit; | 468 | dev->hard_start_xmit = &yellowfin_start_xmit; |
471 | dev->stop = &yellowfin_close; | 469 | dev->stop = &yellowfin_close; |
472 | dev->get_stats = &yellowfin_get_stats; | ||
473 | dev->set_multicast_list = &set_rx_mode; | 470 | dev->set_multicast_list = &set_rx_mode; |
474 | dev->do_ioctl = &netdev_ioctl; | 471 | dev->do_ioctl = &netdev_ioctl; |
475 | SET_ETHTOOL_OPS(dev, &ethtool_ops); | 472 | SET_ETHTOOL_OPS(dev, &ethtool_ops); |
@@ -717,7 +714,7 @@ static void yellowfin_tx_timeout(struct net_device *dev) | |||
717 | netif_wake_queue (dev); /* Typical path */ | 714 | netif_wake_queue (dev); /* Typical path */ |
718 | 715 | ||
719 | dev->trans_start = jiffies; | 716 | dev->trans_start = jiffies; |
720 | yp->stats.tx_errors++; | 717 | dev->stats.tx_errors++; |
721 | } | 718 | } |
722 | 719 | ||
723 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ | 720 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ |
@@ -923,8 +920,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) | |||
923 | if (yp->tx_ring[entry].result_status == 0) | 920 | if (yp->tx_ring[entry].result_status == 0) |
924 | break; | 921 | break; |
925 | skb = yp->tx_skbuff[entry]; | 922 | skb = yp->tx_skbuff[entry]; |
926 | yp->stats.tx_packets++; | 923 | dev->stats.tx_packets++; |
927 | yp->stats.tx_bytes += skb->len; | 924 | dev->stats.tx_bytes += skb->len; |
928 | /* Free the original skb. */ | 925 | /* Free the original skb. */ |
929 | pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr, | 926 | pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr, |
930 | skb->len, PCI_DMA_TODEVICE); | 927 | skb->len, PCI_DMA_TODEVICE); |
@@ -968,20 +965,20 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) | |||
968 | printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n", | 965 | printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n", |
969 | dev->name, tx_errs); | 966 | dev->name, tx_errs); |
970 | #endif | 967 | #endif |
971 | yp->stats.tx_errors++; | 968 | dev->stats.tx_errors++; |
972 | if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++; | 969 | if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++; |
973 | if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++; | 970 | if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++; |
974 | if (tx_errs & 0x2000) yp->stats.tx_window_errors++; | 971 | if (tx_errs & 0x2000) dev->stats.tx_window_errors++; |
975 | if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++; | 972 | if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++; |
976 | } else { | 973 | } else { |
977 | #ifndef final_version | 974 | #ifndef final_version |
978 | if (yellowfin_debug > 4) | 975 | if (yellowfin_debug > 4) |
979 | printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n", | 976 | printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n", |
980 | dev->name, tx_errs); | 977 | dev->name, tx_errs); |
981 | #endif | 978 | #endif |
982 | yp->stats.tx_bytes += skb->len; | 979 | dev->stats.tx_bytes += skb->len; |
983 | yp->stats.collisions += tx_errs & 15; | 980 | dev->stats.collisions += tx_errs & 15; |
984 | yp->stats.tx_packets++; | 981 | dev->stats.tx_packets++; |
985 | } | 982 | } |
986 | /* Free the original skb. */ | 983 | /* Free the original skb. */ |
987 | pci_unmap_single(yp->pci_dev, | 984 | pci_unmap_single(yp->pci_dev, |
@@ -1076,26 +1073,26 @@ static int yellowfin_rx(struct net_device *dev) | |||
1076 | if (data_size != 0) | 1073 | if (data_size != 0) |
1077 | printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers," | 1074 | printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers," |
1078 | " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size); | 1075 | " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size); |
1079 | yp->stats.rx_length_errors++; | 1076 | dev->stats.rx_length_errors++; |
1080 | } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) { | 1077 | } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) { |
1081 | /* There was a error. */ | 1078 | /* There was a error. */ |
1082 | if (yellowfin_debug > 3) | 1079 | if (yellowfin_debug > 3) |
1083 | printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n", | 1080 | printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n", |
1084 | frame_status); | 1081 | frame_status); |
1085 | yp->stats.rx_errors++; | 1082 | dev->stats.rx_errors++; |
1086 | if (frame_status & 0x0060) yp->stats.rx_length_errors++; | 1083 | if (frame_status & 0x0060) dev->stats.rx_length_errors++; |
1087 | if (frame_status & 0x0008) yp->stats.rx_frame_errors++; | 1084 | if (frame_status & 0x0008) dev->stats.rx_frame_errors++; |
1088 | if (frame_status & 0x0010) yp->stats.rx_crc_errors++; | 1085 | if (frame_status & 0x0010) dev->stats.rx_crc_errors++; |
1089 | if (frame_status < 0) yp->stats.rx_dropped++; | 1086 | if (frame_status < 0) dev->stats.rx_dropped++; |
1090 | } else if ( !(yp->drv_flags & IsGigabit) && | 1087 | } else if ( !(yp->drv_flags & IsGigabit) && |
1091 | ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) { | 1088 | ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) { |
1092 | u8 status1 = buf_addr[data_size-2]; | 1089 | u8 status1 = buf_addr[data_size-2]; |
1093 | u8 status2 = buf_addr[data_size-1]; | 1090 | u8 status2 = buf_addr[data_size-1]; |
1094 | yp->stats.rx_errors++; | 1091 | dev->stats.rx_errors++; |
1095 | if (status1 & 0xC0) yp->stats.rx_length_errors++; | 1092 | if (status1 & 0xC0) dev->stats.rx_length_errors++; |
1096 | if (status2 & 0x03) yp->stats.rx_frame_errors++; | 1093 | if (status2 & 0x03) dev->stats.rx_frame_errors++; |
1097 | if (status2 & 0x04) yp->stats.rx_crc_errors++; | 1094 | if (status2 & 0x04) dev->stats.rx_crc_errors++; |
1098 | if (status2 & 0x80) yp->stats.rx_dropped++; | 1095 | if (status2 & 0x80) dev->stats.rx_dropped++; |
1099 | #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */ | 1096 | #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */ |
1100 | } else if ((yp->flags & HasMACAddrBug) && | 1097 | } else if ((yp->flags & HasMACAddrBug) && |
1101 | memcmp(le32_to_cpu(yp->rx_ring_dma + | 1098 | memcmp(le32_to_cpu(yp->rx_ring_dma + |
@@ -1145,8 +1142,8 @@ static int yellowfin_rx(struct net_device *dev) | |||
1145 | skb->protocol = eth_type_trans(skb, dev); | 1142 | skb->protocol = eth_type_trans(skb, dev); |
1146 | netif_rx(skb); | 1143 | netif_rx(skb); |
1147 | dev->last_rx = jiffies; | 1144 | dev->last_rx = jiffies; |
1148 | yp->stats.rx_packets++; | 1145 | dev->stats.rx_packets++; |
1149 | yp->stats.rx_bytes += pkt_len; | 1146 | dev->stats.rx_bytes += pkt_len; |
1150 | } | 1147 | } |
1151 | entry = (++yp->cur_rx) % RX_RING_SIZE; | 1148 | entry = (++yp->cur_rx) % RX_RING_SIZE; |
1152 | } | 1149 | } |
@@ -1180,15 +1177,13 @@ static int yellowfin_rx(struct net_device *dev) | |||
1180 | 1177 | ||
1181 | static void yellowfin_error(struct net_device *dev, int intr_status) | 1178 | static void yellowfin_error(struct net_device *dev, int intr_status) |
1182 | { | 1179 | { |
1183 | struct yellowfin_private *yp = netdev_priv(dev); | ||
1184 | |||
1185 | printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", | 1180 | printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", |
1186 | dev->name, intr_status); | 1181 | dev->name, intr_status); |
1187 | /* Hmmmmm, it's not clear what to do here. */ | 1182 | /* Hmmmmm, it's not clear what to do here. */ |
1188 | if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) | 1183 | if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) |
1189 | yp->stats.tx_errors++; | 1184 | dev->stats.tx_errors++; |
1190 | if (intr_status & (IntrRxPCIErr | IntrRxPCIFault)) | 1185 | if (intr_status & (IntrRxPCIErr | IntrRxPCIFault)) |
1191 | yp->stats.rx_errors++; | 1186 | dev->stats.rx_errors++; |
1192 | } | 1187 | } |
1193 | 1188 | ||
1194 | static int yellowfin_close(struct net_device *dev) | 1189 | static int yellowfin_close(struct net_device *dev) |
@@ -1280,12 +1275,6 @@ static int yellowfin_close(struct net_device *dev) | |||
1280 | return 0; | 1275 | return 0; |
1281 | } | 1276 | } |
1282 | 1277 | ||
1283 | static struct net_device_stats *yellowfin_get_stats(struct net_device *dev) | ||
1284 | { | ||
1285 | struct yellowfin_private *yp = netdev_priv(dev); | ||
1286 | return &yp->stats; | ||
1287 | } | ||
1288 | |||
1289 | /* Set or clear the multicast filter for this adaptor. */ | 1278 | /* Set or clear the multicast filter for this adaptor. */ |
1290 | 1279 | ||
1291 | static void set_rx_mode(struct net_device *dev) | 1280 | static void set_rx_mode(struct net_device *dev) |
diff --git a/drivers/net/znet.c b/drivers/net/znet.c index dcd4e1b136b5..43712c7b9ecf 100644 --- a/drivers/net/znet.c +++ b/drivers/net/znet.c | |||
@@ -128,7 +128,6 @@ MODULE_LICENSE("GPL"); | |||
128 | 128 | ||
129 | struct znet_private { | 129 | struct znet_private { |
130 | int rx_dma, tx_dma; | 130 | int rx_dma, tx_dma; |
131 | struct net_device_stats stats; | ||
132 | spinlock_t lock; | 131 | spinlock_t lock; |
133 | short sia_base, sia_size, io_size; | 132 | short sia_base, sia_size, io_size; |
134 | struct i82593_conf_block i593_init; | 133 | struct i82593_conf_block i593_init; |
@@ -161,7 +160,6 @@ static int znet_send_packet(struct sk_buff *skb, struct net_device *dev); | |||
161 | static irqreturn_t znet_interrupt(int irq, void *dev_id); | 160 | static irqreturn_t znet_interrupt(int irq, void *dev_id); |
162 | static void znet_rx(struct net_device *dev); | 161 | static void znet_rx(struct net_device *dev); |
163 | static int znet_close(struct net_device *dev); | 162 | static int znet_close(struct net_device *dev); |
164 | static struct net_device_stats *net_get_stats(struct net_device *dev); | ||
165 | static void hardware_init(struct net_device *dev); | 163 | static void hardware_init(struct net_device *dev); |
166 | static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset); | 164 | static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset); |
167 | static void znet_tx_timeout (struct net_device *dev); | 165 | static void znet_tx_timeout (struct net_device *dev); |
@@ -445,7 +443,6 @@ static int __init znet_probe (void) | |||
445 | dev->open = &znet_open; | 443 | dev->open = &znet_open; |
446 | dev->hard_start_xmit = &znet_send_packet; | 444 | dev->hard_start_xmit = &znet_send_packet; |
447 | dev->stop = &znet_close; | 445 | dev->stop = &znet_close; |
448 | dev->get_stats = net_get_stats; | ||
449 | dev->set_multicast_list = &znet_set_multicast_list; | 446 | dev->set_multicast_list = &znet_set_multicast_list; |
450 | dev->tx_timeout = znet_tx_timeout; | 447 | dev->tx_timeout = znet_tx_timeout; |
451 | dev->watchdog_timeo = TX_TIMEOUT; | 448 | dev->watchdog_timeo = TX_TIMEOUT; |
@@ -564,7 +561,7 @@ static int znet_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
564 | ushort *tx_link = znet->tx_cur - 1; | 561 | ushort *tx_link = znet->tx_cur - 1; |
565 | ushort rnd_len = (length + 1)>>1; | 562 | ushort rnd_len = (length + 1)>>1; |
566 | 563 | ||
567 | znet->stats.tx_bytes+=length; | 564 | dev->stats.tx_bytes+=length; |
568 | 565 | ||
569 | if (znet->tx_cur >= znet->tx_end) | 566 | if (znet->tx_cur >= znet->tx_end) |
570 | znet->tx_cur = znet->tx_start; | 567 | znet->tx_cur = znet->tx_start; |
@@ -639,20 +636,20 @@ static irqreturn_t znet_interrupt(int irq, void *dev_id) | |||
639 | tx_status = inw(ioaddr); | 636 | tx_status = inw(ioaddr); |
640 | /* It's undocumented, but tx_status seems to match the i82586. */ | 637 | /* It's undocumented, but tx_status seems to match the i82586. */ |
641 | if (tx_status & TX_OK) { | 638 | if (tx_status & TX_OK) { |
642 | znet->stats.tx_packets++; | 639 | dev->stats.tx_packets++; |
643 | znet->stats.collisions += tx_status & TX_NCOL_MASK; | 640 | dev->stats.collisions += tx_status & TX_NCOL_MASK; |
644 | } else { | 641 | } else { |
645 | if (tx_status & (TX_LOST_CTS | TX_LOST_CRS)) | 642 | if (tx_status & (TX_LOST_CTS | TX_LOST_CRS)) |
646 | znet->stats.tx_carrier_errors++; | 643 | dev->stats.tx_carrier_errors++; |
647 | if (tx_status & TX_UND_RUN) | 644 | if (tx_status & TX_UND_RUN) |
648 | znet->stats.tx_fifo_errors++; | 645 | dev->stats.tx_fifo_errors++; |
649 | if (!(tx_status & TX_HRT_BEAT)) | 646 | if (!(tx_status & TX_HRT_BEAT)) |
650 | znet->stats.tx_heartbeat_errors++; | 647 | dev->stats.tx_heartbeat_errors++; |
651 | if (tx_status & TX_MAX_COL) | 648 | if (tx_status & TX_MAX_COL) |
652 | znet->stats.tx_aborted_errors++; | 649 | dev->stats.tx_aborted_errors++; |
653 | /* ...and the catch-all. */ | 650 | /* ...and the catch-all. */ |
654 | if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) | 651 | if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) |
655 | znet->stats.tx_errors++; | 652 | dev->stats.tx_errors++; |
656 | 653 | ||
657 | /* Transceiver may be stuck if cable | 654 | /* Transceiver may be stuck if cable |
658 | * was removed while emiting a | 655 | * was removed while emiting a |
@@ -748,19 +745,19 @@ static void znet_rx(struct net_device *dev) | |||
748 | this_rfp_ptr[-3]<<1); | 745 | this_rfp_ptr[-3]<<1); |
749 | /* Once again we must assume that the i82586 docs apply. */ | 746 | /* Once again we must assume that the i82586 docs apply. */ |
750 | if ( ! (status & RX_RCV_OK)) { /* There was an error. */ | 747 | if ( ! (status & RX_RCV_OK)) { /* There was an error. */ |
751 | znet->stats.rx_errors++; | 748 | dev->stats.rx_errors++; |
752 | if (status & RX_CRC_ERR) znet->stats.rx_crc_errors++; | 749 | if (status & RX_CRC_ERR) dev->stats.rx_crc_errors++; |
753 | if (status & RX_ALG_ERR) znet->stats.rx_frame_errors++; | 750 | if (status & RX_ALG_ERR) dev->stats.rx_frame_errors++; |
754 | #if 0 | 751 | #if 0 |
755 | if (status & 0x0200) znet->stats.rx_over_errors++; /* Wrong. */ | 752 | if (status & 0x0200) dev->stats.rx_over_errors++; /* Wrong. */ |
756 | if (status & 0x0100) znet->stats.rx_fifo_errors++; | 753 | if (status & 0x0100) dev->stats.rx_fifo_errors++; |
757 | #else | 754 | #else |
758 | /* maz : Wild guess... */ | 755 | /* maz : Wild guess... */ |
759 | if (status & RX_OVRRUN) znet->stats.rx_over_errors++; | 756 | if (status & RX_OVRRUN) dev->stats.rx_over_errors++; |
760 | #endif | 757 | #endif |
761 | if (status & RX_SRT_FRM) znet->stats.rx_length_errors++; | 758 | if (status & RX_SRT_FRM) dev->stats.rx_length_errors++; |
762 | } else if (pkt_len > 1536) { | 759 | } else if (pkt_len > 1536) { |
763 | znet->stats.rx_length_errors++; | 760 | dev->stats.rx_length_errors++; |
764 | } else { | 761 | } else { |
765 | /* Malloc up new buffer. */ | 762 | /* Malloc up new buffer. */ |
766 | struct sk_buff *skb; | 763 | struct sk_buff *skb; |
@@ -769,7 +766,7 @@ static void znet_rx(struct net_device *dev) | |||
769 | if (skb == NULL) { | 766 | if (skb == NULL) { |
770 | if (znet_debug) | 767 | if (znet_debug) |
771 | printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); | 768 | printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); |
772 | znet->stats.rx_dropped++; | 769 | dev->stats.rx_dropped++; |
773 | break; | 770 | break; |
774 | } | 771 | } |
775 | 772 | ||
@@ -789,8 +786,8 @@ static void znet_rx(struct net_device *dev) | |||
789 | skb->protocol=eth_type_trans(skb,dev); | 786 | skb->protocol=eth_type_trans(skb,dev); |
790 | netif_rx(skb); | 787 | netif_rx(skb); |
791 | dev->last_rx = jiffies; | 788 | dev->last_rx = jiffies; |
792 | znet->stats.rx_packets++; | 789 | dev->stats.rx_packets++; |
793 | znet->stats.rx_bytes += pkt_len; | 790 | dev->stats.rx_bytes += pkt_len; |
794 | } | 791 | } |
795 | znet->rx_cur = this_rfp_ptr; | 792 | znet->rx_cur = this_rfp_ptr; |
796 | if (znet->rx_cur >= znet->rx_end) | 793 | if (znet->rx_cur >= znet->rx_end) |
@@ -827,15 +824,6 @@ static int znet_close(struct net_device *dev) | |||
827 | return 0; | 824 | return 0; |
828 | } | 825 | } |
829 | 826 | ||
830 | /* Get the current statistics. This may be called with the card open or | ||
831 | closed. */ | ||
832 | static struct net_device_stats *net_get_stats(struct net_device *dev) | ||
833 | { | ||
834 | struct znet_private *znet = dev->priv; | ||
835 | |||
836 | return &znet->stats; | ||
837 | } | ||
838 | |||
839 | static void show_dma(struct net_device *dev) | 827 | static void show_dma(struct net_device *dev) |
840 | { | 828 | { |
841 | short ioaddr = dev->base_addr; | 829 | short ioaddr = dev->base_addr; |
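
The yellowfin and znet conversions above follow one pattern: the driver's private structure loses its own struct net_device_stats member, the get_stats hook that returned it is deleted, and every counter update is redirected to the stats block embedded in struct net_device. With the counters held by the device structure itself, the core can report them without a driver-supplied accessor, which is why yellowfin_get_stats() and net_get_stats() can simply be removed. The sketch below is a compilable user-space model of that before/after shape, not kernel code; struct fake_driver_priv, fake_tx_complete() and the reduced structure definitions are invented stand-ins for illustration only.

/*
 * Simplified model of the conversion pattern (illustrative sketch,
 * assumed names, not the real kernel definitions): the per-driver
 * copy of the statistics block and its accessor go away once the
 * shared device structure carries the counters itself.
 */
#include <stdio.h>

struct net_device_stats {
	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_errors;
};

struct net_device {
	const char *name;
	struct net_device_stats stats;	/* counters shared with the core */
	void *priv;
};

/*
 * Old shape: the private struct carried its own net_device_stats and a
 * get_stats hook handed back a pointer to it.  New shape: the private
 * struct keeps only driver state, and counters are bumped on dev->stats
 * directly, so both the private copy and the hook can be deleted.
 */
struct fake_driver_priv {
	int tx_ring_slot;		/* driver state only, no stats copy */
};

static void fake_tx_complete(struct net_device *dev, unsigned int len, int ok)
{
	if (ok) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	} else {
		dev->stats.tx_errors++;
	}
}

int main(void)
{
	struct fake_driver_priv priv = { .tx_ring_slot = 0 };
	struct net_device dev = { .name = "fake0", .priv = &priv };

	fake_tx_complete(&dev, 60, 1);
	fake_tx_complete(&dev, 1514, 0);

	printf("%s: tx_packets=%lu tx_bytes=%lu tx_errors=%lu\n",
	       dev.name, dev.stats.tx_packets, dev.stats.tx_bytes,
	       dev.stats.tx_errors);
	return 0;
}

The same reasoning applies to the header-only hunks that follow: once struct net_device owns the counters, private structures such as equalizer_t, struct shaper and struct tun_struct no longer need a net_device_stats member of their own.
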
diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h index b68752fdc5c4..79c4f268410d 100644 --- a/include/linux/if_eql.h +++ b/include/linux/if_eql.h | |||
@@ -58,7 +58,6 @@ typedef struct equalizer { | |||
58 | slave_queue_t queue; | 58 | slave_queue_t queue; |
59 | int min_slaves; | 59 | int min_slaves; |
60 | int max_slaves; | 60 | int max_slaves; |
61 | struct net_device_stats stats; | ||
62 | struct timer_list timer; | 61 | struct timer_list timer; |
63 | } equalizer_t; | 62 | } equalizer_t; |
64 | 63 | ||
diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h index 68c896a36a34..51574743aa1b 100644 --- a/include/linux/if_shaper.h +++ b/include/linux/if_shaper.h | |||
@@ -24,7 +24,6 @@ struct shaper | |||
24 | unsigned long recovery; /* Time we can next clock a packet out on | 24 | unsigned long recovery; /* Time we can next clock a packet out on |
25 | an empty queue */ | 25 | an empty queue */ |
26 | spinlock_t lock; | 26 | spinlock_t lock; |
27 | struct net_device_stats stats; | ||
28 | struct net_device *dev; | 27 | struct net_device *dev; |
29 | int (*hard_start_xmit) (struct sk_buff *skb, | 28 | int (*hard_start_xmit) (struct sk_buff *skb, |
30 | struct net_device *dev); | 29 | struct net_device *dev); |
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 42eb6945b93e..33e489d5bb33 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h | |||
@@ -42,7 +42,6 @@ struct tun_struct { | |||
42 | struct sk_buff_head readq; | 42 | struct sk_buff_head readq; |
43 | 43 | ||
44 | struct net_device *dev; | 44 | struct net_device *dev; |
45 | struct net_device_stats stats; | ||
46 | 45 | ||
47 | struct fasync_struct *fasync; | 46 | struct fasync_struct *fasync; |
48 | 47 | ||