Diffstat (limited to 'drivers/net/lib8390.c')
 -rw-r--r--  drivers/net/lib8390.c  100
 1 file changed, 51 insertions(+), 49 deletions(-)
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 0c5447dac03b..00d59ab2f8ac 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -150,19 +150,19 @@ static void __NS8390_init(struct net_device *dev, int startp);
  * card means that approach caused horrible problems like losing serial data
  * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
  * chips with FPGA front ends.
  *
  * Ok the logic behind the 8390 is very simple:
  *
  * Things to know
  *	- IRQ delivery is asynchronous to the PCI bus
  *	- Blocking the local CPU IRQ via spin locks was too slow
  *	- The chip has register windows needing locking work
  *
  * So the path was once (I say once as people appear to have changed it
  * in the mean time and it now looks rather bogus if the changes to use
  * disable_irq_nosync_irqsave are disabling the local IRQ)
  *
  *
  *	Take the page lock
  *	Mask the IRQ on chip
  *	Disable the IRQ (but not mask locally- someone seems to have
@@ -170,22 +170,22 @@ static void __NS8390_init(struct net_device *dev, int startp);
  *		[This must be _nosync as the page lock may otherwise
  *		deadlock us]
  *	Drop the page lock and turn IRQs back on
  *
  *	At this point an existing IRQ may still be running but we can't
  *	get a new one
  *
  *	Take the lock (so we know the IRQ has terminated) but don't mask
  *	the IRQs on the processor
  *	Set irqlock [for debug]
  *
  *	Transmit (slow as ****)
  *
  *	re-enable the IRQ
  *
  *
  * We have to use disable_irq because otherwise you will get delayed
  * interrupts on the APIC bus deadlocking the transmit path.
  *
  * Quite hairy but the chip simply wasn't designed for SMP and you can't
  * even ACK an interrupt without risking corrupting other parallel
  * activities on the chip." [lkml, 25 Jul 2007]
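The recipe in the comment above is easier to follow next to code. Below is a minimal sketch of the transmit-path sequence it describes, condensed from ei_start_xmit() in this file (the function name is illustrative; error paths and the actual buffer transfer are elided):

	static int ei_xmit_locking_sketch(struct sk_buff *skb, struct net_device *dev)
	{
		struct ei_device *ei_local = netdev_priv(dev);
		unsigned long e8390_base = dev->base_addr;
		unsigned long flags;

		/* Take the page lock and mask the IRQ on the chip. */
		spin_lock_irqsave(&ei_local->page_lock, flags);
		ei_outb_p(0x00, e8390_base + EN0_IMR);
		spin_unlock_irqrestore(&ei_local->page_lock, flags);

		/* Disable the IRQ line; must be _nosync, or the page lock
		 * held by a still-running handler could deadlock us. */
		disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

		/* Retake the lock: any handler already in flight has now
		 * terminated, and no new one can be delivered.
		 * irqlock is a debugging aid. */
		spin_lock(&ei_local->page_lock);
		ei_local->irqlock = 1;

		/* ... program the transmit here (slow as ****) ... */

		/* Re-enable the chip's IRQ sources, then the IRQ line. */
		ei_local->irqlock = 0;
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		return 0;
	}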
@@ -265,7 +265,7 @@ static void ei_tx_timeout(struct net_device *dev)
 	int txsr, isr, tickssofar = jiffies - dev->trans_start;
 	unsigned long flags;
 
-	ei_local->stat.tx_errors++;
+	dev->stats.tx_errors++;
 
 	spin_lock_irqsave(&ei_local->page_lock, flags);
 	txsr = ei_inb(e8390_base+EN0_TSR);
@@ -276,7 +276,7 @@ static void ei_tx_timeout(struct net_device *dev)
 		dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
 		(isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
 
-	if (!isr && !ei_local->stat.tx_packets)
+	if (!isr && !dev->stats.tx_packets)
 	{
 		/* The 8390 probably hasn't gotten on the cable yet. */
 		ei_local->interface_num ^= 1;	/* Try a different xcvr.  */
@@ -374,7 +374,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 		spin_unlock(&ei_local->page_lock);
 		enable_irq_lockdep_irqrestore(dev->irq, &flags);
-		ei_local->stat.tx_errors++;
+		dev->stats.tx_errors++;
 		return 1;
 	}
 
@@ -417,7 +417,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	enable_irq_lockdep_irqrestore(dev->irq, &flags);
 
 	dev_kfree_skb (skb);
-	ei_local->stat.tx_bytes += send_length;
+	dev->stats.tx_bytes += send_length;
 
 	return 0;
 }
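The pattern throughout this diff replaces the driver-private counter block ei_local->stat with the one embedded in struct net_device itself. For reference, a heavily abridged sketch of the relevant member as it looked in kernels of this era (field placement illustrative, not a verbatim quote of include/linux/netdevice.h):

	struct net_device {
		char			name[IFNAMSIZ];
		/* ... */
		struct net_device_stats	stats;	/* counter block owned by the
						 * core; drivers need no
						 * private copy */
		/* ... */
	};

One payoff of the shared block: code paths that only touch statistics no longer need the private struct at all, which is what exposes the unused-variable warnings handled later in this diff.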
@@ -493,9 +493,9 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
 
 	if (interrupts & ENISR_COUNTERS)
 	{
-		ei_local->stat.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
-		ei_local->stat.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
-		ei_local->stat.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2);
+		dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
+		dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
+		dev->stats.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2);
 		ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
 	}
 
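The += here (rather than plain assignment) is deliberate: the DP8390's three tally counters clear themselves when read, so each read yields only the delta since the previous read. A sketch of the contrast (register names as above):

	/* Clear-on-read tally counters: accumulate, never assign. */
	dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1); /* correct */
	dev->stats.rx_crc_errors  = ei_inb_p(e8390_base + EN0_COUNTER1); /* wrong: discards
	                                                                  * earlier deltas */

The same accumulate pattern reappears in get_stats() at the bottom of this diff.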
@@ -553,7 +553,8 @@ static void __ei_poll(struct net_device *dev)
 static void ei_tx_err(struct net_device *dev)
 {
 	unsigned long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	/* ei_local is used on some platforms via the EI_SHIFT macro */
+	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
 	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
 	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
 
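Why __maybe_unused: whether ei_local is still referenced in these functions depends on the platform's EI_SHIFT macro, which the EN0_* register offsets expand through. A sketch of the two flavours (macro bodies abridged; the offset-table variant is modelled on the Mac-style 8390 ports and treats the exact field name as an assumption):

	/* Common case: registers are contiguous, so ei_local goes unused. */
	#define EI_SHIFT(x)	(x)

	/* Some platforms index a per-device offset table, and the macro
	 * expansion silently consumes the local variable ei_local: */
	#define EI_SHIFT(x)	(ei_local->reg_offset[x])

Without the annotation, the first flavour would warn about an unused variable once the statistics accesses below stop going through ei_local.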
@@ -578,10 +579,10 @@ static void ei_tx_err(struct net_device *dev)
 		ei_tx_intr(dev);
 	else
 	{
-		ei_local->stat.tx_errors++;
-		if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
-		if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
-		if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+		dev->stats.tx_errors++;
+		if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++;
+		if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++;
+		if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++;
 	}
 }
 
@@ -645,25 +646,25 @@ static void ei_tx_intr(struct net_device *dev)
 
 	/* Minimize Tx latency: update the statistics after we restart TXing. */
 	if (status & ENTSR_COL)
-		ei_local->stat.collisions++;
+		dev->stats.collisions++;
 	if (status & ENTSR_PTX)
-		ei_local->stat.tx_packets++;
+		dev->stats.tx_packets++;
 	else
 	{
-		ei_local->stat.tx_errors++;
+		dev->stats.tx_errors++;
 		if (status & ENTSR_ABT)
 		{
-			ei_local->stat.tx_aborted_errors++;
-			ei_local->stat.collisions += 16;
+			dev->stats.tx_aborted_errors++;
+			dev->stats.collisions += 16;
 		}
 		if (status & ENTSR_CRS)
-			ei_local->stat.tx_carrier_errors++;
+			dev->stats.tx_carrier_errors++;
 		if (status & ENTSR_FU)
-			ei_local->stat.tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
 		if (status & ENTSR_CDH)
-			ei_local->stat.tx_heartbeat_errors++;
+			dev->stats.tx_heartbeat_errors++;
 		if (status & ENTSR_OWC)
-			ei_local->stat.tx_window_errors++;
+			dev->stats.tx_window_errors++;
 	}
 	netif_wake_queue(dev);
 }
@@ -730,7 +731,7 @@ static void ei_receive(struct net_device *dev)
 		    && rx_frame.next != next_frame + 1 - num_rx_pages) {
 			ei_local->current_page = rxing_page;
 			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
-			ei_local->stat.rx_errors++;
+			dev->stats.rx_errors++;
 			continue;
 		}
 
@@ -740,8 +741,8 @@ static void ei_receive(struct net_device *dev)
 			printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
 				   dev->name, rx_frame.count, rx_frame.status,
 				   rx_frame.next);
-			ei_local->stat.rx_errors++;
-			ei_local->stat.rx_length_errors++;
+			dev->stats.rx_errors++;
+			dev->stats.rx_length_errors++;
 		}
 		else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
 		{
@@ -753,7 +754,7 @@ static void ei_receive(struct net_device *dev)
 				if (ei_debug > 1)
 					printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
 						   dev->name, pkt_len);
-				ei_local->stat.rx_dropped++;
+				dev->stats.rx_dropped++;
 				break;
 			}
 			else
@@ -764,10 +765,10 @@ static void ei_receive(struct net_device *dev)
 				skb->protocol=eth_type_trans(skb,dev);
 				netif_rx(skb);
 				dev->last_rx = jiffies;
-				ei_local->stat.rx_packets++;
-				ei_local->stat.rx_bytes += pkt_len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += pkt_len;
 				if (pkt_stat & ENRSR_PHY)
-					ei_local->stat.multicast++;
+					dev->stats.multicast++;
 			}
 		}
 		else
@@ -776,10 +777,10 @@ static void ei_receive(struct net_device *dev)
 				printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
 					   dev->name, rx_frame.status, rx_frame.next,
 					   rx_frame.count);
-			ei_local->stat.rx_errors++;
+			dev->stats.rx_errors++;
 			/* NB: The NIC counts CRC, frame and missed errors. */
 			if (pkt_stat & ENRSR_FO)
-				ei_local->stat.rx_fifo_errors++;
+				dev->stats.rx_fifo_errors++;
 		}
 		next_frame = rx_frame.next;
 
@@ -816,7 +817,8 @@ static void ei_rx_overrun(struct net_device *dev)
 {
 	unsigned long e8390_base = dev->base_addr;
 	unsigned char was_txing, must_resend = 0;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	/* ei_local is used on some platforms via the EI_SHIFT macro */
+	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
 
 	/*
 	 * Record whether a Tx was in progress and then issue the
@@ -827,7 +829,7 @@ static void ei_rx_overrun(struct net_device *dev)
 
 	if (ei_debug > 1)
 		printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
-	ei_local->stat.rx_over_errors++;
+	dev->stats.rx_over_errors++;
 
 	/*
 	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
@@ -889,16 +891,16 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 
 	/* If the card is stopped, just return the present stats. */
 	if (!netif_running(dev))
-		return &ei_local->stat;
+		return &dev->stats;
 
 	spin_lock_irqsave(&ei_local->page_lock,flags);
 	/* Read the counter registers, assuming we are in page 0. */
-	ei_local->stat.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
-	ei_local->stat.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
-	ei_local->stat.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2);
+	dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
+	dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
+	dev->stats.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2);
 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
 
 	return &dev->stats;
 }
 
 /*
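A note on the hook itself: in kernels of this vintage (before net_device_ops) the statistics callback hung directly off the device structure. A sketch of how it is registered and consumed (the registration line mirrors the 8390 setup code; the caller side is purely illustrative):

	/* registration, at device setup time */
	dev->get_stats = get_stats;

	/* consumption, e.g. on the /proc/net/dev path */
	struct net_device_stats *st = dev->get_stats(dev);
	printk(KERN_INFO "%s: %lu RX / %lu TX packets\n",
	       dev->name, st->rx_packets, st->tx_packets);

Note that get_stats() still takes the page lock and folds the clear-on-read hardware tallies into dev->stats before returning it, so the totals stay current even between interrupts.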
