author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2011-06-01 03:17:10 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-06-03 01:06:42 -0400 |
commit | fe09bb619096a0aa139210748ddc668c2dbe2308 (patch) | |
tree | 6bc8488ee4941aeae05faf75d6522b491b4a24f1 /drivers/net/sungem.c | |
parent | 6f92c66f7190b1677ea666249b72298723392115 (diff) |
sungem: Spring cleaning and GRO support
This patch simplifies the logic and locking in sungem significantly:
- LLTX is gone, all private locks are gone, mutex is gone; the lockless
Tx stop/wake pattern that replaces them is sketched below
- We no longer poll the PHY while the interface is down
- The above allowed me to get rid of a pile of state flags, relying
instead on the interface state the networking stack already provides,
and to simplify the driver a lot overall
- Allocate the bulk of RX skbs at init time using GFP_KERNEL
- Fix a bug where dev->features were set after register_netdev()
- Add GRO while at it (see the receive-path sketch after the diffstat)
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
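
With LLTX and the private locks gone, the transmit path relies on the stack's standard stop/wake protocol: the completion side publishes its ring-index update, issues a memory barrier, and only then tests whether the queue needs waking, while the xmit side stops the queue, issues the matching barrier, and re-checks for space freed in the meantime. The sketch below condenses that pattern as this patch applies it; it is illustrative only, and `example_priv`, `example_tx_avail()` and `EXAMPLE_TX_RING_SIZE` are placeholders standing in for the driver's `struct gem`, `TX_BUFFS_AVAIL()` and ring size, not code from the patch.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_TX_RING_SIZE 128

/* Placeholder private state; the real driver keeps this in struct gem. */
struct example_priv {
	unsigned int tx_new;	/* producer index, advanced by xmit       */
	unsigned int tx_old;	/* consumer index, advanced by completion */
};

/* Placeholder for the driver's TX_BUFFS_AVAIL(): free slots in the ring. */
static unsigned int example_tx_avail(const struct example_priv *ep)
{
	if (ep->tx_old > ep->tx_new)
		return ep->tx_old - ep->tx_new - 1;
	return ep->tx_old + EXAMPLE_TX_RING_SIZE - ep->tx_new - 1;
}

/* Completion path, run from NAPI poll: reclaim sent skbs, then maybe wake. */
static void example_tx_complete(struct net_device *dev, struct example_priv *ep)
{
	/* ... unmap buffers, free skbs, advance ep->tx_old ... */

	/* Publish the tx_old update before testing the stopped flag;
	 * this pairs with the barrier in example_xmit() below.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(dev) &&
		     example_tx_avail(ep) > MAX_SKB_FRAGS + 1)) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* Re-check under the tx queue lock so the wake cannot
		 * race with a concurrent stop in the xmit path.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_queue_stopped(dev) &&
		    example_tx_avail(ep) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
		__netif_tx_unlock(txq);
	}
}

/* Transmit path: stop the queue when nearly full, re-check after a barrier. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *ep = netdev_priv(dev);

	/* ... map skb, fill descriptors, advance ep->tx_new, kick the chip ... */

	if (unlikely(example_tx_avail(ep) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);

		/* Order the stop against the completion side's re-check;
		 * without it both sides could miss each other and leave
		 * the queue stopped forever.
		 */
		smp_mb();
		if (example_tx_avail(ep) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
	}
	return NETDEV_TX_OK;
}
```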
Diffstat (limited to 'drivers/net/sungem.c')
-rw-r--r-- | drivers/net/sungem.c | 889 |
1 file changed, 373 insertions, 516 deletions
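
The GRO addition itself is the smallest part of the diff that follows: completed receive buffers are handed to napi_gro_receive() from NAPI poll context instead of netif_receive_skb(), letting the stack coalesce consecutive TCP segments before delivery. A minimal sketch of that receive step, with descriptor and checksum handling elided; the function name and parameters are illustrative, not the driver's own:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* Deliver one completed receive buffer from NAPI poll context.
 * In the driver, napi and dev live in struct gem; they are passed
 * explicitly here to keep the sketch self-contained.
 */
static void example_rx_one(struct napi_struct *napi, struct net_device *dev,
			   struct sk_buff *skb, unsigned int len)
{
	skb_put(skb, len);			/* account for the DMA'd payload */
	skb->protocol = eth_type_trans(skb, dev);

	/* GRO entry point: replaces netif_receive_skb() so the stack can
	 * merge consecutive TCP segments before handing them up.
	 */
	napi_gro_receive(napi, skb);
}
```

In the patch this call sits in gem_rx(), which gem_poll() drives within its NAPI budget, so GRO batching happens naturally per poll round.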
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index ab5930099267..f0bcbe4bce4a 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -10,25 +10,6 @@ | |||
10 | * NAPI and NETPOLL support | 10 | * NAPI and NETPOLL support |
11 | * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) | 11 | * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) |
12 | * | 12 | * |
13 | * TODO: | ||
14 | * - Now that the driver was significantly simplified, I need to rework | ||
15 | * the locking. I'm sure we don't need _2_ spinlocks, and we probably | ||
16 | * can avoid taking most of them for so long period of time (and schedule | ||
17 | * instead). The main issues at this point are caused by the netdev layer | ||
18 | * though: | ||
19 | * | ||
20 | * gem_change_mtu() and gem_set_multicast() are called with a read_lock() | ||
21 | * help by net/core/dev.c, thus they can't schedule. That means they can't | ||
22 | * call napi_disable() neither, thus force gem_poll() to keep a spinlock | ||
23 | * where it could have been dropped. change_mtu especially would love also to | ||
24 | * be able to msleep instead of horrid locked delays when resetting the HW, | ||
25 | * but that read_lock() makes it impossible, unless I defer it's action to | ||
26 | * the reset task, which means it'll be asynchronous (won't take effect until | ||
27 | * the system schedules a bit). | ||
28 | * | ||
29 | * Also, it would probably be possible to also remove most of the long-life | ||
30 | * locking in open/resume code path (gem_reinit_chip) by beeing more careful | ||
31 | * about when we can start taking interrupts or get xmit() called... | ||
32 | */ | 13 | */ |
33 | 14 | ||
34 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
@@ -57,7 +38,6 @@ | |||
57 | #include <linux/workqueue.h> | 38 | #include <linux/workqueue.h> |
58 | #include <linux/if_vlan.h> | 39 | #include <linux/if_vlan.h> |
59 | #include <linux/bitops.h> | 40 | #include <linux/bitops.h> |
60 | #include <linux/mutex.h> | ||
61 | #include <linux/mm.h> | 41 | #include <linux/mm.h> |
62 | #include <linux/gfp.h> | 42 | #include <linux/gfp.h> |
63 | 43 | ||
@@ -95,12 +75,11 @@ | |||
95 | SUPPORTED_Pause | SUPPORTED_Autoneg) | 75 | SUPPORTED_Pause | SUPPORTED_Autoneg) |
96 | 76 | ||
97 | #define DRV_NAME "sungem" | 77 | #define DRV_NAME "sungem" |
98 | #define DRV_VERSION "0.98" | 78 | #define DRV_VERSION "1.0" |
99 | #define DRV_RELDATE "8/24/03" | 79 | #define DRV_AUTHOR "David S. Miller <davem@redhat.com>" |
100 | #define DRV_AUTHOR "David S. Miller (davem@redhat.com)" | ||
101 | 80 | ||
102 | static char version[] __devinitdata = | 81 | static char version[] __devinitdata = |
103 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; | 82 | DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n"; |
104 | 83 | ||
105 | MODULE_AUTHOR(DRV_AUTHOR); | 84 | MODULE_AUTHOR(DRV_AUTHOR); |
106 | MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); | 85 | MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); |
@@ -218,6 +197,7 @@ static inline void gem_disable_ints(struct gem *gp) | |||
218 | { | 197 | { |
219 | /* Disable all interrupts, including TXDONE */ | 198 | /* Disable all interrupts, including TXDONE */ |
220 | writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); | 199 | writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); |
200 | (void)readl(gp->regs + GREG_IMASK); /* write posting */ | ||
221 | } | 201 | } |
222 | 202 | ||
223 | static void gem_get_cell(struct gem *gp) | 203 | static void gem_get_cell(struct gem *gp) |
@@ -247,6 +227,29 @@ static void gem_put_cell(struct gem *gp) | |||
247 | #endif /* CONFIG_PPC_PMAC */ | 227 | #endif /* CONFIG_PPC_PMAC */ |
248 | } | 228 | } |
249 | 229 | ||
230 | static inline void gem_netif_stop(struct gem *gp) | ||
231 | { | ||
232 | gp->dev->trans_start = jiffies; /* prevent tx timeout */ | ||
233 | napi_disable(&gp->napi); | ||
234 | netif_tx_disable(gp->dev); | ||
235 | } | ||
236 | |||
237 | static inline void gem_netif_start(struct gem *gp) | ||
238 | { | ||
239 | /* NOTE: unconditional netif_wake_queue is only | ||
240 | * appropriate so long as all callers are assured to | ||
241 | * have free tx slots. | ||
242 | */ | ||
243 | netif_wake_queue(gp->dev); | ||
244 | napi_enable(&gp->napi); | ||
245 | } | ||
246 | |||
247 | static void gem_schedule_reset(struct gem *gp) | ||
248 | { | ||
249 | gp->reset_task_pending = 1; | ||
250 | schedule_work(&gp->reset_task); | ||
251 | } | ||
252 | |||
250 | static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) | 253 | static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) |
251 | { | 254 | { |
252 | if (netif_msg_intr(gp)) | 255 | if (netif_msg_intr(gp)) |
@@ -604,56 +607,46 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat | |||
604 | gp->dev->name); | 607 | gp->dev->name); |
605 | dev->stats.rx_errors++; | 608 | dev->stats.rx_errors++; |
606 | 609 | ||
607 | goto do_reset; | 610 | return 1; |
608 | } | 611 | } |
609 | 612 | ||
610 | if (gem_status & GREG_STAT_PCS) { | 613 | if (gem_status & GREG_STAT_PCS) { |
611 | if (gem_pcs_interrupt(dev, gp, gem_status)) | 614 | if (gem_pcs_interrupt(dev, gp, gem_status)) |
612 | goto do_reset; | 615 | return 1; |
613 | } | 616 | } |
614 | 617 | ||
615 | if (gem_status & GREG_STAT_TXMAC) { | 618 | if (gem_status & GREG_STAT_TXMAC) { |
616 | if (gem_txmac_interrupt(dev, gp, gem_status)) | 619 | if (gem_txmac_interrupt(dev, gp, gem_status)) |
617 | goto do_reset; | 620 | return 1; |
618 | } | 621 | } |
619 | 622 | ||
620 | if (gem_status & GREG_STAT_RXMAC) { | 623 | if (gem_status & GREG_STAT_RXMAC) { |
621 | if (gem_rxmac_interrupt(dev, gp, gem_status)) | 624 | if (gem_rxmac_interrupt(dev, gp, gem_status)) |
622 | goto do_reset; | 625 | return 1; |
623 | } | 626 | } |
624 | 627 | ||
625 | if (gem_status & GREG_STAT_MAC) { | 628 | if (gem_status & GREG_STAT_MAC) { |
626 | if (gem_mac_interrupt(dev, gp, gem_status)) | 629 | if (gem_mac_interrupt(dev, gp, gem_status)) |
627 | goto do_reset; | 630 | return 1; |
628 | } | 631 | } |
629 | 632 | ||
630 | if (gem_status & GREG_STAT_MIF) { | 633 | if (gem_status & GREG_STAT_MIF) { |
631 | if (gem_mif_interrupt(dev, gp, gem_status)) | 634 | if (gem_mif_interrupt(dev, gp, gem_status)) |
632 | goto do_reset; | 635 | return 1; |
633 | } | 636 | } |
634 | 637 | ||
635 | if (gem_status & GREG_STAT_PCIERR) { | 638 | if (gem_status & GREG_STAT_PCIERR) { |
636 | if (gem_pci_interrupt(dev, gp, gem_status)) | 639 | if (gem_pci_interrupt(dev, gp, gem_status)) |
637 | goto do_reset; | 640 | return 1; |
638 | } | 641 | } |
639 | 642 | ||
640 | return 0; | 643 | return 0; |
641 | |||
642 | do_reset: | ||
643 | gp->reset_task_pending = 1; | ||
644 | schedule_work(&gp->reset_task); | ||
645 | |||
646 | return 1; | ||
647 | } | 644 | } |
648 | 645 | ||
649 | static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) | 646 | static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) |
650 | { | 647 | { |
651 | int entry, limit; | 648 | int entry, limit; |
652 | 649 | ||
653 | if (netif_msg_intr(gp)) | ||
654 | printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n", | ||
655 | gp->dev->name, gem_status); | ||
656 | |||
657 | entry = gp->tx_old; | 650 | entry = gp->tx_old; |
658 | limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); | 651 | limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); |
659 | while (entry != limit) { | 652 | while (entry != limit) { |
@@ -697,13 +690,27 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st | |||
697 | } | 690 | } |
698 | 691 | ||
699 | dev->stats.tx_packets++; | 692 | dev->stats.tx_packets++; |
700 | dev_kfree_skb_irq(skb); | 693 | dev_kfree_skb(skb); |
701 | } | 694 | } |
702 | gp->tx_old = entry; | 695 | gp->tx_old = entry; |
703 | 696 | ||
704 | if (netif_queue_stopped(dev) && | 697 | /* Need to make the tx_old update visible to gem_start_xmit() |
705 | TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) | 698 | * before checking for netif_queue_stopped(). Without the |
706 | netif_wake_queue(dev); | 699 | * memory barrier, there is a small possibility that gem_start_xmit() |
700 | * will miss it and cause the queue to be stopped forever. | ||
701 | */ | ||
702 | smp_mb(); | ||
703 | |||
704 | if (unlikely(netif_queue_stopped(dev) && | ||
705 | TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) { | ||
706 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
707 | |||
708 | __netif_tx_lock(txq, smp_processor_id()); | ||
709 | if (netif_queue_stopped(dev) && | ||
710 | TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) | ||
711 | netif_wake_queue(dev); | ||
712 | __netif_tx_unlock(txq); | ||
713 | } | ||
707 | } | 714 | } |
708 | 715 | ||
709 | static __inline__ void gem_post_rxds(struct gem *gp, int limit) | 716 | static __inline__ void gem_post_rxds(struct gem *gp, int limit) |
@@ -736,6 +743,21 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit) | |||
736 | } | 743 | } |
737 | } | 744 | } |
738 | 745 | ||
746 | #define ALIGNED_RX_SKB_ADDR(addr) \ | ||
747 | ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) | ||
748 | static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size, | ||
749 | gfp_t gfp_flags) | ||
750 | { | ||
751 | struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); | ||
752 | |||
753 | if (likely(skb)) { | ||
754 | unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data); | ||
755 | skb_reserve(skb, offset); | ||
756 | skb->dev = dev; | ||
757 | } | ||
758 | return skb; | ||
759 | } | ||
760 | |||
739 | static int gem_rx(struct gem *gp, int work_to_do) | 761 | static int gem_rx(struct gem *gp, int work_to_do) |
740 | { | 762 | { |
741 | struct net_device *dev = gp->dev; | 763 | struct net_device *dev = gp->dev; |
@@ -799,7 +821,7 @@ static int gem_rx(struct gem *gp, int work_to_do) | |||
799 | if (len > RX_COPY_THRESHOLD) { | 821 | if (len > RX_COPY_THRESHOLD) { |
800 | struct sk_buff *new_skb; | 822 | struct sk_buff *new_skb; |
801 | 823 | ||
802 | new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); | 824 | new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); |
803 | if (new_skb == NULL) { | 825 | if (new_skb == NULL) { |
804 | drops++; | 826 | drops++; |
805 | goto drop_it; | 827 | goto drop_it; |
@@ -808,7 +830,6 @@ static int gem_rx(struct gem *gp, int work_to_do) | |||
808 | RX_BUF_ALLOC_SIZE(gp), | 830 | RX_BUF_ALLOC_SIZE(gp), |
809 | PCI_DMA_FROMDEVICE); | 831 | PCI_DMA_FROMDEVICE); |
810 | gp->rx_skbs[entry] = new_skb; | 832 | gp->rx_skbs[entry] = new_skb; |
811 | new_skb->dev = gp->dev; | ||
812 | skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); | 833 | skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); |
813 | rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, | 834 | rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, |
814 | virt_to_page(new_skb->data), | 835 | virt_to_page(new_skb->data), |
@@ -820,7 +841,7 @@ static int gem_rx(struct gem *gp, int work_to_do) | |||
820 | /* Trim the original skb for the netif. */ | 841 | /* Trim the original skb for the netif. */ |
821 | skb_trim(skb, len); | 842 | skb_trim(skb, len); |
822 | } else { | 843 | } else { |
823 | struct sk_buff *copy_skb = dev_alloc_skb(len + 2); | 844 | struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); |
824 | 845 | ||
825 | if (copy_skb == NULL) { | 846 | if (copy_skb == NULL) { |
826 | drops++; | 847 | drops++; |
@@ -842,7 +863,7 @@ static int gem_rx(struct gem *gp, int work_to_do) | |||
842 | skb->ip_summed = CHECKSUM_COMPLETE; | 863 | skb->ip_summed = CHECKSUM_COMPLETE; |
843 | skb->protocol = eth_type_trans(skb, gp->dev); | 864 | skb->protocol = eth_type_trans(skb, gp->dev); |
844 | 865 | ||
845 | netif_receive_skb(skb); | 866 | napi_gro_receive(&gp->napi, skb); |
846 | 867 | ||
847 | dev->stats.rx_packets++; | 868 | dev->stats.rx_packets++; |
848 | dev->stats.rx_bytes += len; | 869 | dev->stats.rx_bytes += len; |
@@ -865,28 +886,32 @@ static int gem_poll(struct napi_struct *napi, int budget) | |||
865 | { | 886 | { |
866 | struct gem *gp = container_of(napi, struct gem, napi); | 887 | struct gem *gp = container_of(napi, struct gem, napi); |
867 | struct net_device *dev = gp->dev; | 888 | struct net_device *dev = gp->dev; |
868 | unsigned long flags; | ||
869 | int work_done; | 889 | int work_done; |
870 | 890 | ||
871 | /* | ||
872 | * NAPI locking nightmare: See comment at head of driver | ||
873 | */ | ||
874 | spin_lock_irqsave(&gp->lock, flags); | ||
875 | |||
876 | work_done = 0; | 891 | work_done = 0; |
877 | do { | 892 | do { |
878 | /* Handle anomalies */ | 893 | /* Handle anomalies */ |
879 | if (gp->status & GREG_STAT_ABNORMAL) { | 894 | if (unlikely(gp->status & GREG_STAT_ABNORMAL)) { |
880 | if (gem_abnormal_irq(dev, gp, gp->status)) | 895 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); |
881 | break; | 896 | int reset; |
897 | |||
898 | /* We run the abnormal interrupt handling code with | ||
899 | * the Tx lock. It only resets the Rx portion of the | ||
900 | * chip, but we need to guard it against DMA being | ||
901 | * restarted by the link poll timer | ||
902 | */ | ||
903 | __netif_tx_lock(txq, smp_processor_id()); | ||
904 | reset = gem_abnormal_irq(dev, gp, gp->status); | ||
905 | __netif_tx_unlock(txq); | ||
906 | if (reset) { | ||
907 | gem_schedule_reset(gp); | ||
908 | napi_complete(napi); | ||
909 | return work_done; | ||
910 | } | ||
882 | } | 911 | } |
883 | 912 | ||
884 | /* Run TX completion thread */ | 913 | /* Run TX completion thread */ |
885 | spin_lock(&gp->tx_lock); | ||
886 | gem_tx(dev, gp, gp->status); | 914 | gem_tx(dev, gp, gp->status); |
887 | spin_unlock(&gp->tx_lock); | ||
888 | |||
889 | spin_unlock_irqrestore(&gp->lock, flags); | ||
890 | 915 | ||
891 | /* Run RX thread. We don't use any locking here, | 916 | /* Run RX thread. We don't use any locking here, |
892 | * code willing to do bad things - like cleaning the | 917 | * code willing to do bad things - like cleaning the |
@@ -898,16 +923,12 @@ static int gem_poll(struct napi_struct *napi, int budget) | |||
898 | if (work_done >= budget) | 923 | if (work_done >= budget) |
899 | return work_done; | 924 | return work_done; |
900 | 925 | ||
901 | spin_lock_irqsave(&gp->lock, flags); | ||
902 | |||
903 | gp->status = readl(gp->regs + GREG_STAT); | 926 | gp->status = readl(gp->regs + GREG_STAT); |
904 | } while (gp->status & GREG_STAT_NAPI); | 927 | } while (gp->status & GREG_STAT_NAPI); |
905 | 928 | ||
906 | __napi_complete(napi); | 929 | napi_complete(napi); |
907 | gem_enable_ints(gp); | 930 | gem_enable_ints(gp); |
908 | 931 | ||
909 | spin_unlock_irqrestore(&gp->lock, flags); | ||
910 | |||
911 | return work_done; | 932 | return work_done; |
912 | } | 933 | } |
913 | 934 | ||
@@ -915,32 +936,23 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) | |||
915 | { | 936 | { |
916 | struct net_device *dev = dev_id; | 937 | struct net_device *dev = dev_id; |
917 | struct gem *gp = netdev_priv(dev); | 938 | struct gem *gp = netdev_priv(dev); |
918 | unsigned long flags; | ||
919 | |||
920 | /* Swallow interrupts when shutting the chip down, though | ||
921 | * that shouldn't happen, we should have done free_irq() at | ||
922 | * this point... | ||
923 | */ | ||
924 | if (!gp->running) | ||
925 | return IRQ_HANDLED; | ||
926 | |||
927 | spin_lock_irqsave(&gp->lock, flags); | ||
928 | 939 | ||
929 | if (napi_schedule_prep(&gp->napi)) { | 940 | if (napi_schedule_prep(&gp->napi)) { |
930 | u32 gem_status = readl(gp->regs + GREG_STAT); | 941 | u32 gem_status = readl(gp->regs + GREG_STAT); |
931 | 942 | ||
932 | if (gem_status == 0) { | 943 | if (unlikely(gem_status == 0)) { |
933 | napi_enable(&gp->napi); | 944 | napi_enable(&gp->napi); |
934 | spin_unlock_irqrestore(&gp->lock, flags); | ||
935 | return IRQ_NONE; | 945 | return IRQ_NONE; |
936 | } | 946 | } |
947 | if (netif_msg_intr(gp)) | ||
948 | printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n", | ||
949 | gp->dev->name, gem_status); | ||
950 | |||
937 | gp->status = gem_status; | 951 | gp->status = gem_status; |
938 | gem_disable_ints(gp); | 952 | gem_disable_ints(gp); |
939 | __napi_schedule(&gp->napi); | 953 | __napi_schedule(&gp->napi); |
940 | } | 954 | } |
941 | 955 | ||
942 | spin_unlock_irqrestore(&gp->lock, flags); | ||
943 | |||
944 | /* If polling was disabled at the time we received that | 956 | /* If polling was disabled at the time we received that |
945 | * interrupt, we may return IRQ_HANDLED here while we | 957 | * interrupt, we may return IRQ_HANDLED here while we |
946 | * should return IRQ_NONE. No big deal... | 958 | * should return IRQ_NONE. No big deal... |
@@ -951,10 +963,11 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) | |||
951 | #ifdef CONFIG_NET_POLL_CONTROLLER | 963 | #ifdef CONFIG_NET_POLL_CONTROLLER |
952 | static void gem_poll_controller(struct net_device *dev) | 964 | static void gem_poll_controller(struct net_device *dev) |
953 | { | 965 | { |
954 | /* gem_interrupt is safe to reentrance so no need | 966 | struct gem *gp = netdev_priv(dev); |
955 | * to disable_irq here. | 967 | |
956 | */ | 968 | disable_irq(gp->pdev->irq); |
957 | gem_interrupt(dev->irq, dev); | 969 | gem_interrupt(gp->pdev->irq, dev); |
970 | enable_irq(gp->pdev->irq); | ||
958 | } | 971 | } |
959 | #endif | 972 | #endif |
960 | 973 | ||
@@ -963,10 +976,7 @@ static void gem_tx_timeout(struct net_device *dev) | |||
963 | struct gem *gp = netdev_priv(dev); | 976 | struct gem *gp = netdev_priv(dev); |
964 | 977 | ||
965 | netdev_err(dev, "transmit timed out, resetting\n"); | 978 | netdev_err(dev, "transmit timed out, resetting\n"); |
966 | if (!gp->running) { | 979 | |
967 | netdev_err(dev, "hrm.. hw not running !\n"); | ||
968 | return; | ||
969 | } | ||
970 | netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n", | 980 | netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n", |
971 | readl(gp->regs + TXDMA_CFG), | 981 | readl(gp->regs + TXDMA_CFG), |
972 | readl(gp->regs + MAC_TXSTAT), | 982 | readl(gp->regs + MAC_TXSTAT), |
@@ -976,14 +986,7 @@ static void gem_tx_timeout(struct net_device *dev) | |||
976 | readl(gp->regs + MAC_RXSTAT), | 986 | readl(gp->regs + MAC_RXSTAT), |
977 | readl(gp->regs + MAC_RXCFG)); | 987 | readl(gp->regs + MAC_RXCFG)); |
978 | 988 | ||
979 | spin_lock_irq(&gp->lock); | 989 | gem_schedule_reset(gp); |
980 | spin_lock(&gp->tx_lock); | ||
981 | |||
982 | gp->reset_task_pending = 1; | ||
983 | schedule_work(&gp->reset_task); | ||
984 | |||
985 | spin_unlock(&gp->tx_lock); | ||
986 | spin_unlock_irq(&gp->lock); | ||
987 | } | 990 | } |
988 | 991 | ||
989 | static __inline__ int gem_intme(int entry) | 992 | static __inline__ int gem_intme(int entry) |
@@ -1001,7 +1004,6 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb, | |||
1001 | struct gem *gp = netdev_priv(dev); | 1004 | struct gem *gp = netdev_priv(dev); |
1002 | int entry; | 1005 | int entry; |
1003 | u64 ctrl; | 1006 | u64 ctrl; |
1004 | unsigned long flags; | ||
1005 | 1007 | ||
1006 | ctrl = 0; | 1008 | ctrl = 0; |
1007 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1009 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
@@ -1013,21 +1015,12 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb, | |||
1013 | (csum_stuff_off << 21)); | 1015 | (csum_stuff_off << 21)); |
1014 | } | 1016 | } |
1015 | 1017 | ||
1016 | if (!spin_trylock_irqsave(&gp->tx_lock, flags)) { | 1018 | if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) { |
1017 | /* Tell upper layer to requeue */ | 1019 | /* This is a hard error, log it. */ |
1018 | return NETDEV_TX_LOCKED; | 1020 | if (!netif_queue_stopped(dev)) { |
1019 | } | 1021 | netif_stop_queue(dev); |
1020 | /* We raced with gem_do_stop() */ | 1022 | netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); |
1021 | if (!gp->running) { | 1023 | } |
1022 | spin_unlock_irqrestore(&gp->tx_lock, flags); | ||
1023 | return NETDEV_TX_BUSY; | ||
1024 | } | ||
1025 | |||
1026 | /* This is a hard error, log it. */ | ||
1027 | if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) { | ||
1028 | netif_stop_queue(dev); | ||
1029 | spin_unlock_irqrestore(&gp->tx_lock, flags); | ||
1030 | netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); | ||
1031 | return NETDEV_TX_BUSY; | 1024 | return NETDEV_TX_BUSY; |
1032 | } | 1025 | } |
1033 | 1026 | ||
@@ -1104,17 +1097,23 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb, | |||
1104 | } | 1097 | } |
1105 | 1098 | ||
1106 | gp->tx_new = entry; | 1099 | gp->tx_new = entry; |
1107 | if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1)) | 1100 | if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) { |
1108 | netif_stop_queue(dev); | 1101 | netif_stop_queue(dev); |
1109 | 1102 | ||
1103 | /* netif_stop_queue() must be done before checking | ||
1104 | * the tx index in TX_BUFFS_AVAIL() below, because | ||
1105 | * in gem_tx(), we update tx_old before checking for | ||
1106 | * netif_queue_stopped(). | ||
1107 | */ | ||
1108 | smp_mb(); | ||
1109 | if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) | ||
1110 | netif_wake_queue(dev); | ||
1111 | } | ||
1110 | if (netif_msg_tx_queued(gp)) | 1112 | if (netif_msg_tx_queued(gp)) |
1111 | printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", | 1113 | printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", |
1112 | dev->name, entry, skb->len); | 1114 | dev->name, entry, skb->len); |
1113 | mb(); | 1115 | mb(); |
1114 | writel(gp->tx_new, gp->regs + TXDMA_KICK); | 1116 | writel(gp->tx_new, gp->regs + TXDMA_KICK); |
1115 | spin_unlock_irqrestore(&gp->tx_lock, flags); | ||
1116 | |||
1117 | dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ | ||
1118 | 1117 | ||
1119 | return NETDEV_TX_OK; | 1118 | return NETDEV_TX_OK; |
1120 | } | 1119 | } |
@@ -1184,7 +1183,6 @@ static void gem_pcs_reinit_adv(struct gem *gp) | |||
1184 | 1183 | ||
1185 | #define STOP_TRIES 32 | 1184 | #define STOP_TRIES 32 |
1186 | 1185 | ||
1187 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
1188 | static void gem_reset(struct gem *gp) | 1186 | static void gem_reset(struct gem *gp) |
1189 | { | 1187 | { |
1190 | int limit; | 1188 | int limit; |
@@ -1213,7 +1211,6 @@ static void gem_reset(struct gem *gp) | |||
1213 | gem_pcs_reinit_adv(gp); | 1211 | gem_pcs_reinit_adv(gp); |
1214 | } | 1212 | } |
1215 | 1213 | ||
1216 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
1217 | static void gem_start_dma(struct gem *gp) | 1214 | static void gem_start_dma(struct gem *gp) |
1218 | { | 1215 | { |
1219 | u32 val; | 1216 | u32 val; |
@@ -1236,8 +1233,7 @@ static void gem_start_dma(struct gem *gp) | |||
1236 | writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); | 1233 | writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); |
1237 | } | 1234 | } |
1238 | 1235 | ||
1239 | /* Must be invoked under gp->lock and gp->tx_lock. DMA won't be | 1236 | /* DMA won't be actually stopped before about 4ms tho ... |
1240 | * actually stopped before about 4ms tho ... | ||
1241 | */ | 1237 | */ |
1242 | static void gem_stop_dma(struct gem *gp) | 1238 | static void gem_stop_dma(struct gem *gp) |
1243 | { | 1239 | { |
@@ -1259,7 +1255,6 @@ static void gem_stop_dma(struct gem *gp) | |||
1259 | } | 1255 | } |
1260 | 1256 | ||
1261 | 1257 | ||
1262 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
1263 | // XXX dbl check what that function should do when called on PCS PHY | 1258 | // XXX dbl check what that function should do when called on PCS PHY |
1264 | static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) | 1259 | static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) |
1265 | { | 1260 | { |
@@ -1319,7 +1314,7 @@ start_aneg: | |||
1319 | /* If we are asleep, we don't try to actually setup the PHY, we | 1314 | /* If we are asleep, we don't try to actually setup the PHY, we |
1320 | * just store the settings | 1315 | * just store the settings |
1321 | */ | 1316 | */ |
1322 | if (gp->asleep) { | 1317 | if (!netif_device_present(gp->dev)) { |
1323 | gp->phy_mii.autoneg = gp->want_autoneg = autoneg; | 1318 | gp->phy_mii.autoneg = gp->want_autoneg = autoneg; |
1324 | gp->phy_mii.speed = speed; | 1319 | gp->phy_mii.speed = speed; |
1325 | gp->phy_mii.duplex = duplex; | 1320 | gp->phy_mii.duplex = duplex; |
@@ -1345,13 +1340,12 @@ non_mii: | |||
1345 | 1340 | ||
1346 | /* A link-up condition has occurred, initialize and enable the | 1341 | /* A link-up condition has occurred, initialize and enable the |
1347 | * rest of the chip. | 1342 | * rest of the chip. |
1348 | * | ||
1349 | * Must be invoked under gp->lock and gp->tx_lock. | ||
1350 | */ | 1343 | */ |
1351 | static int gem_set_link_modes(struct gem *gp) | 1344 | static int gem_set_link_modes(struct gem *gp) |
1352 | { | 1345 | { |
1353 | u32 val; | 1346 | struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0); |
1354 | int full_duplex, speed, pause; | 1347 | int full_duplex, speed, pause; |
1348 | u32 val; | ||
1355 | 1349 | ||
1356 | full_duplex = 0; | 1350 | full_duplex = 0; |
1357 | speed = SPEED_10; | 1351 | speed = SPEED_10; |
@@ -1375,8 +1369,11 @@ static int gem_set_link_modes(struct gem *gp) | |||
1375 | netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n", | 1369 | netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n", |
1376 | speed, (full_duplex ? "full" : "half")); | 1370 | speed, (full_duplex ? "full" : "half")); |
1377 | 1371 | ||
1378 | if (!gp->running) | 1372 | |
1379 | return 0; | 1373 | /* We take the tx queue lock to avoid collisions between |
1374 | * this code, the tx path and the NAPI-driven error path | ||
1375 | */ | ||
1376 | __netif_tx_lock(txq, smp_processor_id()); | ||
1380 | 1377 | ||
1381 | val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); | 1378 | val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); |
1382 | if (full_duplex) { | 1379 | if (full_duplex) { |
@@ -1425,18 +1422,6 @@ static int gem_set_link_modes(struct gem *gp) | |||
1425 | pause = 1; | 1422 | pause = 1; |
1426 | } | 1423 | } |
1427 | 1424 | ||
1428 | if (netif_msg_link(gp)) { | ||
1429 | if (pause) { | ||
1430 | netdev_info(gp->dev, | ||
1431 | "Pause is enabled (rxfifo: %d off: %d on: %d)\n", | ||
1432 | gp->rx_fifo_sz, | ||
1433 | gp->rx_pause_off, | ||
1434 | gp->rx_pause_on); | ||
1435 | } else { | ||
1436 | netdev_info(gp->dev, "Pause is disabled\n"); | ||
1437 | } | ||
1438 | } | ||
1439 | |||
1440 | if (!full_duplex) | 1425 | if (!full_duplex) |
1441 | writel(512, gp->regs + MAC_STIME); | 1426 | writel(512, gp->regs + MAC_STIME); |
1442 | else | 1427 | else |
@@ -1450,10 +1435,23 @@ static int gem_set_link_modes(struct gem *gp) | |||
1450 | 1435 | ||
1451 | gem_start_dma(gp); | 1436 | gem_start_dma(gp); |
1452 | 1437 | ||
1438 | __netif_tx_unlock(txq); | ||
1439 | |||
1440 | if (netif_msg_link(gp)) { | ||
1441 | if (pause) { | ||
1442 | netdev_info(gp->dev, | ||
1443 | "Pause is enabled (rxfifo: %d off: %d on: %d)\n", | ||
1444 | gp->rx_fifo_sz, | ||
1445 | gp->rx_pause_off, | ||
1446 | gp->rx_pause_on); | ||
1447 | } else { | ||
1448 | netdev_info(gp->dev, "Pause is disabled\n"); | ||
1449 | } | ||
1450 | } | ||
1451 | |||
1453 | return 0; | 1452 | return 0; |
1454 | } | 1453 | } |
1455 | 1454 | ||
1456 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
1457 | static int gem_mdio_link_not_up(struct gem *gp) | 1455 | static int gem_mdio_link_not_up(struct gem *gp) |
1458 | { | 1456 | { |
1459 | switch (gp->lstate) { | 1457 | switch (gp->lstate) { |
@@ -1501,20 +1499,12 @@ static int gem_mdio_link_not_up(struct gem *gp) | |||
1501 | static void gem_link_timer(unsigned long data) | 1499 | static void gem_link_timer(unsigned long data) |
1502 | { | 1500 | { |
1503 | struct gem *gp = (struct gem *) data; | 1501 | struct gem *gp = (struct gem *) data; |
1502 | struct net_device *dev = gp->dev; | ||
1504 | int restart_aneg = 0; | 1503 | int restart_aneg = 0; |
1505 | 1504 | ||
1506 | if (gp->asleep) | 1505 | /* There's no point doing anything if we're going to be reset */ |
1507 | return; | ||
1508 | |||
1509 | spin_lock_irq(&gp->lock); | ||
1510 | spin_lock(&gp->tx_lock); | ||
1511 | gem_get_cell(gp); | ||
1512 | |||
1513 | /* If the reset task is still pending, we just | ||
1514 | * reschedule the link timer | ||
1515 | */ | ||
1516 | if (gp->reset_task_pending) | 1506 | if (gp->reset_task_pending) |
1517 | goto restart; | 1507 | return; |
1518 | 1508 | ||
1519 | if (gp->phy_type == phy_serialink || | 1509 | if (gp->phy_type == phy_serialink || |
1520 | gp->phy_type == phy_serdes) { | 1510 | gp->phy_type == phy_serdes) { |
@@ -1528,7 +1518,7 @@ static void gem_link_timer(unsigned long data) | |||
1528 | goto restart; | 1518 | goto restart; |
1529 | 1519 | ||
1530 | gp->lstate = link_up; | 1520 | gp->lstate = link_up; |
1531 | netif_carrier_on(gp->dev); | 1521 | netif_carrier_on(dev); |
1532 | (void)gem_set_link_modes(gp); | 1522 | (void)gem_set_link_modes(gp); |
1533 | } | 1523 | } |
1534 | goto restart; | 1524 | goto restart; |
@@ -1544,12 +1534,12 @@ static void gem_link_timer(unsigned long data) | |||
1544 | gp->last_forced_speed = gp->phy_mii.speed; | 1534 | gp->last_forced_speed = gp->phy_mii.speed; |
1545 | gp->timer_ticks = 5; | 1535 | gp->timer_ticks = 5; |
1546 | if (netif_msg_link(gp)) | 1536 | if (netif_msg_link(gp)) |
1547 | netdev_info(gp->dev, | 1537 | netdev_info(dev, |
1548 | "Got link after fallback, retrying autoneg once...\n"); | 1538 | "Got link after fallback, retrying autoneg once...\n"); |
1549 | gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); | 1539 | gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); |
1550 | } else if (gp->lstate != link_up) { | 1540 | } else if (gp->lstate != link_up) { |
1551 | gp->lstate = link_up; | 1541 | gp->lstate = link_up; |
1552 | netif_carrier_on(gp->dev); | 1542 | netif_carrier_on(dev); |
1553 | if (gem_set_link_modes(gp)) | 1543 | if (gem_set_link_modes(gp)) |
1554 | restart_aneg = 1; | 1544 | restart_aneg = 1; |
1555 | } | 1545 | } |
@@ -1559,11 +1549,11 @@ static void gem_link_timer(unsigned long data) | |||
1559 | */ | 1549 | */ |
1560 | if (gp->lstate == link_up) { | 1550 | if (gp->lstate == link_up) { |
1561 | gp->lstate = link_down; | 1551 | gp->lstate = link_down; |
1562 | netif_info(gp, link, gp->dev, "Link down\n"); | 1552 | netif_info(gp, link, dev, "Link down\n"); |
1563 | netif_carrier_off(gp->dev); | 1553 | netif_carrier_off(dev); |
1564 | gp->reset_task_pending = 1; | 1554 | gem_schedule_reset(gp); |
1565 | schedule_work(&gp->reset_task); | 1555 | /* The reset task will restart the timer */ |
1566 | restart_aneg = 1; | 1556 | return; |
1567 | } else if (++gp->timer_ticks > 10) { | 1557 | } else if (++gp->timer_ticks > 10) { |
1568 | if (found_mii_phy(gp)) | 1558 | if (found_mii_phy(gp)) |
1569 | restart_aneg = gem_mdio_link_not_up(gp); | 1559 | restart_aneg = gem_mdio_link_not_up(gp); |
@@ -1573,17 +1563,12 @@ static void gem_link_timer(unsigned long data) | |||
1573 | } | 1563 | } |
1574 | if (restart_aneg) { | 1564 | if (restart_aneg) { |
1575 | gem_begin_auto_negotiation(gp, NULL); | 1565 | gem_begin_auto_negotiation(gp, NULL); |
1576 | goto out_unlock; | 1566 | return; |
1577 | } | 1567 | } |
1578 | restart: | 1568 | restart: |
1579 | mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); | 1569 | mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); |
1580 | out_unlock: | ||
1581 | gem_put_cell(gp); | ||
1582 | spin_unlock(&gp->tx_lock); | ||
1583 | spin_unlock_irq(&gp->lock); | ||
1584 | } | 1570 | } |
1585 | 1571 | ||
1586 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
1587 | static void gem_clean_rings(struct gem *gp) | 1572 | static void gem_clean_rings(struct gem *gp) |
1588 | { | 1573 | { |
1589 | struct gem_init_block *gb = gp->init_block; | 1574 | struct gem_init_block *gb = gp->init_block; |
@@ -1634,7 +1619,6 @@ static void gem_clean_rings(struct gem *gp) | |||
1634 | } | 1619 | } |
1635 | } | 1620 | } |
1636 | 1621 | ||
1637 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
1638 | static void gem_init_rings(struct gem *gp) | 1622 | static void gem_init_rings(struct gem *gp) |
1639 | { | 1623 | { |
1640 | struct gem_init_block *gb = gp->init_block; | 1624 | struct gem_init_block *gb = gp->init_block; |
@@ -1653,7 +1637,7 @@ static void gem_init_rings(struct gem *gp) | |||
1653 | struct sk_buff *skb; | 1637 | struct sk_buff *skb; |
1654 | struct gem_rxd *rxd = &gb->rxd[i]; | 1638 | struct gem_rxd *rxd = &gb->rxd[i]; |
1655 | 1639 | ||
1656 | skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); | 1640 | skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL); |
1657 | if (!skb) { | 1641 | if (!skb) { |
1658 | rxd->buffer = 0; | 1642 | rxd->buffer = 0; |
1659 | rxd->status_word = 0; | 1643 | rxd->status_word = 0; |
@@ -1661,7 +1645,6 @@ static void gem_init_rings(struct gem *gp) | |||
1661 | } | 1645 | } |
1662 | 1646 | ||
1663 | gp->rx_skbs[i] = skb; | 1647 | gp->rx_skbs[i] = skb; |
1664 | skb->dev = dev; | ||
1665 | skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); | 1648 | skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); |
1666 | dma_addr = pci_map_page(gp->pdev, | 1649 | dma_addr = pci_map_page(gp->pdev, |
1667 | virt_to_page(skb->data), | 1650 | virt_to_page(skb->data), |
@@ -1737,7 +1720,7 @@ static void gem_init_phy(struct gem *gp) | |||
1737 | 1720 | ||
1738 | if (gp->phy_type == phy_mii_mdio0 || | 1721 | if (gp->phy_type == phy_mii_mdio0 || |
1739 | gp->phy_type == phy_mii_mdio1) { | 1722 | gp->phy_type == phy_mii_mdio1) { |
1740 | // XXX check for errors | 1723 | /* Reset and detect MII PHY */ |
1741 | mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); | 1724 | mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); |
1742 | 1725 | ||
1743 | /* Init PHY */ | 1726 | /* Init PHY */ |
@@ -1753,13 +1736,15 @@ static void gem_init_phy(struct gem *gp) | |||
1753 | gp->lstate = link_down; | 1736 | gp->lstate = link_down; |
1754 | netif_carrier_off(gp->dev); | 1737 | netif_carrier_off(gp->dev); |
1755 | 1738 | ||
1756 | /* Can I advertise gigabit here ? I'd need BCM PHY docs... */ | 1739 | /* Print things out */ |
1757 | spin_lock_irq(&gp->lock); | 1740 | if (gp->phy_type == phy_mii_mdio0 || |
1741 | gp->phy_type == phy_mii_mdio1) | ||
1742 | netdev_info(gp->dev, "Found %s PHY\n", | ||
1743 | gp->phy_mii.def ? gp->phy_mii.def->name : "no"); | ||
1744 | |||
1758 | gem_begin_auto_negotiation(gp, NULL); | 1745 | gem_begin_auto_negotiation(gp, NULL); |
1759 | spin_unlock_irq(&gp->lock); | ||
1760 | } | 1746 | } |
1761 | 1747 | ||
1762 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
1763 | static void gem_init_dma(struct gem *gp) | 1748 | static void gem_init_dma(struct gem *gp) |
1764 | { | 1749 | { |
1765 | u64 desc_dma = (u64) gp->gblock_dvma; | 1750 | u64 desc_dma = (u64) gp->gblock_dvma; |
@@ -1797,7 +1782,6 @@ static void gem_init_dma(struct gem *gp) | |||
1797 | gp->regs + RXDMA_BLANK); | 1782 | gp->regs + RXDMA_BLANK); |
1798 | } | 1783 | } |
1799 | 1784 | ||
1800 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
1801 | static u32 gem_setup_multicast(struct gem *gp) | 1785 | static u32 gem_setup_multicast(struct gem *gp) |
1802 | { | 1786 | { |
1803 | u32 rxcfg = 0; | 1787 | u32 rxcfg = 0; |
@@ -1835,7 +1819,6 @@ static u32 gem_setup_multicast(struct gem *gp) | |||
1835 | return rxcfg; | 1819 | return rxcfg; |
1836 | } | 1820 | } |
1837 | 1821 | ||
1838 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
1839 | static void gem_init_mac(struct gem *gp) | 1822 | static void gem_init_mac(struct gem *gp) |
1840 | { | 1823 | { |
1841 | unsigned char *e = &gp->dev->dev_addr[0]; | 1824 | unsigned char *e = &gp->dev->dev_addr[0]; |
@@ -1918,7 +1901,6 @@ static void gem_init_mac(struct gem *gp) | |||
1918 | writel(0, gp->regs + WOL_WAKECSR); | 1901 | writel(0, gp->regs + WOL_WAKECSR); |
1919 | } | 1902 | } |
1920 | 1903 | ||
1921 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
1922 | static void gem_init_pause_thresholds(struct gem *gp) | 1904 | static void gem_init_pause_thresholds(struct gem *gp) |
1923 | { | 1905 | { |
1924 | u32 cfg; | 1906 | u32 cfg; |
@@ -2079,7 +2061,6 @@ static int gem_check_invariants(struct gem *gp) | |||
2079 | return 0; | 2061 | return 0; |
2080 | } | 2062 | } |
2081 | 2063 | ||
2082 | /* Must be invoked under gp->lock and gp->tx_lock. */ | ||
2083 | static void gem_reinit_chip(struct gem *gp) | 2064 | static void gem_reinit_chip(struct gem *gp) |
2084 | { | 2065 | { |
2085 | /* Reset the chip */ | 2066 | /* Reset the chip */ |
@@ -2100,11 +2081,9 @@ static void gem_reinit_chip(struct gem *gp) | |||
2100 | } | 2081 | } |
2101 | 2082 | ||
2102 | 2083 | ||
2103 | /* Must be invoked with no lock held. */ | ||
2104 | static void gem_stop_phy(struct gem *gp, int wol) | 2084 | static void gem_stop_phy(struct gem *gp, int wol) |
2105 | { | 2085 | { |
2106 | u32 mifcfg; | 2086 | u32 mifcfg; |
2107 | unsigned long flags; | ||
2108 | 2087 | ||
2109 | /* Let the chip settle down a bit, it seems that helps | 2088 | /* Let the chip settle down a bit, it seems that helps |
2110 | * for sleep mode on some models | 2089 | * for sleep mode on some models |
@@ -2150,15 +2129,9 @@ static void gem_stop_phy(struct gem *gp, int wol) | |||
2150 | writel(0, gp->regs + RXDMA_CFG); | 2129 | writel(0, gp->regs + RXDMA_CFG); |
2151 | 2130 | ||
2152 | if (!wol) { | 2131 | if (!wol) { |
2153 | spin_lock_irqsave(&gp->lock, flags); | ||
2154 | spin_lock(&gp->tx_lock); | ||
2155 | gem_reset(gp); | 2132 | gem_reset(gp); |
2156 | writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); | 2133 | writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); |
2157 | writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); | 2134 | writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); |
2158 | spin_unlock(&gp->tx_lock); | ||
2159 | spin_unlock_irqrestore(&gp->lock, flags); | ||
2160 | |||
2161 | /* No need to take the lock here */ | ||
2162 | 2135 | ||
2163 | if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) | 2136 | if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) |
2164 | gp->phy_mii.def->ops->suspend(&gp->phy_mii); | 2137 | gp->phy_mii.def->ops->suspend(&gp->phy_mii); |
@@ -2175,54 +2148,55 @@ static void gem_stop_phy(struct gem *gp, int wol) | |||
2175 | } | 2148 | } |
2176 | } | 2149 | } |
2177 | 2150 | ||
2178 | |||
2179 | static int gem_do_start(struct net_device *dev) | 2151 | static int gem_do_start(struct net_device *dev) |
2180 | { | 2152 | { |
2181 | struct gem *gp = netdev_priv(dev); | 2153 | struct gem *gp = netdev_priv(dev); |
2182 | unsigned long flags; | 2154 | int rc; |
2183 | |||
2184 | spin_lock_irqsave(&gp->lock, flags); | ||
2185 | spin_lock(&gp->tx_lock); | ||
2186 | 2155 | ||
2187 | /* Enable the cell */ | 2156 | /* Enable the cell */ |
2188 | gem_get_cell(gp); | 2157 | gem_get_cell(gp); |
2189 | 2158 | ||
2190 | /* Init & setup chip hardware */ | 2159 | /* Make sure PCI access and bus master are enabled */ |
2191 | gem_reinit_chip(gp); | 2160 | rc = pci_enable_device(gp->pdev); |
2192 | 2161 | if (rc) { | |
2193 | gp->running = 1; | 2162 | netdev_err(dev, "Failed to enable chip on PCI bus !\n"); |
2194 | |||
2195 | napi_enable(&gp->napi); | ||
2196 | 2163 | ||
2197 | if (gp->lstate == link_up) { | 2164 | /* Put cell and forget it for now, it will be considered as |
2198 | netif_carrier_on(gp->dev); | 2165 | * still asleep, a new sleep cycle may bring it back |
2199 | gem_set_link_modes(gp); | 2166 | */ |
2167 | gem_put_cell(gp); | ||
2168 | return -ENXIO; | ||
2200 | } | 2169 | } |
2170 | pci_set_master(gp->pdev); | ||
2201 | 2171 | ||
2202 | netif_wake_queue(gp->dev); | 2172 | /* Init & setup chip hardware */ |
2203 | 2173 | gem_reinit_chip(gp); | |
2204 | spin_unlock(&gp->tx_lock); | ||
2205 | spin_unlock_irqrestore(&gp->lock, flags); | ||
2206 | 2174 | ||
2207 | if (request_irq(gp->pdev->irq, gem_interrupt, | 2175 | /* An interrupt might come in handy */ |
2208 | IRQF_SHARED, dev->name, (void *)dev)) { | 2176 | rc = request_irq(gp->pdev->irq, gem_interrupt, |
2177 | IRQF_SHARED, dev->name, (void *)dev); | ||
2178 | if (rc) { | ||
2209 | netdev_err(dev, "failed to request irq !\n"); | 2179 | netdev_err(dev, "failed to request irq !\n"); |
2210 | 2180 | ||
2211 | spin_lock_irqsave(&gp->lock, flags); | ||
2212 | spin_lock(&gp->tx_lock); | ||
2213 | |||
2214 | napi_disable(&gp->napi); | ||
2215 | |||
2216 | gp->running = 0; | ||
2217 | gem_reset(gp); | 2181 | gem_reset(gp); |
2218 | gem_clean_rings(gp); | 2182 | gem_clean_rings(gp); |
2219 | gem_put_cell(gp); | 2183 | gem_put_cell(gp); |
2184 | return rc; | ||
2185 | } | ||
2220 | 2186 | ||
2221 | spin_unlock(&gp->tx_lock); | 2187 | /* Mark us as attached again if we come from resume(), this has |
2222 | spin_unlock_irqrestore(&gp->lock, flags); | 2188 | * no effect if we weren't detached and needs to be done now. |
2189 | */ | ||
2190 | netif_device_attach(dev); | ||
2223 | 2191 | ||
2224 | return -EAGAIN; | 2192 | /* Restart NAPI & queues */ |
2225 | } | 2193 | gem_netif_start(gp); |
2194 | |||
2195 | /* Detect & init PHY, start autoneg etc... this will | ||
2196 | * eventually result in starting DMA operations when | ||
2197 | * the link is up | ||
2198 | */ | ||
2199 | gem_init_phy(gp); | ||
2226 | 2200 | ||
2227 | return 0; | 2201 | return 0; |
2228 | } | 2202 | } |
@@ -2230,22 +2204,30 @@ static int gem_do_start(struct net_device *dev) | |||
2230 | static void gem_do_stop(struct net_device *dev, int wol) | 2204 | static void gem_do_stop(struct net_device *dev, int wol) |
2231 | { | 2205 | { |
2232 | struct gem *gp = netdev_priv(dev); | 2206 | struct gem *gp = netdev_priv(dev); |
2233 | unsigned long flags; | ||
2234 | 2207 | ||
2235 | spin_lock_irqsave(&gp->lock, flags); | 2208 | /* Stop NAPI and stop tx queue */ |
2236 | spin_lock(&gp->tx_lock); | 2209 | gem_netif_stop(gp); |
2237 | 2210 | ||
2238 | gp->running = 0; | 2211 | /* Make sure ints are disabled. We don't care about |
2239 | 2212 | * synchronizing as NAPI is disabled, thus a stray | |
2240 | /* Stop netif queue */ | 2213 | * interrupt will do nothing bad (our irq handler |
2241 | netif_stop_queue(dev); | 2214 | * just schedules NAPI) |
2242 | 2215 | */ | |
2243 | /* Make sure ints are disabled */ | ||
2244 | gem_disable_ints(gp); | 2216 | gem_disable_ints(gp); |
2245 | 2217 | ||
2246 | /* We can drop the lock now */ | 2218 | /* Stop the link timer */ |
2247 | spin_unlock(&gp->tx_lock); | 2219 | del_timer_sync(&gp->link_timer); |
2248 | spin_unlock_irqrestore(&gp->lock, flags); | 2220 | |
2221 | /* We cannot cancel the reset task while holding the | ||
2222 | * rtnl lock, we'd get an A->B / B->A deadlock situation | ||
2223 | * if we did. This is not an issue however as the reset | ||
2224 | * task is synchronized vs. us (rtnl_lock) and will do | ||
2225 | * nothing if the device is down or suspended. We do | ||
2226 | * still clear reset_task_pending to avoid a spurious | ||
2227 | * reset later on in case we do resume before it gets | ||
2228 | * scheduled. | ||
2229 | */ | ||
2230 | gp->reset_task_pending = 0; | ||
2249 | 2231 | ||
2250 | /* If we are going to sleep with WOL */ | 2232 | /* If we are going to sleep with WOL */ |
2251 | gem_stop_dma(gp); | 2233 | gem_stop_dma(gp); |
@@ -2260,79 +2242,79 @@ static void gem_do_stop(struct net_device *dev, int wol) | |||
2260 | /* No irq needed anymore */ | 2242 | /* No irq needed anymore */ |
2261 | free_irq(gp->pdev->irq, (void *) dev); | 2243 | free_irq(gp->pdev->irq, (void *) dev); |
2262 | 2244 | ||
2245 | /* Shut the PHY down eventually and setup WOL */ | ||
2246 | gem_stop_phy(gp, wol); | ||
2247 | |||
2248 | /* Make sure bus master is disabled */ | ||
2249 | pci_disable_device(gp->pdev); | ||
2250 | |||
2263 | /* Cell not needed neither if no WOL */ | 2251 | /* Cell not needed neither if no WOL */ |
2264 | if (!wol) { | 2252 | if (!wol) |
2265 | spin_lock_irqsave(&gp->lock, flags); | ||
2266 | gem_put_cell(gp); | 2253 | gem_put_cell(gp); |
2267 | spin_unlock_irqrestore(&gp->lock, flags); | ||
2268 | } | ||
2269 | } | 2254 | } |
2270 | 2255 | ||
2271 | static void gem_reset_task(struct work_struct *work) | 2256 | static void gem_reset_task(struct work_struct *work) |
2272 | { | 2257 | { |
2273 | struct gem *gp = container_of(work, struct gem, reset_task); | 2258 | struct gem *gp = container_of(work, struct gem, reset_task); |
2274 | 2259 | ||
2275 | mutex_lock(&gp->pm_mutex); | 2260 | /* Lock out the network stack (essentially shield ourselves |
2261 | * against a racing open, close, control call, or suspend | ||
2262 | */ | ||
2263 | rtnl_lock(); | ||
2276 | 2264 | ||
2277 | if (gp->opened) | 2265 | /* Skip the reset task if suspended or closed, or if it's |
2278 | napi_disable(&gp->napi); | 2266 | * been cancelled by gem_do_stop (see comment there) |
2267 | */ | ||
2268 | if (!netif_device_present(gp->dev) || | ||
2269 | !netif_running(gp->dev) || | ||
2270 | !gp->reset_task_pending) { | ||
2271 | rtnl_unlock(); | ||
2272 | return; | ||
2273 | } | ||
2279 | 2274 | ||
2280 | spin_lock_irq(&gp->lock); | 2275 | /* Stop the link timer */ |
2281 | spin_lock(&gp->tx_lock); | 2276 | del_timer_sync(&gp->link_timer); |
2282 | 2277 | ||
2283 | if (gp->running) { | 2278 | /* Stop NAPI and tx */ |
2284 | netif_stop_queue(gp->dev); | 2279 | gem_netif_stop(gp); |
2285 | 2280 | ||
2286 | /* Reset the chip & rings */ | 2281 | /* Reset the chip & rings */ |
2287 | gem_reinit_chip(gp); | 2282 | gem_reinit_chip(gp); |
2288 | if (gp->lstate == link_up) | 2283 | if (gp->lstate == link_up) |
2289 | gem_set_link_modes(gp); | 2284 | gem_set_link_modes(gp); |
2290 | netif_wake_queue(gp->dev); | ||
2291 | } | ||
2292 | 2285 | ||
2293 | gp->reset_task_pending = 0; | 2286 | /* Restart NAPI and Tx */ |
2287 | gem_netif_start(gp); | ||
2294 | 2288 | ||
2295 | spin_unlock(&gp->tx_lock); | 2289 | /* We are back ! */ |
2296 | spin_unlock_irq(&gp->lock); | 2290 | gp->reset_task_pending = 0; |
2297 | 2291 | ||
2298 | if (gp->opened) | 2292 | /* If the link is not up, restart autoneg, else restart the |
2299 | napi_enable(&gp->napi); | 2293 | * polling timer |
2294 | */ | ||
2295 | if (gp->lstate != link_up) | ||
2296 | gem_begin_auto_negotiation(gp, NULL); | ||
2297 | else | ||
2298 | mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); | ||
2300 | 2299 | ||
2301 | mutex_unlock(&gp->pm_mutex); | 2300 | rtnl_unlock(); |
2302 | } | 2301 | } |
2303 | 2302 | ||
2304 | |||
2305 | static int gem_open(struct net_device *dev) | 2303 | static int gem_open(struct net_device *dev) |
2306 | { | 2304 | { |
2307 | struct gem *gp = netdev_priv(dev); | 2305 | /* We allow open while suspended, we just do nothing, |
2308 | int rc = 0; | 2306 | * the chip will be initialized in resume() |
2309 | 2307 | */ | |
2310 | mutex_lock(&gp->pm_mutex); | 2308 | if (netif_device_present(dev)) |
2311 | 2309 | return gem_do_start(dev); | |
2312 | /* We need the cell enabled */ | 2310 | return 0; |
2313 | if (!gp->asleep) | ||
2314 | rc = gem_do_start(dev); | ||
2315 | gp->opened = (rc == 0); | ||
2316 | |||
2317 | mutex_unlock(&gp->pm_mutex); | ||
2318 | |||
2319 | return rc; | ||
2320 | } | 2311 | } |
2321 | 2312 | ||
2322 | static int gem_close(struct net_device *dev) | 2313 | static int gem_close(struct net_device *dev) |
2323 | { | 2314 | { |
2324 | struct gem *gp = netdev_priv(dev); | 2315 | if (netif_device_present(dev)) |
2325 | |||
2326 | mutex_lock(&gp->pm_mutex); | ||
2327 | |||
2328 | napi_disable(&gp->napi); | ||
2329 | |||
2330 | gp->opened = 0; | ||
2331 | if (!gp->asleep) | ||
2332 | gem_do_stop(dev, 0); | 2316 | gem_do_stop(dev, 0); |
2333 | 2317 | ||
2334 | mutex_unlock(&gp->pm_mutex); | ||
2335 | |||
2336 | return 0; | 2318 | return 0; |
2337 | } | 2319 | } |
2338 | 2320 | ||
@@ -2341,59 +2323,35 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2341 | { | 2323 | { |
2342 | struct net_device *dev = pci_get_drvdata(pdev); | 2324 | struct net_device *dev = pci_get_drvdata(pdev); |
2343 | struct gem *gp = netdev_priv(dev); | 2325 | struct gem *gp = netdev_priv(dev); |
2344 | unsigned long flags; | ||
2345 | 2326 | ||
2346 | mutex_lock(&gp->pm_mutex); | 2327 | /* Lock the network stack first to avoid racing with open/close, |
2347 | 2328 | * reset task and setting calls | |
2348 | netdev_info(dev, "suspending, WakeOnLan %s\n", | 2329 | */ |
2349 | (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled"); | 2330 | rtnl_lock(); |
2350 | |||
2351 | /* Keep the cell enabled during the entire operation */ | ||
2352 | spin_lock_irqsave(&gp->lock, flags); | ||
2353 | spin_lock(&gp->tx_lock); | ||
2354 | gem_get_cell(gp); | ||
2355 | spin_unlock(&gp->tx_lock); | ||
2356 | spin_unlock_irqrestore(&gp->lock, flags); | ||
2357 | |||
2358 | /* If the driver is opened, we stop the MAC */ | ||
2359 | if (gp->opened) { | ||
2360 | napi_disable(&gp->napi); | ||
2361 | 2331 | ||
2362 | /* Stop traffic, mark us closed */ | 2332 | /* Not running, mark ourselves non-present, no need for |
2333 | * a lock here | ||
2334 | */ | ||
2335 | if (!netif_running(dev)) { | ||
2363 | netif_device_detach(dev); | 2336 | netif_device_detach(dev); |
2337 | rtnl_unlock(); | ||
2338 | return 0; | ||
2339 | } | ||
2340 | netdev_info(dev, "suspending, WakeOnLan %s\n", | ||
2341 | (gp->wake_on_lan && netif_running(dev)) ? | ||
2342 | "enabled" : "disabled"); | ||
2364 | 2343 | ||
2365 | /* Switch off MAC, remember WOL setting */ | 2344 | /* Tell the network stack we're gone. gem_do_stop() below will |
2366 | gp->asleep_wol = gp->wake_on_lan; | 2345 | * synchronize with TX, stop NAPI etc... |
2367 | gem_do_stop(dev, gp->asleep_wol); | ||
2368 | } else | ||
2369 | gp->asleep_wol = 0; | ||
2370 | |||
2371 | /* Mark us asleep */ | ||
2372 | gp->asleep = 1; | ||
2373 | wmb(); | ||
2374 | |||
2375 | /* Stop the link timer */ | ||
2376 | del_timer_sync(&gp->link_timer); | ||
2377 | |||
2378 | /* Now we release the mutex to not block the reset task who | ||
2379 | * can take it too. We are marked asleep, so there will be no | ||
2380 | * conflict here | ||
2381 | */ | 2346 | */ |
2382 | mutex_unlock(&gp->pm_mutex); | 2347 | netif_device_detach(dev); |
2383 | |||
2384 | /* Wait for the pending reset task to complete */ | ||
2385 | flush_work_sync(&gp->reset_task); | ||
2386 | 2348 | ||
2387 | /* Shut the PHY down eventually and setup WOL */ | 2349 | /* Switch off chip, remember WOL setting */ |
2388 | gem_stop_phy(gp, gp->asleep_wol); | 2350 | gp->asleep_wol = gp->wake_on_lan; |
2389 | 2351 | gem_do_stop(dev, gp->asleep_wol); | |
2390 | /* Make sure bus master is disabled */ | ||
2391 | pci_disable_device(gp->pdev); | ||
2392 | 2352 | ||
2393 | /* Release the cell, no need to take a lock at this point since | 2353 | /* Unlock the network stack */ |
2394 | * nothing else can happen now | 2354 | rtnl_unlock(); |
2395 | */ | ||
2396 | gem_put_cell(gp); | ||
2397 | 2355 | ||
2398 | return 0; | 2356 | return 0; |
2399 | } | 2357 | } |
@@ -2402,53 +2360,23 @@ static int gem_resume(struct pci_dev *pdev) | |||
2402 | { | 2360 | { |
2403 | struct net_device *dev = pci_get_drvdata(pdev); | 2361 | struct net_device *dev = pci_get_drvdata(pdev); |
2404 | struct gem *gp = netdev_priv(dev); | 2362 | struct gem *gp = netdev_priv(dev); |
2405 | unsigned long flags; | ||
2406 | 2363 | ||
2407 | netdev_info(dev, "resuming\n"); | 2364 | /* See locking comment in gem_suspend */ |
2365 | rtnl_lock(); | ||
2408 | 2366 | ||
2409 | mutex_lock(&gp->pm_mutex); | 2367 | /* Not running, mark ourselves present, no need for |
2410 | 2368 | * a lock here | |
2411 | /* Keep the cell enabled during the entire operation, no need to | ||
2412 | * take a lock here tho since nothing else can happen while we are | ||
2413 | * marked asleep | ||
2414 | */ | 2369 | */ |
2415 | gem_get_cell(gp); | 2370 | if (!netif_running(dev)) { |
2416 | 2371 | netif_device_attach(dev); | |
2417 | /* Make sure PCI access and bus master are enabled */ | 2372 | rtnl_unlock(); |
2418 | if (pci_enable_device(gp->pdev)) { | ||
2419 | netdev_err(dev, "Can't re-enable chip !\n"); | ||
2420 | /* Put cell and forget it for now, it will be considered as | ||
2421 | * still asleep, a new sleep cycle may bring it back | ||
2422 | */ | ||
2423 | gem_put_cell(gp); | ||
2424 | mutex_unlock(&gp->pm_mutex); | ||
2425 | return 0; | 2373 | return 0; |
2426 | } | 2374 | } |
2427 | pci_set_master(gp->pdev); | ||
2428 | |||
2429 | /* Reset everything */ | ||
2430 | gem_reset(gp); | ||
2431 | 2375 | ||
2432 | /* Mark us woken up */ | 2376 | /* Restart chip. If that fails there isn't much we can do, we |
2433 | gp->asleep = 0; | 2377 | * leave things stopped. |
2434 | wmb(); | ||
2435 | |||
2436 | /* Bring the PHY back. Again, lock is useless at this point as | ||
2437 | * nothing can be happening until we restart the whole thing | ||
2438 | */ | 2378 | */ |
2439 | gem_init_phy(gp); | 2379 | gem_do_start(dev); |
2440 | |||
2441 | /* If we were opened, bring everything back */ | ||
2442 | if (gp->opened) { | ||
2443 | /* Restart MAC */ | ||
2444 | gem_do_start(dev); | ||
2445 | |||
2446 | /* Re-attach net device */ | ||
2447 | netif_device_attach(dev); | ||
2448 | } | ||
2449 | |||
2450 | spin_lock_irqsave(&gp->lock, flags); | ||
2451 | spin_lock(&gp->tx_lock); | ||
2452 | 2380 | ||
2453 | /* If we had WOL enabled, the cell clock was never turned off during | 2381 | /* If we had WOL enabled, the cell clock was never turned off during |
2454 | * sleep, so we end up beeing unbalanced. Fix that here | 2382 | * sleep, so we end up beeing unbalanced. Fix that here |
@@ -2456,15 +2384,8 @@ static int gem_resume(struct pci_dev *pdev) | |||
2456 | if (gp->asleep_wol) | 2384 | if (gp->asleep_wol) |
2457 | gem_put_cell(gp); | 2385 | gem_put_cell(gp); |
2458 | 2386 | ||
2459 | /* This function doesn't need to hold the cell, it will be held if the | 2387 | /* Unlock the network stack */ |
2460 | * driver is open by gem_do_start(). | 2388 | rtnl_unlock(); |
2461 | */ | ||
2462 | gem_put_cell(gp); | ||
2463 | |||
2464 | spin_unlock(&gp->tx_lock); | ||
2465 | spin_unlock_irqrestore(&gp->lock, flags); | ||
2466 | |||
2467 | mutex_unlock(&gp->pm_mutex); | ||
2468 | 2389 | ||
2469 | return 0; | 2390 | return 0; |
2470 | } | 2391 | } |
@@ -2474,33 +2395,35 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) | |||
2474 | { | 2395 | { |
2475 | struct gem *gp = netdev_priv(dev); | 2396 | struct gem *gp = netdev_priv(dev); |
2476 | 2397 | ||
2477 | spin_lock_irq(&gp->lock); | ||
2478 | spin_lock(&gp->tx_lock); | ||
2479 | |||
2480 | /* I have seen this being called while the PM was in progress, | 2398 | /* I have seen this being called while the PM was in progress, |
2481 | * so we shield against this | 2399 | * so we shield against this. Let's also not poke at registers |
2400 | * while the reset task is going on. | ||
2401 | * | ||
2402 | * TODO: Move stats collection elsewhere (link timer ?) and | ||
2403 | * make this a nop to avoid all those synchro issues | ||
2482 | */ | 2404 | */ |
2483 | if (gp->running) { | 2405 | if (!netif_device_present(dev) || !netif_running(dev)) |
2484 | dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); | 2406 | goto bail; |
2485 | writel(0, gp->regs + MAC_FCSERR); | ||
2486 | 2407 | ||
2487 | dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); | 2408 | /* Better safe than sorry... */ |
2488 | writel(0, gp->regs + MAC_AERR); | 2409 | if (WARN_ON(!gp->cell_enabled)) |
2410 | goto bail; | ||
2489 | 2411 | ||
2490 | dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); | 2412 | dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); |
2491 | writel(0, gp->regs + MAC_LERR); | 2413 | writel(0, gp->regs + MAC_FCSERR); |
2492 | 2414 | ||
2493 | dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); | 2415 | dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); |
2494 | dev->stats.collisions += | 2416 | writel(0, gp->regs + MAC_AERR); |
2495 | (readl(gp->regs + MAC_ECOLL) + | ||
2496 | readl(gp->regs + MAC_LCOLL)); | ||
2497 | writel(0, gp->regs + MAC_ECOLL); | ||
2498 | writel(0, gp->regs + MAC_LCOLL); | ||
2499 | } | ||
2500 | 2417 | ||
2501 | spin_unlock(&gp->tx_lock); | 2418 | dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); |
2502 | spin_unlock_irq(&gp->lock); | 2419 | writel(0, gp->regs + MAC_LERR); |
2503 | 2420 | ||
2421 | dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); | ||
2422 | dev->stats.collisions += | ||
2423 | (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL)); | ||
2424 | writel(0, gp->regs + MAC_ECOLL); | ||
2425 | writel(0, gp->regs + MAC_LCOLL); | ||
2426 | bail: | ||
2504 | return &dev->stats; | 2427 | return &dev->stats; |
2505 | } | 2428 | } |
2506 | 2429 | ||
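With the spinlocks gone, gem_get_stats() relies on state checks instead: counter registers are only read when the device is attached (netif_device_present), the interface is up (netif_running) and the cell clock is enabled. A small helper along these lines captures the pattern; it is an illustration written for this note, assuming the driver's context (struct gem and the gp->cell_enabled flag used above), not code from the patch:

	static bool gem_regs_accessible(struct net_device *dev)
	{
		struct gem *gp = netdev_priv(dev);

		/* Suspended or administratively down: the chip may be powered off */
		if (!netif_device_present(dev) || !netif_running(dev))
			return false;

		/* The cell clock must be on before touching MAC counters */
		return !WARN_ON(!gp->cell_enabled);
	}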
@@ -2513,22 +2436,19 @@ static int gem_set_mac_address(struct net_device *dev, void *addr) | |||
2513 | if (!is_valid_ether_addr(macaddr->sa_data)) | 2436 | if (!is_valid_ether_addr(macaddr->sa_data)) |
2514 | return -EADDRNOTAVAIL; | 2437 | return -EADDRNOTAVAIL; |
2515 | 2438 | ||
2516 | if (!netif_running(dev) || !netif_device_present(dev)) { | 2439 | memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); |
2517 | /* We'll just catch it later when the | 2440 | |
2518 | * device is up'd or resumed. | 2441 | /* We'll just catch it later when the device is up'd or resumed */ |
2519 | */ | 2442 | if (!netif_running(dev) || !netif_device_present(dev)) |
2520 | memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); | ||
2521 | return 0; | 2443 | return 0; |
2522 | } | ||
2523 | 2444 | ||
2524 | mutex_lock(&gp->pm_mutex); | 2445 | /* Better safe than sorry... */ |
2525 | memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); | 2446 | if (WARN_ON(!gp->cell_enabled)) |
2526 | if (gp->running) { | 2447 | return 0; |
2527 | writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); | 2448 | |
2528 | writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); | 2449 | writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); |
2529 | writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); | 2450 | writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); |
2530 | } | 2451 | writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); |
2531 | mutex_unlock(&gp->pm_mutex); | ||
2532 | 2452 | ||
2533 | return 0; | 2453 | return 0; |
2534 | } | 2454 | } |
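For reference, the three writel() calls above split the 6-byte station address across GEM's 16-bit address registers, low-order bytes first. A worked example (illustration only, assuming e points at dev->dev_addr as set up earlier in the function) for the address 00:11:22:33:44:55:

	/*
	 *   MAC_ADDR0 = (e[4] << 8) | e[5] = 0x4455   (least significant bytes)
	 *   MAC_ADDR1 = (e[2] << 8) | e[3] = 0x2233
	 *   MAC_ADDR2 = (e[0] << 8) | e[1] = 0x0011   (most significant bytes)
	 */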
@@ -2539,14 +2459,12 @@ static void gem_set_multicast(struct net_device *dev) | |||
2539 | u32 rxcfg, rxcfg_new; | 2459 | u32 rxcfg, rxcfg_new; |
2540 | int limit = 10000; | 2460 | int limit = 10000; |
2541 | 2461 | ||
2462 | if (!netif_running(dev) || !netif_device_present(dev)) | ||
2463 | return; | ||
2542 | 2464 | ||
2543 | spin_lock_irq(&gp->lock); | 2465 | /* Better safe than sorry... */ |
2544 | spin_lock(&gp->tx_lock); | 2466 | if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled)) |
2545 | 2467 | return; | |
2546 | if (!gp->running) | ||
2547 | goto bail; | ||
2548 | |||
2549 | netif_stop_queue(dev); | ||
2550 | 2468 | ||
2551 | rxcfg = readl(gp->regs + MAC_RXCFG); | 2469 | rxcfg = readl(gp->regs + MAC_RXCFG); |
2552 | rxcfg_new = gem_setup_multicast(gp); | 2470 | rxcfg_new = gem_setup_multicast(gp); |
@@ -2566,12 +2484,6 @@ static void gem_set_multicast(struct net_device *dev) | |||
2566 | rxcfg |= rxcfg_new; | 2484 | rxcfg |= rxcfg_new; |
2567 | 2485 | ||
2568 | writel(rxcfg, gp->regs + MAC_RXCFG); | 2486 | writel(rxcfg, gp->regs + MAC_RXCFG); |
2569 | |||
2570 | netif_wake_queue(dev); | ||
2571 | |||
2572 | bail: | ||
2573 | spin_unlock(&gp->tx_lock); | ||
2574 | spin_unlock_irq(&gp->lock); | ||
2575 | } | 2487 | } |
2576 | 2488 | ||
2577 | /* Jumbo-grams don't seem to work :-( */ | 2489 | /* Jumbo-grams don't seem to work :-( */ |
@@ -2589,26 +2501,21 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) | |||
2589 | if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) | 2501 | if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) |
2590 | return -EINVAL; | 2502 | return -EINVAL; |
2591 | 2503 | ||
2592 | if (!netif_running(dev) || !netif_device_present(dev)) { | 2504 | dev->mtu = new_mtu; |
2593 | /* We'll just catch it later when the | 2505 | |
2594 | * device is up'd or resumed. | 2506 | /* We'll just catch it later when the device is up'd or resumed */ |
2595 | */ | 2507 | if (!netif_running(dev) || !netif_device_present(dev)) |
2596 | dev->mtu = new_mtu; | ||
2597 | return 0; | 2508 | return 0; |
2598 | } | ||
2599 | 2509 | ||
2600 | mutex_lock(&gp->pm_mutex); | 2510 | /* Better safe than sorry... */ |
2601 | spin_lock_irq(&gp->lock); | 2511 | if (WARN_ON(!gp->cell_enabled)) |
2602 | spin_lock(&gp->tx_lock); | 2512 | return 0; |
2603 | dev->mtu = new_mtu; | 2513 | |
2604 | if (gp->running) { | 2514 | gem_netif_stop(gp); |
2605 | gem_reinit_chip(gp); | 2515 | gem_reinit_chip(gp); |
2606 | if (gp->lstate == link_up) | 2516 | if (gp->lstate == link_up) |
2607 | gem_set_link_modes(gp); | 2517 | gem_set_link_modes(gp); |
2608 | } | 2518 | gem_netif_start(gp); |
2609 | spin_unlock(&gp->tx_lock); | ||
2610 | spin_unlock_irq(&gp->lock); | ||
2611 | mutex_unlock(&gp->pm_mutex); | ||
2612 | 2519 | ||
2613 | return 0; | 2520 | return 0; |
2614 | } | 2521 | } |
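Because .ndo_change_mtu is entered under the rtnl lock that the core holds around dev_set_mtu(), the handler above no longer needs private locks and is free to sleep while the chip is reinitialized. A condensed sketch of the resulting flow (same calls as the hunk, with the early-return guards folded into one condition):

	dev->mtu = new_mtu;			/* always record the new MTU */
	if (netif_running(dev) && netif_device_present(dev)) {
		gem_netif_stop(gp);		/* quiesce NAPI and the TX queue */
		gem_reinit_chip(gp);		/* may sleep: full re-init picks up dev->mtu */
		if (gp->lstate == link_up)
			gem_set_link_modes(gp);	/* restore the negotiated link configuration */
		gem_netif_start(gp);
	}
	return 0;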
@@ -2640,7 +2547,6 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
2640 | cmd->phy_address = 0; /* XXX fixed PHYAD */ | 2547 | cmd->phy_address = 0; /* XXX fixed PHYAD */ |
2641 | 2548 | ||
2642 | /* Return current PHY settings */ | 2549 | /* Return current PHY settings */ |
2643 | spin_lock_irq(&gp->lock); | ||
2644 | cmd->autoneg = gp->want_autoneg; | 2550 | cmd->autoneg = gp->want_autoneg; |
2645 | ethtool_cmd_speed_set(cmd, gp->phy_mii.speed); | 2551 | ethtool_cmd_speed_set(cmd, gp->phy_mii.speed); |
2646 | cmd->duplex = gp->phy_mii.duplex; | 2552 | cmd->duplex = gp->phy_mii.duplex; |
@@ -2652,7 +2558,6 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
2652 | */ | 2558 | */ |
2653 | if (cmd->advertising == 0) | 2559 | if (cmd->advertising == 0) |
2654 | cmd->advertising = cmd->supported; | 2560 | cmd->advertising = cmd->supported; |
2655 | spin_unlock_irq(&gp->lock); | ||
2656 | } else { // XXX PCS ? | 2561 | } else { // XXX PCS ? |
2657 | cmd->supported = | 2562 | cmd->supported = |
2658 | (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | | 2563 | (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | |
@@ -2706,11 +2611,10 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
2706 | return -EINVAL; | 2611 | return -EINVAL; |
2707 | 2612 | ||
2708 | /* Apply settings and restart link process. */ | 2613 | /* Apply settings and restart link process. */ |
2709 | spin_lock_irq(&gp->lock); | 2614 | if (netif_device_present(gp->dev)) { |
2710 | gem_get_cell(gp); | 2615 | del_timer_sync(&gp->link_timer); |
2711 | gem_begin_auto_negotiation(gp, cmd); | 2616 | gem_begin_auto_negotiation(gp, cmd); |
2712 | gem_put_cell(gp); | 2617 | } |
2713 | spin_unlock_irq(&gp->lock); | ||
2714 | 2618 | ||
2715 | return 0; | 2619 | return 0; |
2716 | } | 2620 | } |
@@ -2722,12 +2626,11 @@ static int gem_nway_reset(struct net_device *dev) | |||
2722 | if (!gp->want_autoneg) | 2626 | if (!gp->want_autoneg) |
2723 | return -EINVAL; | 2627 | return -EINVAL; |
2724 | 2628 | ||
2725 | /* Restart link process. */ | 2629 | /* Restart link process */ |
2726 | spin_lock_irq(&gp->lock); | 2630 | if (netif_device_present(gp->dev)) { |
2727 | gem_get_cell(gp); | 2631 | del_timer_sync(&gp->link_timer); |
2728 | gem_begin_auto_negotiation(gp, NULL); | 2632 | gem_begin_auto_negotiation(gp, NULL); |
2729 | gem_put_cell(gp); | 2633 | } |
2730 | spin_unlock_irq(&gp->lock); | ||
2731 | 2634 | ||
2732 | return 0; | 2635 | return 0; |
2733 | } | 2636 | } |
@@ -2791,16 +2694,11 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
2791 | struct gem *gp = netdev_priv(dev); | 2694 | struct gem *gp = netdev_priv(dev); |
2792 | struct mii_ioctl_data *data = if_mii(ifr); | 2695 | struct mii_ioctl_data *data = if_mii(ifr); |
2793 | int rc = -EOPNOTSUPP; | 2696 | int rc = -EOPNOTSUPP; |
2794 | unsigned long flags; | ||
2795 | 2697 | ||
2796 | /* Hold the PM mutex while doing ioctl's or we may collide | 2698 | /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that |
2797 | * with power management. | 2699 | * netif_device_present() is true and holds rtnl_lock for us |
2700 | * so we have nothing to worry about | ||
2798 | */ | 2701 | */ |
2799 | mutex_lock(&gp->pm_mutex); | ||
2800 | |||
2801 | spin_lock_irqsave(&gp->lock, flags); | ||
2802 | gem_get_cell(gp); | ||
2803 | spin_unlock_irqrestore(&gp->lock, flags); | ||
2804 | 2702 | ||
2805 | switch (cmd) { | 2703 | switch (cmd) { |
2806 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ | 2704 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ |
@@ -2808,32 +2706,17 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
2808 | /* Fallthrough... */ | 2706 | /* Fallthrough... */ |
2809 | 2707 | ||
2810 | case SIOCGMIIREG: /* Read MII PHY register. */ | 2708 | case SIOCGMIIREG: /* Read MII PHY register. */ |
2811 | if (!gp->running) | 2709 | data->val_out = __phy_read(gp, data->phy_id & 0x1f, |
2812 | rc = -EAGAIN; | 2710 | data->reg_num & 0x1f); |
2813 | else { | 2711 | rc = 0; |
2814 | data->val_out = __phy_read(gp, data->phy_id & 0x1f, | ||
2815 | data->reg_num & 0x1f); | ||
2816 | rc = 0; | ||
2817 | } | ||
2818 | break; | 2712 | break; |
2819 | 2713 | ||
2820 | case SIOCSMIIREG: /* Write MII PHY register. */ | 2714 | case SIOCSMIIREG: /* Write MII PHY register. */ |
2821 | if (!gp->running) | 2715 | __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, |
2822 | rc = -EAGAIN; | 2716 | data->val_in); |
2823 | else { | 2717 | rc = 0; |
2824 | __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, | ||
2825 | data->val_in); | ||
2826 | rc = 0; | ||
2827 | } | ||
2828 | break; | 2718 | break; |
2829 | }; | 2719 | }; |
2830 | |||
2831 | spin_lock_irqsave(&gp->lock, flags); | ||
2832 | gem_put_cell(gp); | ||
2833 | spin_unlock_irqrestore(&gp->lock, flags); | ||
2834 | |||
2835 | mutex_unlock(&gp->pm_mutex); | ||
2836 | |||
2837 | return rc; | 2720 | return rc; |
2838 | } | 2721 | } |
2839 | 2722 | ||
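The simplification above works because, for SIOCGMIIREG/SIOCSMIIREG, the ioctl core has already verified the device is present and taken the rtnl lock before the driver's handler runs, as the new comment notes. For context, this is the path a userspace MII query takes; the program below is an illustrative sketch only (the interface name "eth0" is an assumption, nothing here comes from the patch):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr;
		struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

		/* SIOCGMIIPHY fills in the PHY address the driver uses */
		if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
			mii->reg_num = MII_BMSR;	/* basic mode status register */
			if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
				printf("BMSR = 0x%04x\n", mii->val_out);
		}
		close(fd);
		return 0;
	}

The 0x1f masks in the handler reflect the MII management frame format, which addresses at most 32 PHYs and 32 registers per PHY.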
@@ -2921,23 +2804,9 @@ static void gem_remove_one(struct pci_dev *pdev) | |||
2921 | 2804 | ||
2922 | unregister_netdev(dev); | 2805 | unregister_netdev(dev); |
2923 | 2806 | ||
2924 | /* Stop the link timer */ | 2807 | /* Ensure reset task is truly gone */
2925 | del_timer_sync(&gp->link_timer); | ||
2926 | |||
2927 | /* We shouldn't need any locking here */ | ||
2928 | gem_get_cell(gp); | ||
2929 | |||
2930 | /* Cancel reset task */ | ||
2931 | cancel_work_sync(&gp->reset_task); | 2808 | cancel_work_sync(&gp->reset_task); |
2932 | 2809 | ||
2933 | /* Shut the PHY down */ | ||
2934 | gem_stop_phy(gp, 0); | ||
2935 | |||
2936 | gem_put_cell(gp); | ||
2937 | |||
2938 | /* Make sure bus master is disabled */ | ||
2939 | pci_disable_device(gp->pdev); | ||
2940 | |||
2941 | /* Free resources */ | 2810 | /* Free resources */ |
2942 | pci_free_consistent(pdev, | 2811 | pci_free_consistent(pdev, |
2943 | sizeof(struct gem_init_block), | 2812 | sizeof(struct gem_init_block), |
@@ -3043,10 +2912,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev, | |||
3043 | 2912 | ||
3044 | gp->msg_enable = DEFAULT_MSG; | 2913 | gp->msg_enable = DEFAULT_MSG; |
3045 | 2914 | ||
3046 | spin_lock_init(&gp->lock); | ||
3047 | spin_lock_init(&gp->tx_lock); | ||
3048 | mutex_init(&gp->pm_mutex); | ||
3049 | |||
3050 | init_timer(&gp->link_timer); | 2915 | init_timer(&gp->link_timer); |
3051 | gp->link_timer.function = gem_link_timer; | 2916 | gp->link_timer.function = gem_link_timer; |
3052 | gp->link_timer.data = (unsigned long) gp; | 2917 | gp->link_timer.data = (unsigned long) gp; |
@@ -3122,14 +2987,11 @@ static int __devinit gem_init_one(struct pci_dev *pdev, | |||
3122 | /* Set that now, in case PM kicks in now */ | 2987 | /* Set that now, in case PM kicks in now */ |
3123 | pci_set_drvdata(pdev, dev); | 2988 | pci_set_drvdata(pdev, dev); |
3124 | 2989 | ||
3125 | /* Detect & init PHY, start autoneg, we release the cell now | 2990 | /* We can do scatter/gather and HW checksum */ |
3126 | * too, it will be managed by whoever needs it | 2991 | dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; |
3127 | */ | 2992 | dev->features |= dev->hw_features | NETIF_F_RXCSUM; |
3128 | gem_init_phy(gp); | 2993 | if (pci_using_dac) |
3129 | 2994 | dev->features |= NETIF_F_HIGHDMA; | |
3130 | spin_lock_irq(&gp->lock); | ||
3131 | gem_put_cell(gp); | ||
3132 | spin_unlock_irq(&gp->lock); | ||
3133 | 2995 | ||
3134 | /* Register with kernel */ | 2996 | /* Register with kernel */ |
3135 | if (register_netdev(dev)) { | 2997 | if (register_netdev(dev)) { |
@@ -3138,20 +3000,15 @@ static int __devinit gem_init_one(struct pci_dev *pdev, | |||
3138 | goto err_out_free_consistent; | 3000 | goto err_out_free_consistent; |
3139 | } | 3001 | } |
3140 | 3002 | ||
3003 | /* Undo the get_cell with appropriate locking (we could use | ||
3004 | * ndo_init/uninit but that would be even more clumsy imho) | ||
3005 | */ | ||
3006 | rtnl_lock(); | ||
3007 | gem_put_cell(gp); | ||
3008 | rtnl_unlock(); | ||
3009 | |||
3141 | netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", | 3010 | netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", |
3142 | dev->dev_addr); | 3011 | dev->dev_addr); |
3143 | |||
3144 | if (gp->phy_type == phy_mii_mdio0 || | ||
3145 | gp->phy_type == phy_mii_mdio1) | ||
3146 | netdev_info(dev, "Found %s PHY\n", | ||
3147 | gp->phy_mii.def ? gp->phy_mii.def->name : "no"); | ||
3148 | |||
3149 | /* GEM can do it all... */ | ||
3150 | dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; | ||
3151 | dev->features |= dev->hw_features | NETIF_F_RXCSUM | NETIF_F_LLTX; | ||
3152 | if (pci_using_dac) | ||
3153 | dev->features |= NETIF_F_HIGHDMA; | ||
3154 | |||
3155 | return 0; | 3012 | return 0; |
3156 | 3013 | ||
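In the hunks above the feature setup now happens before register_netdev(), so the networking core sees the final dev->features and dev->hw_features at registration time rather than flags that were only filled in afterwards; NETIF_F_LLTX is also gone from the defaults along with the private TX locking. The ordering, condensed (same flags as the hunk, with the error path body elided):

	/* Advertise capabilities first ... */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	/* ... then register; the stack snapshots the flags here */
	if (register_netdev(dev))
		goto err_out_free_consistent;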
3157 | err_out_free_consistent: | 3014 | err_out_free_consistent: |