Diffstat (limited to 'drivers/net')
29 files changed, 870 insertions, 812 deletions
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 1363083b4d83..14dbad14afb6 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/mii.h> | 52 | #include <linux/mii.h> |
53 | #include <linux/skbuff.h> | 53 | #include <linux/skbuff.h> |
54 | #include <linux/delay.h> | 54 | #include <linux/delay.h> |
55 | #include <linux/crc32.h> | ||
55 | #include <asm/mipsregs.h> | 56 | #include <asm/mipsregs.h> |
56 | #include <asm/irq.h> | 57 | #include <asm/irq.h> |
57 | #include <asm/io.h> | 58 | #include <asm/io.h> |
@@ -2070,23 +2071,6 @@ static void au1000_tx_timeout(struct net_device *dev) | |||
2070 | netif_wake_queue(dev); | 2071 | netif_wake_queue(dev); |
2071 | } | 2072 | } |
2072 | 2073 | ||
2073 | |||
2074 | static unsigned const ethernet_polynomial = 0x04c11db7U; | ||
2075 | static inline u32 ether_crc(int length, unsigned char *data) | ||
2076 | { | ||
2077 | int crc = -1; | ||
2078 | |||
2079 | while(--length >= 0) { | ||
2080 | unsigned char current_octet = *data++; | ||
2081 | int bit; | ||
2082 | for (bit = 0; bit < 8; bit++, current_octet >>= 1) | ||
2083 | crc = (crc << 1) ^ | ||
2084 | ((crc < 0) ^ (current_octet & 1) ? | ||
2085 | ethernet_polynomial : 0); | ||
2086 | } | ||
2087 | return crc; | ||
2088 | } | ||
2089 | |||
2090 | static void set_rx_mode(struct net_device *dev) | 2074 | static void set_rx_mode(struct net_device *dev) |
2091 | { | 2075 | { |
2092 | struct au1000_private *aup = (struct au1000_private *) dev->priv; | 2076 | struct au1000_private *aup = (struct au1000_private *) dev->priv; |
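The hand-rolled bit-serial CRC removed above is superseded by the shared ether_crc() helper, which is why <linux/crc32.h> is now included at the top of the file. A minimal sketch of the usual multicast-hash pattern built on that helper follows; the 64-bit filter layout is illustrative, not the exact au1000 register format.

  #include <linux/crc32.h>            /* ether_crc() */
  #include <linux/etherdevice.h>      /* ETH_ALEN, u8/u32 via linux/types.h */

  /* Illustrative only: fold a multicast address into a 64-bit hash filter. */
  static void hash_mc_addr(u8 *addr, u32 *hash_hi, u32 *hash_lo)
  {
          u32 crc = ether_crc(ETH_ALEN, addr);
          int bit = crc >> 26;        /* top 6 CRC bits pick one of 64 filter bits */

          if (bit < 32)
                  *hash_lo |= 1 << bit;
          else
                  *hash_hi |= 1 << (bit - 32);
  }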
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 1f3627470c95..1ddefd281213 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq) | |||
765 | break; | 765 | break; |
766 | skb = np->tx_skbuff[entry]; | 766 | skb = np->tx_skbuff[entry]; |
767 | pci_unmap_single (np->pdev, | 767 | pci_unmap_single (np->pdev, |
768 | np->tx_ring[entry].fraginfo & 0xffffffffffff, | 768 | np->tx_ring[entry].fraginfo & DMA_48BIT_MASK, |
769 | skb->len, PCI_DMA_TODEVICE); | 769 | skb->len, PCI_DMA_TODEVICE); |
770 | if (irq) | 770 | if (irq) |
771 | dev_kfree_skb_irq (skb); | 771 | dev_kfree_skb_irq (skb); |
@@ -893,7 +893,7 @@ receive_packet (struct net_device *dev) | |||
893 | /* Small skbuffs for short packets */ | 893 | /* Small skbuffs for short packets */ |
894 | if (pkt_len > copy_thresh) { | 894 | if (pkt_len > copy_thresh) { |
895 | pci_unmap_single (np->pdev, | 895 | pci_unmap_single (np->pdev, |
896 | desc->fraginfo & 0xffffffffffff, | 896 | desc->fraginfo & DMA_48BIT_MASK, |
897 | np->rx_buf_sz, | 897 | np->rx_buf_sz, |
898 | PCI_DMA_FROMDEVICE); | 898 | PCI_DMA_FROMDEVICE); |
899 | skb_put (skb = np->rx_skbuff[entry], pkt_len); | 899 | skb_put (skb = np->rx_skbuff[entry], pkt_len); |
@@ -901,7 +901,7 @@ receive_packet (struct net_device *dev) | |||
901 | } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) { | 901 | } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) { |
902 | pci_dma_sync_single_for_cpu(np->pdev, | 902 | pci_dma_sync_single_for_cpu(np->pdev, |
903 | desc->fraginfo & | 903 | desc->fraginfo & |
904 | 0xffffffffffff, | 904 | DMA_48BIT_MASK, |
905 | np->rx_buf_sz, | 905 | np->rx_buf_sz, |
906 | PCI_DMA_FROMDEVICE); | 906 | PCI_DMA_FROMDEVICE); |
907 | skb->dev = dev; | 907 | skb->dev = dev; |
@@ -913,7 +913,7 @@ receive_packet (struct net_device *dev) | |||
913 | skb_put (skb, pkt_len); | 913 | skb_put (skb, pkt_len); |
914 | pci_dma_sync_single_for_device(np->pdev, | 914 | pci_dma_sync_single_for_device(np->pdev, |
915 | desc->fraginfo & | 915 | desc->fraginfo & |
916 | 0xffffffffffff, | 916 | DMA_48BIT_MASK, |
917 | np->rx_buf_sz, | 917 | np->rx_buf_sz, |
918 | PCI_DMA_FROMDEVICE); | 918 | PCI_DMA_FROMDEVICE); |
919 | } | 919 | } |
@@ -1800,7 +1800,7 @@ rio_close (struct net_device *dev) | |||
1800 | skb = np->rx_skbuff[i]; | 1800 | skb = np->rx_skbuff[i]; |
1801 | if (skb) { | 1801 | if (skb) { |
1802 | pci_unmap_single(np->pdev, | 1802 | pci_unmap_single(np->pdev, |
1803 | np->rx_ring[i].fraginfo & 0xffffffffffff, | 1803 | np->rx_ring[i].fraginfo & DMA_48BIT_MASK, |
1804 | skb->len, PCI_DMA_FROMDEVICE); | 1804 | skb->len, PCI_DMA_FROMDEVICE); |
1805 | dev_kfree_skb (skb); | 1805 | dev_kfree_skb (skb); |
1806 | np->rx_skbuff[i] = NULL; | 1806 | np->rx_skbuff[i] = NULL; |
@@ -1810,7 +1810,7 @@ rio_close (struct net_device *dev) | |||
1810 | skb = np->tx_skbuff[i]; | 1810 | skb = np->tx_skbuff[i]; |
1811 | if (skb) { | 1811 | if (skb) { |
1812 | pci_unmap_single(np->pdev, | 1812 | pci_unmap_single(np->pdev, |
1813 | np->tx_ring[i].fraginfo & 0xffffffffffff, | 1813 | np->tx_ring[i].fraginfo & DMA_48BIT_MASK, |
1814 | skb->len, PCI_DMA_TODEVICE); | 1814 | skb->len, PCI_DMA_TODEVICE); |
1815 | dev_kfree_skb (skb); | 1815 | dev_kfree_skb (skb); |
1816 | np->tx_skbuff[i] = NULL; | 1816 | np->tx_skbuff[i] = NULL; |
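Every dl2k hunk above is the same substitution: the open-coded 48-bit literal becomes DMA_48BIT_MASK from <linux/dma-mapping.h>, making it explicit that only the low 48 bits of fraginfo hold the bus address handed back to pci_unmap_single(). A hedged sketch of the packing this mask undoes; the exact dl2k field layout (fragment length stored above bit 47) is assumed here rather than quoted, and byte-order conversion is omitted.

  #include <linux/dma-mapping.h>      /* DMA_48BIT_MASK */

  static inline u64 pack_fraginfo(dma_addr_t addr, unsigned int len)
  {
          return ((u64)addr & DMA_48BIT_MASK) | ((u64)len << 48);
  }

  static inline dma_addr_t unpack_fraginfo(u64 fraginfo)
  {
          return fraginfo & DMA_48BIT_MASK;   /* strip the length field */
  }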
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 9788b1ef2e7d..f7235c9bc421 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -106,6 +106,7 @@ | |||
106 | * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. | 106 | * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. |
107 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. | 107 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. |
108 | * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. | 108 | * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. |
109 | * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. | ||
109 | * | 110 | * |
110 | * Known bugs: | 111 | * Known bugs: |
111 | * We suspect that on some hardware no TX done interrupts are generated. | 112 | * We suspect that on some hardware no TX done interrupts are generated. |
@@ -117,7 +118,7 @@ | |||
117 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few | 118 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few |
118 | * superfluous timer interrupts from the nic. | 119 | * superfluous timer interrupts from the nic. |
119 | */ | 120 | */ |
120 | #define FORCEDETH_VERSION "0.53" | 121 | #define FORCEDETH_VERSION "0.54" |
121 | #define DRV_NAME "forcedeth" | 122 | #define DRV_NAME "forcedeth" |
122 | 123 | ||
123 | #include <linux/module.h> | 124 | #include <linux/module.h> |
@@ -710,6 +711,72 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags) | |||
710 | } | 711 | } |
711 | } | 712 | } |
712 | 713 | ||
714 | static int using_multi_irqs(struct net_device *dev) | ||
715 | { | ||
716 | struct fe_priv *np = get_nvpriv(dev); | ||
717 | |||
718 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | ||
719 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
720 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) | ||
721 | return 0; | ||
722 | else | ||
723 | return 1; | ||
724 | } | ||
725 | |||
726 | static void nv_enable_irq(struct net_device *dev) | ||
727 | { | ||
728 | struct fe_priv *np = get_nvpriv(dev); | ||
729 | |||
730 | if (!using_multi_irqs(dev)) { | ||
731 | if (np->msi_flags & NV_MSI_X_ENABLED) | ||
732 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | ||
733 | else | ||
734 | enable_irq(dev->irq); | ||
735 | } else { | ||
736 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
737 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
738 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
739 | } | ||
740 | } | ||
741 | |||
742 | static void nv_disable_irq(struct net_device *dev) | ||
743 | { | ||
744 | struct fe_priv *np = get_nvpriv(dev); | ||
745 | |||
746 | if (!using_multi_irqs(dev)) { | ||
747 | if (np->msi_flags & NV_MSI_X_ENABLED) | ||
748 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | ||
749 | else | ||
750 | disable_irq(dev->irq); | ||
751 | } else { | ||
752 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
753 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
754 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
755 | } | ||
756 | } | ||
757 | |||
758 | /* In MSIX mode, a write to irqmask behaves as XOR */ | ||
759 | static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) | ||
760 | { | ||
761 | u8 __iomem *base = get_hwbase(dev); | ||
762 | |||
763 | writel(mask, base + NvRegIrqMask); | ||
764 | } | ||
765 | |||
766 | static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) | ||
767 | { | ||
768 | struct fe_priv *np = get_nvpriv(dev); | ||
769 | u8 __iomem *base = get_hwbase(dev); | ||
770 | |||
771 | if (np->msi_flags & NV_MSI_X_ENABLED) { | ||
772 | writel(mask, base + NvRegIrqMask); | ||
773 | } else { | ||
774 | if (np->msi_flags & NV_MSI_ENABLED) | ||
775 | writel(0, base + NvRegMSIIrqMask); | ||
776 | writel(0, base + NvRegIrqMask); | ||
777 | } | ||
778 | } | ||
779 | |||
713 | #define MII_READ (-1) | 780 | #define MII_READ (-1) |
714 | /* mii_rw: read/write a register on the PHY. | 781 | /* mii_rw: read/write a register on the PHY. |
715 | * | 782 | * |
@@ -1019,24 +1086,25 @@ static void nv_do_rx_refill(unsigned long data) | |||
1019 | struct net_device *dev = (struct net_device *) data; | 1086 | struct net_device *dev = (struct net_device *) data; |
1020 | struct fe_priv *np = netdev_priv(dev); | 1087 | struct fe_priv *np = netdev_priv(dev); |
1021 | 1088 | ||
1022 | 1089 | if (!using_multi_irqs(dev)) { | |
1023 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | 1090 | if (np->msi_flags & NV_MSI_X_ENABLED) |
1024 | ((np->msi_flags & NV_MSI_X_ENABLED) && | 1091 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); |
1025 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | 1092 | else |
1026 | disable_irq(dev->irq); | 1093 | disable_irq(dev->irq); |
1027 | } else { | 1094 | } else { |
1028 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | 1095 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); |
1029 | } | 1096 | } |
1030 | if (nv_alloc_rx(dev)) { | 1097 | if (nv_alloc_rx(dev)) { |
1031 | spin_lock(&np->lock); | 1098 | spin_lock_irq(&np->lock); |
1032 | if (!np->in_shutdown) | 1099 | if (!np->in_shutdown) |
1033 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 1100 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
1034 | spin_unlock(&np->lock); | 1101 | spin_unlock_irq(&np->lock); |
1035 | } | 1102 | } |
1036 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | 1103 | if (!using_multi_irqs(dev)) { |
1037 | ((np->msi_flags & NV_MSI_X_ENABLED) && | 1104 | if (np->msi_flags & NV_MSI_X_ENABLED) |
1038 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | 1105 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); |
1039 | enable_irq(dev->irq); | 1106 | else |
1107 | enable_irq(dev->irq); | ||
1040 | } else { | 1108 | } else { |
1041 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | 1109 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); |
1042 | } | 1110 | } |
@@ -1668,15 +1736,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
1668 | * guessed, there is probably a simpler approach. | 1736 | * guessed, there is probably a simpler approach. |
1669 | * Changing the MTU is a rare event, it shouldn't matter. | 1737 | * Changing the MTU is a rare event, it shouldn't matter. |
1670 | */ | 1738 | */ |
1671 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | 1739 | nv_disable_irq(dev); |
1672 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
1673 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
1674 | disable_irq(dev->irq); | ||
1675 | } else { | ||
1676 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
1677 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
1678 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
1679 | } | ||
1680 | spin_lock_bh(&dev->xmit_lock); | 1740 | spin_lock_bh(&dev->xmit_lock); |
1681 | spin_lock(&np->lock); | 1741 | spin_lock(&np->lock); |
1682 | /* stop engines */ | 1742 | /* stop engines */ |
@@ -1709,15 +1769,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) | |||
1709 | nv_start_tx(dev); | 1769 | nv_start_tx(dev); |
1710 | spin_unlock(&np->lock); | 1770 | spin_unlock(&np->lock); |
1711 | spin_unlock_bh(&dev->xmit_lock); | 1771 | spin_unlock_bh(&dev->xmit_lock); |
1712 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | 1772 | nv_enable_irq(dev); |
1713 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
1714 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
1715 | enable_irq(dev->irq); | ||
1716 | } else { | ||
1717 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | ||
1718 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | ||
1719 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | ||
1720 | } | ||
1721 | } | 1773 | } |
1722 | return 0; | 1774 | return 0; |
1723 | } | 1775 | } |
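The two nv_change_mtu() hunks are where the new helpers pay off: the per-call-site MSI/MSI-X decision tree collapses into nv_disable_irq()/nv_enable_irq(). Condensed, the patched path reads as the bracket below (a restatement of the code above, error handling omitted).

  nv_disable_irq(dev);                    /* mask whichever vectors are in use */
  spin_lock_bh(&dev->xmit_lock);
  spin_lock(&np->lock);
  /* ... stop engines, resize rx/tx rings, restart engines ... */
  spin_unlock(&np->lock);
  spin_unlock_bh(&dev->xmit_lock);
  nv_enable_irq(dev);                     /* unmask the same set again */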
@@ -2108,16 +2160,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) | |||
2108 | if (!(events & np->irqmask)) | 2160 | if (!(events & np->irqmask)) |
2109 | break; | 2161 | break; |
2110 | 2162 | ||
2111 | spin_lock(&np->lock); | 2163 | spin_lock_irq(&np->lock); |
2112 | nv_tx_done(dev); | 2164 | nv_tx_done(dev); |
2113 | spin_unlock(&np->lock); | 2165 | spin_unlock_irq(&np->lock); |
2114 | 2166 | ||
2115 | if (events & (NVREG_IRQ_TX_ERR)) { | 2167 | if (events & (NVREG_IRQ_TX_ERR)) { |
2116 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | 2168 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", |
2117 | dev->name, events); | 2169 | dev->name, events); |
2118 | } | 2170 | } |
2119 | if (i > max_interrupt_work) { | 2171 | if (i > max_interrupt_work) { |
2120 | spin_lock(&np->lock); | 2172 | spin_lock_irq(&np->lock); |
2121 | /* disable interrupts on the nic */ | 2173 | /* disable interrupts on the nic */ |
2122 | writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); | 2174 | writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); |
2123 | pci_push(base); | 2175 | pci_push(base); |
@@ -2127,7 +2179,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) | |||
2127 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | 2179 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); |
2128 | } | 2180 | } |
2129 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); | 2181 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); |
2130 | spin_unlock(&np->lock); | 2182 | spin_unlock_irq(&np->lock); |
2131 | break; | 2183 | break; |
2132 | } | 2184 | } |
2133 | 2185 | ||
@@ -2157,14 +2209,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | |||
2157 | 2209 | ||
2158 | nv_rx_process(dev); | 2210 | nv_rx_process(dev); |
2159 | if (nv_alloc_rx(dev)) { | 2211 | if (nv_alloc_rx(dev)) { |
2160 | spin_lock(&np->lock); | 2212 | spin_lock_irq(&np->lock); |
2161 | if (!np->in_shutdown) | 2213 | if (!np->in_shutdown) |
2162 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 2214 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
2163 | spin_unlock(&np->lock); | 2215 | spin_unlock_irq(&np->lock); |
2164 | } | 2216 | } |
2165 | 2217 | ||
2166 | if (i > max_interrupt_work) { | 2218 | if (i > max_interrupt_work) { |
2167 | spin_lock(&np->lock); | 2219 | spin_lock_irq(&np->lock); |
2168 | /* disable interrupts on the nic */ | 2220 | /* disable interrupts on the nic */ |
2169 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | 2221 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); |
2170 | pci_push(base); | 2222 | pci_push(base); |
@@ -2174,7 +2226,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | |||
2174 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | 2226 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); |
2175 | } | 2227 | } |
2176 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); | 2228 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); |
2177 | spin_unlock(&np->lock); | 2229 | spin_unlock_irq(&np->lock); |
2178 | break; | 2230 | break; |
2179 | } | 2231 | } |
2180 | 2232 | ||
@@ -2203,14 +2255,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | |||
2203 | break; | 2255 | break; |
2204 | 2256 | ||
2205 | if (events & NVREG_IRQ_LINK) { | 2257 | if (events & NVREG_IRQ_LINK) { |
2206 | spin_lock(&np->lock); | 2258 | spin_lock_irq(&np->lock); |
2207 | nv_link_irq(dev); | 2259 | nv_link_irq(dev); |
2208 | spin_unlock(&np->lock); | 2260 | spin_unlock_irq(&np->lock); |
2209 | } | 2261 | } |
2210 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { | 2262 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { |
2211 | spin_lock(&np->lock); | 2263 | spin_lock_irq(&np->lock); |
2212 | nv_linkchange(dev); | 2264 | nv_linkchange(dev); |
2213 | spin_unlock(&np->lock); | 2265 | spin_unlock_irq(&np->lock); |
2214 | np->link_timeout = jiffies + LINK_TIMEOUT; | 2266 | np->link_timeout = jiffies + LINK_TIMEOUT; |
2215 | } | 2267 | } |
2216 | if (events & (NVREG_IRQ_UNKNOWN)) { | 2268 | if (events & (NVREG_IRQ_UNKNOWN)) { |
@@ -2218,7 +2270,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | |||
2218 | dev->name, events); | 2270 | dev->name, events); |
2219 | } | 2271 | } |
2220 | if (i > max_interrupt_work) { | 2272 | if (i > max_interrupt_work) { |
2221 | spin_lock(&np->lock); | 2273 | spin_lock_irq(&np->lock); |
2222 | /* disable interrupts on the nic */ | 2274 | /* disable interrupts on the nic */ |
2223 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); | 2275 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); |
2224 | pci_push(base); | 2276 | pci_push(base); |
@@ -2228,7 +2280,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | |||
2228 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | 2280 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); |
2229 | } | 2281 | } |
2230 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); | 2282 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); |
2231 | spin_unlock(&np->lock); | 2283 | spin_unlock_irq(&np->lock); |
2232 | break; | 2284 | break; |
2233 | } | 2285 | } |
2234 | 2286 | ||
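The spin_lock() to spin_lock_irq() conversions in nv_nic_irq_tx/_rx/_other are the "fix spin locks for multi irqs" item from the 0.54 changelog: with separate MSI-X vectors these handlers no longer exclude one another, so taking np->lock with local interrupts still enabled can deadlock against another vector firing on the same CPU. A sketch of the hazard, assuming two handlers contend for np->lock:

  /* vector TX:      spin_lock(&np->lock);      <- holds the lock
   * vector OTHER fires on the same CPU and preempts the TX handler:
   *                 spin_lock(&np->lock);      <- spins forever, deadlock
   *
   * spin_lock_irq() masks local interrupts for the critical section, so
   * the second vector cannot run until the first handler drops the lock.
   */
  spin_lock_irq(&np->lock);
  nv_tx_done(dev);
  spin_unlock_irq(&np->lock);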
@@ -2251,10 +2303,11 @@ static void nv_do_nic_poll(unsigned long data) | |||
2251 | * nv_nic_irq because that may decide to do otherwise | 2303 | * nv_nic_irq because that may decide to do otherwise |
2252 | */ | 2304 | */ |
2253 | 2305 | ||
2254 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | 2306 | if (!using_multi_irqs(dev)) { |
2255 | ((np->msi_flags & NV_MSI_X_ENABLED) && | 2307 | if (np->msi_flags & NV_MSI_X_ENABLED) |
2256 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | 2308 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); |
2257 | disable_irq(dev->irq); | 2309 | else |
2310 | disable_irq(dev->irq); | ||
2258 | mask = np->irqmask; | 2311 | mask = np->irqmask; |
2259 | } else { | 2312 | } else { |
2260 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | 2313 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { |
@@ -2277,11 +2330,12 @@ static void nv_do_nic_poll(unsigned long data) | |||
2277 | writel(mask, base + NvRegIrqMask); | 2330 | writel(mask, base + NvRegIrqMask); |
2278 | pci_push(base); | 2331 | pci_push(base); |
2279 | 2332 | ||
2280 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | 2333 | if (!using_multi_irqs(dev)) { |
2281 | ((np->msi_flags & NV_MSI_X_ENABLED) && | ||
2282 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { | ||
2283 | nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); | 2334 | nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); |
2284 | enable_irq(dev->irq); | 2335 | if (np->msi_flags & NV_MSI_X_ENABLED) |
2336 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | ||
2337 | else | ||
2338 | enable_irq(dev->irq); | ||
2285 | } else { | 2339 | } else { |
2286 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | 2340 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { |
2287 | nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); | 2341 | nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); |
@@ -2628,6 +2682,113 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) | |||
2628 | writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); | 2682 | writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); |
2629 | } | 2683 | } |
2630 | 2684 | ||
2685 | static int nv_request_irq(struct net_device *dev) | ||
2686 | { | ||
2687 | struct fe_priv *np = get_nvpriv(dev); | ||
2688 | u8 __iomem *base = get_hwbase(dev); | ||
2689 | int ret = 1; | ||
2690 | int i; | ||
2691 | |||
2692 | if (np->msi_flags & NV_MSI_X_CAPABLE) { | ||
2693 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | ||
2694 | np->msi_x_entry[i].entry = i; | ||
2695 | } | ||
2696 | if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { | ||
2697 | np->msi_flags |= NV_MSI_X_ENABLED; | ||
2698 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { | ||
2699 | /* Request irq for rx handling */ | ||
2700 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2701 | printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); | ||
2702 | pci_disable_msix(np->pci_dev); | ||
2703 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2704 | goto out_err; | ||
2705 | } | ||
2706 | /* Request irq for tx handling */ | ||
2707 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2708 | printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); | ||
2709 | pci_disable_msix(np->pci_dev); | ||
2710 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2711 | goto out_free_rx; | ||
2712 | } | ||
2713 | /* Request irq for link and timer handling */ | ||
2714 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { | ||
2715 | printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); | ||
2716 | pci_disable_msix(np->pci_dev); | ||
2717 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2718 | goto out_free_tx; | ||
2719 | } | ||
2720 | /* map interrupts to their respective vector */ | ||
2721 | writel(0, base + NvRegMSIXMap0); | ||
2722 | writel(0, base + NvRegMSIXMap1); | ||
2723 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); | ||
2724 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); | ||
2725 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); | ||
2726 | } else { | ||
2727 | /* Request irq for all interrupts */ | ||
2728 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | ||
2729 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2730 | pci_disable_msix(np->pci_dev); | ||
2731 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2732 | goto out_err; | ||
2733 | } | ||
2734 | |||
2735 | /* map interrupts to vector 0 */ | ||
2736 | writel(0, base + NvRegMSIXMap0); | ||
2737 | writel(0, base + NvRegMSIXMap1); | ||
2738 | } | ||
2739 | } | ||
2740 | } | ||
2741 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { | ||
2742 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { | ||
2743 | np->msi_flags |= NV_MSI_ENABLED; | ||
2744 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | ||
2745 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2746 | pci_disable_msi(np->pci_dev); | ||
2747 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2748 | goto out_err; | ||
2749 | } | ||
2750 | |||
2751 | /* map interrupts to vector 0 */ | ||
2752 | writel(0, base + NvRegMSIMap0); | ||
2753 | writel(0, base + NvRegMSIMap1); | ||
2754 | /* enable msi vector 0 */ | ||
2755 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | ||
2756 | } | ||
2757 | } | ||
2758 | if (ret != 0) { | ||
2759 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) | ||
2760 | goto out_err; | ||
2761 | } | ||
2762 | |||
2763 | return 0; | ||
2764 | out_free_tx: | ||
2765 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); | ||
2766 | out_free_rx: | ||
2767 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); | ||
2768 | out_err: | ||
2769 | return 1; | ||
2770 | } | ||
2771 | |||
2772 | static void nv_free_irq(struct net_device *dev) | ||
2773 | { | ||
2774 | struct fe_priv *np = get_nvpriv(dev); | ||
2775 | int i; | ||
2776 | |||
2777 | if (np->msi_flags & NV_MSI_X_ENABLED) { | ||
2778 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | ||
2779 | free_irq(np->msi_x_entry[i].vector, dev); | ||
2780 | } | ||
2781 | pci_disable_msix(np->pci_dev); | ||
2782 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2783 | } else { | ||
2784 | free_irq(np->pci_dev->irq, dev); | ||
2785 | if (np->msi_flags & NV_MSI_ENABLED) { | ||
2786 | pci_disable_msi(np->pci_dev); | ||
2787 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2788 | } | ||
2789 | } | ||
2790 | } | ||
2791 | |||
2631 | static int nv_open(struct net_device *dev) | 2792 | static int nv_open(struct net_device *dev) |
2632 | { | 2793 | { |
2633 | struct fe_priv *np = netdev_priv(dev); | 2794 | struct fe_priv *np = netdev_priv(dev); |
@@ -2720,12 +2881,16 @@ static int nv_open(struct net_device *dev) | |||
2720 | udelay(10); | 2881 | udelay(10); |
2721 | writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); | 2882 | writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); |
2722 | 2883 | ||
2723 | writel(0, base + NvRegIrqMask); | 2884 | nv_disable_hw_interrupts(dev, np->irqmask); |
2724 | pci_push(base); | 2885 | pci_push(base); |
2725 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); | 2886 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); |
2726 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | 2887 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); |
2727 | pci_push(base); | 2888 | pci_push(base); |
2728 | 2889 | ||
2890 | if (nv_request_irq(dev)) { | ||
2891 | goto out_drain; | ||
2892 | } | ||
2893 | |||
2729 | if (np->msi_flags & NV_MSI_X_CAPABLE) { | 2894 | if (np->msi_flags & NV_MSI_X_CAPABLE) { |
2730 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | 2895 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { |
2731 | np->msi_x_entry[i].entry = i; | 2896 | np->msi_x_entry[i].entry = i; |
@@ -2799,7 +2964,7 @@ static int nv_open(struct net_device *dev) | |||
2799 | } | 2964 | } |
2800 | 2965 | ||
2801 | /* ask for interrupts */ | 2966 | /* ask for interrupts */ |
2802 | writel(np->irqmask, base + NvRegIrqMask); | 2967 | nv_enable_hw_interrupts(dev, np->irqmask); |
2803 | 2968 | ||
2804 | spin_lock_irq(&np->lock); | 2969 | spin_lock_irq(&np->lock); |
2805 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); | 2970 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); |
@@ -2843,7 +3008,6 @@ static int nv_close(struct net_device *dev) | |||
2843 | { | 3008 | { |
2844 | struct fe_priv *np = netdev_priv(dev); | 3009 | struct fe_priv *np = netdev_priv(dev); |
2845 | u8 __iomem *base; | 3010 | u8 __iomem *base; |
2846 | int i; | ||
2847 | 3011 | ||
2848 | spin_lock_irq(&np->lock); | 3012 | spin_lock_irq(&np->lock); |
2849 | np->in_shutdown = 1; | 3013 | np->in_shutdown = 1; |
@@ -2861,31 +3025,13 @@ static int nv_close(struct net_device *dev) | |||
2861 | 3025 | ||
2862 | /* disable interrupts on the nic or we will lock up */ | 3026 | /* disable interrupts on the nic or we will lock up */ |
2863 | base = get_hwbase(dev); | 3027 | base = get_hwbase(dev); |
2864 | if (np->msi_flags & NV_MSI_X_ENABLED) { | 3028 | nv_disable_hw_interrupts(dev, np->irqmask); |
2865 | writel(np->irqmask, base + NvRegIrqMask); | ||
2866 | } else { | ||
2867 | if (np->msi_flags & NV_MSI_ENABLED) | ||
2868 | writel(0, base + NvRegMSIIrqMask); | ||
2869 | writel(0, base + NvRegIrqMask); | ||
2870 | } | ||
2871 | pci_push(base); | 3029 | pci_push(base); |
2872 | dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); | 3030 | dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); |
2873 | 3031 | ||
2874 | spin_unlock_irq(&np->lock); | 3032 | spin_unlock_irq(&np->lock); |
2875 | 3033 | ||
2876 | if (np->msi_flags & NV_MSI_X_ENABLED) { | 3034 | nv_free_irq(dev); |
2877 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | ||
2878 | free_irq(np->msi_x_entry[i].vector, dev); | ||
2879 | } | ||
2880 | pci_disable_msix(np->pci_dev); | ||
2881 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2882 | } else { | ||
2883 | free_irq(np->pci_dev->irq, dev); | ||
2884 | if (np->msi_flags & NV_MSI_ENABLED) { | ||
2885 | pci_disable_msi(np->pci_dev); | ||
2886 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2887 | } | ||
2888 | } | ||
2889 | 3035 | ||
2890 | drain_ring(dev); | 3036 | drain_ring(dev); |
2891 | 3037 | ||
@@ -2974,20 +3120,18 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
2974 | if (id->driver_data & DEV_HAS_HIGH_DMA) { | 3120 | if (id->driver_data & DEV_HAS_HIGH_DMA) { |
2975 | /* packet format 3: supports 40-bit addressing */ | 3121 | /* packet format 3: supports 40-bit addressing */ |
2976 | np->desc_ver = DESC_VER_3; | 3122 | np->desc_ver = DESC_VER_3; |
3123 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; | ||
2977 | if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { | 3124 | if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { |
2978 | printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", | 3125 | printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", |
2979 | pci_name(pci_dev)); | 3126 | pci_name(pci_dev)); |
2980 | } else { | 3127 | } else { |
2981 | if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) { | 3128 | dev->features |= NETIF_F_HIGHDMA; |
2982 | printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n", | 3129 | printk(KERN_INFO "forcedeth: using HIGHDMA\n"); |
2983 | pci_name(pci_dev)); | 3130 | } |
2984 | goto out_relreg; | 3131 | if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) { |
2985 | } else { | 3132 | printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n", |
2986 | dev->features |= NETIF_F_HIGHDMA; | 3133 | pci_name(pci_dev)); |
2987 | printk(KERN_INFO "forcedeth: using HIGHDMA\n"); | ||
2988 | } | ||
2989 | } | 3134 | } |
2990 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; | ||
2991 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { | 3135 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { |
2992 | /* packet format 2: supports jumbo frames */ | 3136 | /* packet format 2: supports jumbo frames */ |
2993 | np->desc_ver = DESC_VER_2; | 3137 | np->desc_ver = DESC_VER_2; |
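The reordered nv_probe() hunk also changes the DMA-mask policy: NETIF_F_HIGHDMA now depends only on the streaming mask (pci_set_dma_mask() with DMA_39BIT_MASK), while a failing 64-bit consistent mask is reported but no longer aborts the probe, since the descriptor rings can live below 4 GB regardless. Condensed, the resulting logic reads roughly as follows (messages shortened):

  np->desc_ver = DESC_VER_3;
  np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
  if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK))
          printk(KERN_INFO "forcedeth: falling back to 32-bit addressing\n");
  else
          dev->features |= NETIF_F_HIGHDMA;
  /* non-fatal from here on: rings are simply allocated below 4 GB */
  if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL))
          printk(KERN_INFO "forcedeth: 64-bit consistent DMA unavailable\n");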
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 79a8fbcf5f93..0d5fccc984bb 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -582,7 +582,6 @@ static int __init setup_adapter(int card_base, int type, int n) | |||
582 | INIT_WORK(&priv->rx_work, rx_bh, priv); | 582 | INIT_WORK(&priv->rx_work, rx_bh, priv); |
583 | dev->priv = priv; | 583 | dev->priv = priv; |
584 | sprintf(dev->name, "dmascc%i", 2 * n + i); | 584 | sprintf(dev->name, "dmascc%i", 2 * n + i); |
585 | SET_MODULE_OWNER(dev); | ||
586 | dev->base_addr = card_base; | 585 | dev->base_addr = card_base; |
587 | dev->irq = irq; | 586 | dev->irq = irq; |
588 | dev->open = scc_open; | 587 | dev->open = scc_open; |
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 6ace0e914fd1..5927784df3f9 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1550,7 +1550,6 @@ static unsigned char ax25_nocall[AX25_ADDR_LEN] = | |||
1550 | 1550 | ||
1551 | static void scc_net_setup(struct net_device *dev) | 1551 | static void scc_net_setup(struct net_device *dev) |
1552 | { | 1552 | { |
1553 | SET_MODULE_OWNER(dev); | ||
1554 | dev->tx_queue_len = 16; /* should be enough... */ | 1553 | dev->tx_queue_len = 16; /* should be enough... */ |
1555 | 1554 | ||
1556 | dev->open = scc_net_open; | 1555 | dev->open = scc_net_open; |
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index fe22479eb202..b49884048caa 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1098,7 +1098,6 @@ static void yam_setup(struct net_device *dev) | |||
1098 | 1098 | ||
1099 | dev->base_addr = yp->iobase; | 1099 | dev->base_addr = yp->iobase; |
1100 | dev->irq = yp->irq; | 1100 | dev->irq = yp->irq; |
1101 | SET_MODULE_OWNER(dev); | ||
1102 | 1101 | ||
1103 | dev->open = yam_open; | 1102 | dev->open = yam_open; |
1104 | dev->stop = yam_close; | 1103 | dev->stop = yam_close; |
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 27ab75f20799..c1ce2398efea 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -46,4 +46,4 @@ obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o | |||
46 | obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o | 46 | obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o |
47 | 47 | ||
48 | # The SIR helper module | 48 | # The SIR helper module |
49 | sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o | 49 | sir-dev-objs := sir_dev.o sir_dongle.o |
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 96bdb73c2283..cd87593e4e8a 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1778,7 +1778,7 @@ static int irda_usb_probe(struct usb_interface *intf, | |||
1778 | 1778 | ||
1779 | if (self->needspatch) { | 1779 | if (self->needspatch) { |
1780 | ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0), | 1780 | ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0), |
1781 | 0x02, 0x40, 0, 0, 0, 0, msecs_to_jiffies(500)); | 1781 | 0x02, 0x40, 0, 0, NULL, 0, 500); |
1782 | if (ret < 0) { | 1782 | if (ret < 0) { |
1783 | IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret); | 1783 | IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret); |
1784 | goto err_out_3; | 1784 | goto err_out_3; |
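This one-line irda-usb fix corrects two argument slips in the firmware-patch control transfer: usb_control_msg() takes a data pointer (NULL here, as there is no data stage) and a timeout in milliseconds, so wrapping 500 in msecs_to_jiffies() scaled the timeout by HZ/1000 (500 ms shrank to 50 ms at HZ=100). The corrected call with the parameter meanings spelled out; the vendor-specific request values are taken from the driver and not interpreted further.

  ret = usb_control_msg(self->usbdev,
                        usb_sndctrlpipe(self->usbdev, 0),
                        0x02,        /* bRequest (vendor-specific) */
                        0x40,        /* bRequestType: vendor, host-to-device */
                        0, 0,        /* wValue, wIndex */
                        NULL, 0,     /* no data stage */
                        500);        /* timeout in milliseconds */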
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index f69fb4cec76f..9fa294a546d6 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -15,23 +15,14 @@ | |||
15 | #define IRDA_SIR_H | 15 | #define IRDA_SIR_H |
16 | 16 | ||
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <linux/workqueue.h> | ||
18 | 19 | ||
19 | #include <net/irda/irda.h> | 20 | #include <net/irda/irda.h> |
20 | #include <net/irda/irda_device.h> // iobuff_t | 21 | #include <net/irda/irda_device.h> // iobuff_t |
21 | 22 | ||
22 | /* FIXME: unify irda_request with sir_fsm! */ | ||
23 | |||
24 | struct irda_request { | ||
25 | struct list_head lh_request; | ||
26 | unsigned long pending; | ||
27 | void (*func)(void *); | ||
28 | void *data; | ||
29 | struct timer_list timer; | ||
30 | }; | ||
31 | |||
32 | struct sir_fsm { | 23 | struct sir_fsm { |
33 | struct semaphore sem; | 24 | struct semaphore sem; |
34 | struct irda_request rq; | 25 | struct work_struct work; |
35 | unsigned state, substate; | 26 | unsigned state, substate; |
36 | int param; | 27 | int param; |
37 | int result; | 28 | int result; |
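With struct irda_request gone, the pending configuration work is just a work_struct embedded in sir_fsm, driven by the workqueue that sir_dev.c creates below. For reference, a minimal sketch of the 2.6.16-era workqueue API this relies on (INIT_WORK() still takes a separate data pointer and queue_delayed_work() takes its delay in jiffies); the sir_dev/sir_fsm types are the ones from this header, and wq stands in for the irda_sir_wq created in sir_dev.c.

  #include <linux/workqueue.h>
  #include <linux/jiffies.h>

  static struct workqueue_struct *wq;     /* stands in for irda_sir_wq */

  static void fsm_step(void *data)
  {
          struct sir_dev *dev = data;
          /* ... run one pass of the configuration state machine on dev ... */
  }

  /* kick the state machine now, or again after delay_ms milliseconds */
  static void kick_fsm(struct sir_dev *dev, unsigned long delay_ms)
  {
          INIT_WORK(&dev->fsm.work, fsm_step, dev);
          queue_delayed_work(wq, &dev->fsm.work, msecs_to_jiffies(delay_ms));
  }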
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index ea7c9464d46a..3b5854d10c17 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -23,6 +23,298 @@ | |||
23 | 23 | ||
24 | #include "sir-dev.h" | 24 | #include "sir-dev.h" |
25 | 25 | ||
26 | |||
27 | static struct workqueue_struct *irda_sir_wq; | ||
28 | |||
29 | /* STATE MACHINE */ | ||
30 | |||
31 | /* substate handler of the config-fsm to handle the cases where we want | ||
32 | * to wait for transmit completion before changing the port configuration | ||
33 | */ | ||
34 | |||
35 | static int sirdev_tx_complete_fsm(struct sir_dev *dev) | ||
36 | { | ||
37 | struct sir_fsm *fsm = &dev->fsm; | ||
38 | unsigned next_state, delay; | ||
39 | unsigned bytes_left; | ||
40 | |||
41 | do { | ||
42 | next_state = fsm->substate; /* default: stay in current substate */ | ||
43 | delay = 0; | ||
44 | |||
45 | switch(fsm->substate) { | ||
46 | |||
47 | case SIRDEV_STATE_WAIT_XMIT: | ||
48 | if (dev->drv->chars_in_buffer) | ||
49 | bytes_left = dev->drv->chars_in_buffer(dev); | ||
50 | else | ||
51 | bytes_left = 0; | ||
52 | if (!bytes_left) { | ||
53 | next_state = SIRDEV_STATE_WAIT_UNTIL_SENT; | ||
54 | break; | ||
55 | } | ||
56 | |||
57 | if (dev->speed > 115200) | ||
58 | delay = (bytes_left*8*10000) / (dev->speed/100); | ||
59 | else if (dev->speed > 0) | ||
60 | delay = (bytes_left*10*10000) / (dev->speed/100); | ||
61 | else | ||
62 | delay = 0; | ||
63 | /* expected delay (usec) until remaining bytes are sent */ | ||
64 | if (delay < 100) { | ||
65 | udelay(delay); | ||
66 | delay = 0; | ||
67 | break; | ||
68 | } | ||
69 | /* sleep some longer delay (msec) */ | ||
70 | delay = (delay+999) / 1000; | ||
71 | break; | ||
72 | |||
73 | case SIRDEV_STATE_WAIT_UNTIL_SENT: | ||
74 | /* block until underlaying hardware buffer are empty */ | ||
75 | if (dev->drv->wait_until_sent) | ||
76 | dev->drv->wait_until_sent(dev); | ||
77 | next_state = SIRDEV_STATE_TX_DONE; | ||
78 | break; | ||
79 | |||
80 | case SIRDEV_STATE_TX_DONE: | ||
81 | return 0; | ||
82 | |||
83 | default: | ||
84 | IRDA_ERROR("%s - undefined state\n", __FUNCTION__); | ||
85 | return -EINVAL; | ||
86 | } | ||
87 | fsm->substate = next_state; | ||
88 | } while (delay == 0); | ||
89 | return delay; | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * Function sirdev_config_fsm | ||
94 | * | ||
95 | * State machine to handle the configuration of the device (and attached dongle, if any). | ||
96 | * This handler is scheduled for execution in kIrDAd context, so we can sleep. | ||
97 | * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too | ||
98 | * long. Instead, for longer delays we start a timer to reschedule us later. | ||
99 | * On entry, fsm->sem is always locked and the netdev xmit queue stopped. | ||
100 | * Both must be unlocked/restarted on completion - but only on final exit. | ||
101 | */ | ||
102 | |||
103 | static void sirdev_config_fsm(void *data) | ||
104 | { | ||
105 | struct sir_dev *dev = data; | ||
106 | struct sir_fsm *fsm = &dev->fsm; | ||
107 | int next_state; | ||
108 | int ret = -1; | ||
109 | unsigned delay; | ||
110 | |||
111 | IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies); | ||
112 | |||
113 | do { | ||
114 | IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n", | ||
115 | __FUNCTION__, fsm->state, fsm->substate); | ||
116 | |||
117 | next_state = fsm->state; | ||
118 | delay = 0; | ||
119 | |||
120 | switch(fsm->state) { | ||
121 | |||
122 | case SIRDEV_STATE_DONGLE_OPEN: | ||
123 | if (dev->dongle_drv != NULL) { | ||
124 | ret = sirdev_put_dongle(dev); | ||
125 | if (ret) { | ||
126 | fsm->result = -EINVAL; | ||
127 | next_state = SIRDEV_STATE_ERROR; | ||
128 | break; | ||
129 | } | ||
130 | } | ||
131 | |||
132 | /* Initialize dongle */ | ||
133 | ret = sirdev_get_dongle(dev, fsm->param); | ||
134 | if (ret) { | ||
135 | fsm->result = ret; | ||
136 | next_state = SIRDEV_STATE_ERROR; | ||
137 | break; | ||
138 | } | ||
139 | |||
140 | /* Dongles are powered through the modem control lines which | ||
141 | * were just set during open. Before resetting, let's wait for | ||
142 | * the power to stabilize. This is what some dongle drivers did | ||
143 | * in open before, while others didn't - should be safe anyway. | ||
144 | */ | ||
145 | |||
146 | delay = 50; | ||
147 | fsm->substate = SIRDEV_STATE_DONGLE_RESET; | ||
148 | next_state = SIRDEV_STATE_DONGLE_RESET; | ||
149 | |||
150 | fsm->param = 9600; | ||
151 | |||
152 | break; | ||
153 | |||
154 | case SIRDEV_STATE_DONGLE_CLOSE: | ||
155 | /* shouldn't we just treat this as success=? */ | ||
156 | if (dev->dongle_drv == NULL) { | ||
157 | fsm->result = -EINVAL; | ||
158 | next_state = SIRDEV_STATE_ERROR; | ||
159 | break; | ||
160 | } | ||
161 | |||
162 | ret = sirdev_put_dongle(dev); | ||
163 | if (ret) { | ||
164 | fsm->result = ret; | ||
165 | next_state = SIRDEV_STATE_ERROR; | ||
166 | break; | ||
167 | } | ||
168 | next_state = SIRDEV_STATE_DONE; | ||
169 | break; | ||
170 | |||
171 | case SIRDEV_STATE_SET_DTR_RTS: | ||
172 | ret = sirdev_set_dtr_rts(dev, | ||
173 | (fsm->param&0x02) ? TRUE : FALSE, | ||
174 | (fsm->param&0x01) ? TRUE : FALSE); | ||
175 | next_state = SIRDEV_STATE_DONE; | ||
176 | break; | ||
177 | |||
178 | case SIRDEV_STATE_SET_SPEED: | ||
179 | fsm->substate = SIRDEV_STATE_WAIT_XMIT; | ||
180 | next_state = SIRDEV_STATE_DONGLE_CHECK; | ||
181 | break; | ||
182 | |||
183 | case SIRDEV_STATE_DONGLE_CHECK: | ||
184 | ret = sirdev_tx_complete_fsm(dev); | ||
185 | if (ret < 0) { | ||
186 | fsm->result = ret; | ||
187 | next_state = SIRDEV_STATE_ERROR; | ||
188 | break; | ||
189 | } | ||
190 | if ((delay=ret) != 0) | ||
191 | break; | ||
192 | |||
193 | if (dev->dongle_drv) { | ||
194 | fsm->substate = SIRDEV_STATE_DONGLE_RESET; | ||
195 | next_state = SIRDEV_STATE_DONGLE_RESET; | ||
196 | } | ||
197 | else { | ||
198 | dev->speed = fsm->param; | ||
199 | next_state = SIRDEV_STATE_PORT_SPEED; | ||
200 | } | ||
201 | break; | ||
202 | |||
203 | case SIRDEV_STATE_DONGLE_RESET: | ||
204 | if (dev->dongle_drv->reset) { | ||
205 | ret = dev->dongle_drv->reset(dev); | ||
206 | if (ret < 0) { | ||
207 | fsm->result = ret; | ||
208 | next_state = SIRDEV_STATE_ERROR; | ||
209 | break; | ||
210 | } | ||
211 | } | ||
212 | else | ||
213 | ret = 0; | ||
214 | if ((delay=ret) == 0) { | ||
215 | /* set serial port according to dongle default speed */ | ||
216 | if (dev->drv->set_speed) | ||
217 | dev->drv->set_speed(dev, dev->speed); | ||
218 | fsm->substate = SIRDEV_STATE_DONGLE_SPEED; | ||
219 | next_state = SIRDEV_STATE_DONGLE_SPEED; | ||
220 | } | ||
221 | break; | ||
222 | |||
223 | case SIRDEV_STATE_DONGLE_SPEED: | ||
224 | if (dev->dongle_drv->reset) { | ||
225 | ret = dev->dongle_drv->set_speed(dev, fsm->param); | ||
226 | if (ret < 0) { | ||
227 | fsm->result = ret; | ||
228 | next_state = SIRDEV_STATE_ERROR; | ||
229 | break; | ||
230 | } | ||
231 | } | ||
232 | else | ||
233 | ret = 0; | ||
234 | if ((delay=ret) == 0) | ||
235 | next_state = SIRDEV_STATE_PORT_SPEED; | ||
236 | break; | ||
237 | |||
238 | case SIRDEV_STATE_PORT_SPEED: | ||
239 | /* Finally we are ready to change the serial port speed */ | ||
240 | if (dev->drv->set_speed) | ||
241 | dev->drv->set_speed(dev, dev->speed); | ||
242 | dev->new_speed = 0; | ||
243 | next_state = SIRDEV_STATE_DONE; | ||
244 | break; | ||
245 | |||
246 | case SIRDEV_STATE_DONE: | ||
247 | /* Signal network layer so it can send more frames */ | ||
248 | netif_wake_queue(dev->netdev); | ||
249 | next_state = SIRDEV_STATE_COMPLETE; | ||
250 | break; | ||
251 | |||
252 | default: | ||
253 | IRDA_ERROR("%s - undefined state\n", __FUNCTION__); | ||
254 | fsm->result = -EINVAL; | ||
255 | /* fall thru */ | ||
256 | |||
257 | case SIRDEV_STATE_ERROR: | ||
258 | IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result); | ||
259 | |||
260 | #if 0 /* don't enable this before we have netdev->tx_timeout to recover */ | ||
261 | netif_stop_queue(dev->netdev); | ||
262 | #else | ||
263 | netif_wake_queue(dev->netdev); | ||
264 | #endif | ||
265 | /* fall thru */ | ||
266 | |||
267 | case SIRDEV_STATE_COMPLETE: | ||
268 | /* config change finished, so we are not busy any longer */ | ||
269 | sirdev_enable_rx(dev); | ||
270 | up(&fsm->sem); | ||
271 | return; | ||
272 | } | ||
273 | fsm->state = next_state; | ||
274 | } while(!delay); | ||
275 | |||
276 | queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay)); | ||
277 | } | ||
278 | |||
279 | /* schedule some device configuration task for execution by kIrDAd | ||
280 | * on behalf of the above state machine. | ||
281 | * can be called from process or interrupt/tasklet context. | ||
282 | */ | ||
283 | |||
284 | int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param) | ||
285 | { | ||
286 | struct sir_fsm *fsm = &dev->fsm; | ||
287 | |||
288 | IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param); | ||
289 | |||
290 | if (down_trylock(&fsm->sem)) { | ||
291 | if (in_interrupt() || in_atomic() || irqs_disabled()) { | ||
292 | IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__); | ||
293 | return -EWOULDBLOCK; | ||
294 | } else | ||
295 | down(&fsm->sem); | ||
296 | } | ||
297 | |||
298 | if (fsm->state == SIRDEV_STATE_DEAD) { | ||
299 | /* race with sirdev_close should never happen */ | ||
300 | IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__); | ||
301 | up(&fsm->sem); | ||
302 | return -ESTALE; /* or better EPIPE? */ | ||
303 | } | ||
304 | |||
305 | netif_stop_queue(dev->netdev); | ||
306 | atomic_set(&dev->enable_rx, 0); | ||
307 | |||
308 | fsm->state = initial_state; | ||
309 | fsm->param = param; | ||
310 | fsm->result = 0; | ||
311 | |||
312 | INIT_WORK(&fsm->work, sirdev_config_fsm, dev); | ||
313 | queue_work(irda_sir_wq, &fsm->work); | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | |||
26 | /***************************************************************************/ | 318 | /***************************************************************************/ |
27 | 319 | ||
28 | void sirdev_enable_rx(struct sir_dev *dev) | 320 | void sirdev_enable_rx(struct sir_dev *dev) |
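sirdev_schedule_request() above is the public entry point into this state machine: it takes fsm->sem (without sleeping when called from atomic context), stops the xmit queue, disables rx, seeds the initial state, and queues the work item on irda_sir_wq. A typical caller, hedged to the states defined in this driver, looks like:

  /* e.g. from a SIR driver's speed-change path */
  int err = sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, 115200);
  if (err == -EWOULDBLOCK) {
          /* state machine busy and we cannot sleep here: retry later */
  }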
@@ -619,10 +911,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n | |||
619 | spin_lock_init(&dev->tx_lock); | 911 | spin_lock_init(&dev->tx_lock); |
620 | init_MUTEX(&dev->fsm.sem); | 912 | init_MUTEX(&dev->fsm.sem); |
621 | 913 | ||
622 | INIT_LIST_HEAD(&dev->fsm.rq.lh_request); | ||
623 | dev->fsm.rq.pending = 0; | ||
624 | init_timer(&dev->fsm.rq.timer); | ||
625 | |||
626 | dev->drv = drv; | 914 | dev->drv = drv; |
627 | dev->netdev = ndev; | 915 | dev->netdev = ndev; |
628 | 916 | ||
@@ -682,3 +970,22 @@ int sirdev_put_instance(struct sir_dev *dev) | |||
682 | } | 970 | } |
683 | EXPORT_SYMBOL(sirdev_put_instance); | 971 | EXPORT_SYMBOL(sirdev_put_instance); |
684 | 972 | ||
973 | static int __init sir_wq_init(void) | ||
974 | { | ||
975 | irda_sir_wq = create_singlethread_workqueue("irda_sir_wq"); | ||
976 | if (!irda_sir_wq) | ||
977 | return -ENOMEM; | ||
978 | return 0; | ||
979 | } | ||
980 | |||
981 | static void __exit sir_wq_exit(void) | ||
982 | { | ||
983 | destroy_workqueue(irda_sir_wq); | ||
984 | } | ||
985 | |||
986 | module_init(sir_wq_init); | ||
987 | module_exit(sir_wq_exit); | ||
988 | |||
989 | MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>"); | ||
990 | MODULE_DESCRIPTION("IrDA SIR core"); | ||
991 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
deleted file mode 100644
index e3904d6bfecd..000000000000
--- a/drivers/net/irda/sir_kthread.c
+++ /dev/null
@@ -1,508 +0,0 @@ | |||
1 | /********************************************************************* | ||
2 | * | ||
3 | * sir_kthread.c: dedicated thread to process scheduled | ||
4 | * sir device setup requests | ||
5 | * | ||
6 | * Copyright (c) 2002 Martin Diehl | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License as | ||
10 | * published by the Free Software Foundation; either version 2 of | ||
11 | * the License, or (at your option) any later version. | ||
12 | * | ||
13 | ********************************************************************/ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/version.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/completion.h> | ||
21 | #include <linux/delay.h> | ||
22 | |||
23 | #include <net/irda/irda.h> | ||
24 | |||
25 | #include "sir-dev.h" | ||
26 | |||
27 | /************************************************************************** | ||
28 | * | ||
29 | * kIrDAd kernel thread and config state machine | ||
30 | * | ||
31 | */ | ||
32 | |||
33 | struct irda_request_queue { | ||
34 | struct list_head request_list; | ||
35 | spinlock_t lock; | ||
36 | task_t *thread; | ||
37 | struct completion exit; | ||
38 | wait_queue_head_t kick, done; | ||
39 | atomic_t num_pending; | ||
40 | }; | ||
41 | |||
42 | static struct irda_request_queue irda_rq_queue; | ||
43 | |||
44 | static int irda_queue_request(struct irda_request *rq) | ||
45 | { | ||
46 | int ret = 0; | ||
47 | unsigned long flags; | ||
48 | |||
49 | if (!test_and_set_bit(0, &rq->pending)) { | ||
50 | spin_lock_irqsave(&irda_rq_queue.lock, flags); | ||
51 | list_add_tail(&rq->lh_request, &irda_rq_queue.request_list); | ||
52 | wake_up(&irda_rq_queue.kick); | ||
53 | atomic_inc(&irda_rq_queue.num_pending); | ||
54 | spin_unlock_irqrestore(&irda_rq_queue.lock, flags); | ||
55 | ret = 1; | ||
56 | } | ||
57 | return ret; | ||
58 | } | ||
59 | |||
60 | static void irda_request_timer(unsigned long data) | ||
61 | { | ||
62 | struct irda_request *rq = (struct irda_request *)data; | ||
63 | unsigned long flags; | ||
64 | |||
65 | spin_lock_irqsave(&irda_rq_queue.lock, flags); | ||
66 | list_add_tail(&rq->lh_request, &irda_rq_queue.request_list); | ||
67 | wake_up(&irda_rq_queue.kick); | ||
68 | spin_unlock_irqrestore(&irda_rq_queue.lock, flags); | ||
69 | } | ||
70 | |||
71 | static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay) | ||
72 | { | ||
73 | int ret = 0; | ||
74 | struct timer_list *timer = &rq->timer; | ||
75 | |||
76 | if (!test_and_set_bit(0, &rq->pending)) { | ||
77 | timer->expires = jiffies + delay; | ||
78 | timer->function = irda_request_timer; | ||
79 | timer->data = (unsigned long)rq; | ||
80 | atomic_inc(&irda_rq_queue.num_pending); | ||
81 | add_timer(timer); | ||
82 | ret = 1; | ||
83 | } | ||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | static void run_irda_queue(void) | ||
88 | { | ||
89 | unsigned long flags; | ||
90 | struct list_head *entry, *tmp; | ||
91 | struct irda_request *rq; | ||
92 | |||
93 | spin_lock_irqsave(&irda_rq_queue.lock, flags); | ||
94 | list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) { | ||
95 | rq = list_entry(entry, struct irda_request, lh_request); | ||
96 | list_del_init(entry); | ||
97 | spin_unlock_irqrestore(&irda_rq_queue.lock, flags); | ||
98 | |||
99 | clear_bit(0, &rq->pending); | ||
100 | rq->func(rq->data); | ||
101 | |||
102 | if (atomic_dec_and_test(&irda_rq_queue.num_pending)) | ||
103 | wake_up(&irda_rq_queue.done); | ||
104 | |||
105 | spin_lock_irqsave(&irda_rq_queue.lock, flags); | ||
106 | } | ||
107 | spin_unlock_irqrestore(&irda_rq_queue.lock, flags); | ||
108 | } | ||
109 | |||
110 | static int irda_thread(void *startup) | ||
111 | { | ||
112 | DECLARE_WAITQUEUE(wait, current); | ||
113 | |||
114 | daemonize("kIrDAd"); | ||
115 | |||
116 | irda_rq_queue.thread = current; | ||
117 | |||
118 | complete((struct completion *)startup); | ||
119 | |||
120 | while (irda_rq_queue.thread != NULL) { | ||
121 | |||
122 | /* We use TASK_INTERRUPTIBLE, rather than | ||
123 | * TASK_UNINTERRUPTIBLE. Andrew Morton made this | ||
124 | * change ; he told me that it is safe, because "signal | ||
125 | * blocking is now handled in daemonize()", he added | ||
126 | * that the problem is that "uninterruptible sleep | ||
127 | * contributes to load average", making user worry. | ||
128 | * Jean II */ | ||
129 | set_task_state(current, TASK_INTERRUPTIBLE); | ||
130 | add_wait_queue(&irda_rq_queue.kick, &wait); | ||
131 | if (list_empty(&irda_rq_queue.request_list)) | ||
132 | schedule(); | ||
133 | else | ||
134 | __set_task_state(current, TASK_RUNNING); | ||
135 | remove_wait_queue(&irda_rq_queue.kick, &wait); | ||
136 | |||
137 | /* make swsusp happy with our thread */ | ||
138 | try_to_freeze(); | ||
139 | |||
140 | run_irda_queue(); | ||
141 | } | ||
142 | |||
143 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35) | ||
144 | reparent_to_init(); | ||
145 | #endif | ||
146 | complete_and_exit(&irda_rq_queue.exit, 0); | ||
147 | /* never reached */ | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | |||
152 | static void flush_irda_queue(void) | ||
153 | { | ||
154 | if (atomic_read(&irda_rq_queue.num_pending)) { | ||
155 | |||
156 | DECLARE_WAITQUEUE(wait, current); | ||
157 | |||
158 | if (!list_empty(&irda_rq_queue.request_list)) | ||
159 | run_irda_queue(); | ||
160 | |||
161 | set_task_state(current, TASK_UNINTERRUPTIBLE); | ||
162 | add_wait_queue(&irda_rq_queue.done, &wait); | ||
163 | if (atomic_read(&irda_rq_queue.num_pending)) | ||
164 | schedule(); | ||
165 | else | ||
166 | __set_task_state(current, TASK_RUNNING); | ||
167 | remove_wait_queue(&irda_rq_queue.done, &wait); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* substate handler of the config-fsm to handle the cases where we want | ||
172 | * to wait for transmit completion before changing the port configuration | ||
173 | */ | ||
174 | |||
175 | static int irda_tx_complete_fsm(struct sir_dev *dev) | ||
176 | { | ||
177 | struct sir_fsm *fsm = &dev->fsm; | ||
178 | unsigned next_state, delay; | ||
179 | unsigned bytes_left; | ||
180 | |||
181 | do { | ||
182 | next_state = fsm->substate; /* default: stay in current substate */ | ||
183 | delay = 0; | ||
184 | |||
185 | switch(fsm->substate) { | ||
186 | |||
187 | case SIRDEV_STATE_WAIT_XMIT: | ||
188 | if (dev->drv->chars_in_buffer) | ||
189 | bytes_left = dev->drv->chars_in_buffer(dev); | ||
190 | else | ||
191 | bytes_left = 0; | ||
192 | if (!bytes_left) { | ||
193 | next_state = SIRDEV_STATE_WAIT_UNTIL_SENT; | ||
194 | break; | ||
195 | } | ||
196 | |||
197 | if (dev->speed > 115200) | ||
198 | delay = (bytes_left*8*10000) / (dev->speed/100); | ||
199 | else if (dev->speed > 0) | ||
200 | delay = (bytes_left*10*10000) / (dev->speed/100); | ||
201 | else | ||
202 | delay = 0; | ||
203 | /* expected delay (usec) until remaining bytes are sent */ | ||
204 | if (delay < 100) { | ||
205 | udelay(delay); | ||
206 | delay = 0; | ||
207 | break; | ||
208 | } | ||
209 | /* sleep some longer delay (msec) */ | ||
210 | delay = (delay+999) / 1000; | ||
211 | break; | ||
212 | |||
213 | case SIRDEV_STATE_WAIT_UNTIL_SENT: | ||
214 | /* block until underlaying hardware buffer are empty */ | ||
215 | if (dev->drv->wait_until_sent) | ||
216 | dev->drv->wait_until_sent(dev); | ||
217 | next_state = SIRDEV_STATE_TX_DONE; | ||
218 | break; | ||
219 | |||
220 | case SIRDEV_STATE_TX_DONE: | ||
221 | return 0; | ||
222 | |||
223 | default: | ||
224 | IRDA_ERROR("%s - undefined state\n", __FUNCTION__); | ||
225 | return -EINVAL; | ||
226 | } | ||
227 | fsm->substate = next_state; | ||
228 | } while (delay == 0); | ||
229 | return delay; | ||
230 | } | ||
231 | |||
232 | /* | ||
233 | * Function irda_config_fsm | ||
234 | * | ||
235 | * State machine to handle the configuration of the device (and attached dongle, if any). | ||
236 | * This handler is scheduled for execution in kIrDAd context, so we can sleep. | ||
237 | * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too | ||
238 | * long. Instead, for longer delays we start a timer to reschedule us later. | ||
239 | * On entry, fsm->sem is always locked and the netdev xmit queue stopped. | ||
240 | * Both must be unlocked/restarted on completion - but only on final exit. | ||
241 | */ | ||
242 | |||
243 | static void irda_config_fsm(void *data) | ||
244 | { | ||
245 | struct sir_dev *dev = data; | ||
246 | struct sir_fsm *fsm = &dev->fsm; | ||
247 | int next_state; | ||
248 | int ret = -1; | ||
249 | unsigned delay; | ||
250 | |||
251 | IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies); | ||
252 | |||
253 | do { | ||
254 | IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n", | ||
255 | __FUNCTION__, fsm->state, fsm->substate); | ||
256 | |||
257 | next_state = fsm->state; | ||
258 | delay = 0; | ||
259 | |||
260 | switch(fsm->state) { | ||
261 | |||
262 | case SIRDEV_STATE_DONGLE_OPEN: | ||
263 | if (dev->dongle_drv != NULL) { | ||
264 | ret = sirdev_put_dongle(dev); | ||
265 | if (ret) { | ||
266 | fsm->result = -EINVAL; | ||
267 | next_state = SIRDEV_STATE_ERROR; | ||
268 | break; | ||
269 | } | ||
270 | } | ||
271 | |||
272 | /* Initialize dongle */ | ||
273 | ret = sirdev_get_dongle(dev, fsm->param); | ||
274 | if (ret) { | ||
275 | fsm->result = ret; | ||
276 | next_state = SIRDEV_STATE_ERROR; | ||
277 | break; | ||
278 | } | ||
279 | |||
280 | /* Dongles are powered through the modem control lines which | ||
281 | * were just set during open. Before resetting, let's wait for | ||
282 | * the power to stabilize. This is what some dongle drivers did | ||
283 | * in open before, while others didn't - should be safe anyway. | ||
284 | */ | ||
285 | |||
286 | delay = 50; | ||
287 | fsm->substate = SIRDEV_STATE_DONGLE_RESET; | ||
288 | next_state = SIRDEV_STATE_DONGLE_RESET; | ||
289 | |||
290 | fsm->param = 9600; | ||
291 | |||
292 | break; | ||
293 | |||
294 | case SIRDEV_STATE_DONGLE_CLOSE: | ||
295 | /* shouldn't we just treat this as success? */ | ||
296 | if (dev->dongle_drv == NULL) { | ||
297 | fsm->result = -EINVAL; | ||
298 | next_state = SIRDEV_STATE_ERROR; | ||
299 | break; | ||
300 | } | ||
301 | |||
302 | ret = sirdev_put_dongle(dev); | ||
303 | if (ret) { | ||
304 | fsm->result = ret; | ||
305 | next_state = SIRDEV_STATE_ERROR; | ||
306 | break; | ||
307 | } | ||
308 | next_state = SIRDEV_STATE_DONE; | ||
309 | break; | ||
310 | |||
311 | case SIRDEV_STATE_SET_DTR_RTS: | ||
312 | ret = sirdev_set_dtr_rts(dev, | ||
313 | (fsm->param&0x02) ? TRUE : FALSE, | ||
314 | (fsm->param&0x01) ? TRUE : FALSE); | ||
315 | next_state = SIRDEV_STATE_DONE; | ||
316 | break; | ||
317 | |||
318 | case SIRDEV_STATE_SET_SPEED: | ||
319 | fsm->substate = SIRDEV_STATE_WAIT_XMIT; | ||
320 | next_state = SIRDEV_STATE_DONGLE_CHECK; | ||
321 | break; | ||
322 | |||
323 | case SIRDEV_STATE_DONGLE_CHECK: | ||
324 | ret = irda_tx_complete_fsm(dev); | ||
325 | if (ret < 0) { | ||
326 | fsm->result = ret; | ||
327 | next_state = SIRDEV_STATE_ERROR; | ||
328 | break; | ||
329 | } | ||
330 | if ((delay=ret) != 0) | ||
331 | break; | ||
332 | |||
333 | if (dev->dongle_drv) { | ||
334 | fsm->substate = SIRDEV_STATE_DONGLE_RESET; | ||
335 | next_state = SIRDEV_STATE_DONGLE_RESET; | ||
336 | } | ||
337 | else { | ||
338 | dev->speed = fsm->param; | ||
339 | next_state = SIRDEV_STATE_PORT_SPEED; | ||
340 | } | ||
341 | break; | ||
342 | |||
343 | case SIRDEV_STATE_DONGLE_RESET: | ||
344 | if (dev->dongle_drv->reset) { | ||
345 | ret = dev->dongle_drv->reset(dev); | ||
346 | if (ret < 0) { | ||
347 | fsm->result = ret; | ||
348 | next_state = SIRDEV_STATE_ERROR; | ||
349 | break; | ||
350 | } | ||
351 | } | ||
352 | else | ||
353 | ret = 0; | ||
354 | if ((delay=ret) == 0) { | ||
355 | /* set serial port according to dongle default speed */ | ||
356 | if (dev->drv->set_speed) | ||
357 | dev->drv->set_speed(dev, dev->speed); | ||
358 | fsm->substate = SIRDEV_STATE_DONGLE_SPEED; | ||
359 | next_state = SIRDEV_STATE_DONGLE_SPEED; | ||
360 | } | ||
361 | break; | ||
362 | |||
363 | case SIRDEV_STATE_DONGLE_SPEED: | ||
364 | if (dev->dongle_drv->set_speed) { | ||
365 | ret = dev->dongle_drv->set_speed(dev, fsm->param); | ||
366 | if (ret < 0) { | ||
367 | fsm->result = ret; | ||
368 | next_state = SIRDEV_STATE_ERROR; | ||
369 | break; | ||
370 | } | ||
371 | } | ||
372 | else | ||
373 | ret = 0; | ||
374 | if ((delay=ret) == 0) | ||
375 | next_state = SIRDEV_STATE_PORT_SPEED; | ||
376 | break; | ||
377 | |||
378 | case SIRDEV_STATE_PORT_SPEED: | ||
379 | /* Finally we are ready to change the serial port speed */ | ||
380 | if (dev->drv->set_speed) | ||
381 | dev->drv->set_speed(dev, dev->speed); | ||
382 | dev->new_speed = 0; | ||
383 | next_state = SIRDEV_STATE_DONE; | ||
384 | break; | ||
385 | |||
386 | case SIRDEV_STATE_DONE: | ||
387 | /* Signal network layer so it can send more frames */ | ||
388 | netif_wake_queue(dev->netdev); | ||
389 | next_state = SIRDEV_STATE_COMPLETE; | ||
390 | break; | ||
391 | |||
392 | default: | ||
393 | IRDA_ERROR("%s - undefined state\n", __FUNCTION__); | ||
394 | fsm->result = -EINVAL; | ||
395 | /* fall thru */ | ||
396 | |||
397 | case SIRDEV_STATE_ERROR: | ||
398 | IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result); | ||
399 | |||
400 | #if 0 /* don't enable this before we have netdev->tx_timeout to recover */ | ||
401 | netif_stop_queue(dev->netdev); | ||
402 | #else | ||
403 | netif_wake_queue(dev->netdev); | ||
404 | #endif | ||
405 | /* fall thru */ | ||
406 | |||
407 | case SIRDEV_STATE_COMPLETE: | ||
408 | /* config change finished, so we are not busy any longer */ | ||
409 | sirdev_enable_rx(dev); | ||
410 | up(&fsm->sem); | ||
411 | return; | ||
412 | } | ||
413 | fsm->state = next_state; | ||
414 | } while(!delay); | ||
415 | |||
416 | irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay)); | ||
417 | } | ||
418 | |||
419 | /* schedule some device configuration task for execution by kIrDAd | ||
420 | * on behalf of the above state machine. | ||
421 | * can be called from process or interrupt/tasklet context. | ||
422 | */ | ||
423 | |||
424 | int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param) | ||
425 | { | ||
426 | struct sir_fsm *fsm = &dev->fsm; | ||
427 | int xmit_was_down; | ||
428 | |||
429 | IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param); | ||
430 | |||
431 | if (down_trylock(&fsm->sem)) { | ||
432 | if (in_interrupt() || in_atomic() || irqs_disabled()) { | ||
433 | IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__); | ||
434 | return -EWOULDBLOCK; | ||
435 | } else | ||
436 | down(&fsm->sem); | ||
437 | } | ||
438 | |||
439 | if (fsm->state == SIRDEV_STATE_DEAD) { | ||
440 | /* race with sirdev_close should never happen */ | ||
441 | IRDA_ERROR("%s(), stale instance!\n", __FUNCTION__); | ||
442 | up(&fsm->sem); | ||
443 | return -ESTALE; /* or better EPIPE? */ | ||
444 | } | ||
445 | |||
446 | xmit_was_down = netif_queue_stopped(dev->netdev); | ||
447 | netif_stop_queue(dev->netdev); | ||
448 | atomic_set(&dev->enable_rx, 0); | ||
449 | |||
450 | fsm->state = initial_state; | ||
451 | fsm->param = param; | ||
452 | fsm->result = 0; | ||
453 | |||
454 | INIT_LIST_HEAD(&fsm->rq.lh_request); | ||
455 | fsm->rq.pending = 0; | ||
456 | fsm->rq.func = irda_config_fsm; | ||
457 | fsm->rq.data = dev; | ||
458 | |||
459 | if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */ | ||
460 | atomic_set(&dev->enable_rx, 1); | ||
461 | if (!xmit_was_down) | ||
462 | netif_wake_queue(dev->netdev); | ||
463 | up(&fsm->sem); | ||
464 | return -EAGAIN; | ||
465 | } | ||
466 | return 0; | ||
467 | } | ||
468 | |||
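A hypothetical caller sketch for the helper above: a driver requesting a speed change would hand the target state and parameter to sirdev_schedule_request() and let kIrDAd perform the actual reprogramming later. The wrapper name is an assumption and not part of this patch; the return convention (-EWOULDBLOCK from atomic context while the state machine is busy) follows the code above.

/* hypothetical convenience wrapper - queues a speed change for kIrDAd */
static inline int example_schedule_speed(struct sir_dev *dev, unsigned speed)
{
	return sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, speed);
}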
469 | static int __init irda_thread_create(void) | ||
470 | { | ||
471 | struct completion startup; | ||
472 | int pid; | ||
473 | |||
474 | spin_lock_init(&irda_rq_queue.lock); | ||
475 | irda_rq_queue.thread = NULL; | ||
476 | INIT_LIST_HEAD(&irda_rq_queue.request_list); | ||
477 | init_waitqueue_head(&irda_rq_queue.kick); | ||
478 | init_waitqueue_head(&irda_rq_queue.done); | ||
479 | atomic_set(&irda_rq_queue.num_pending, 0); | ||
480 | |||
481 | init_completion(&startup); | ||
482 | pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES); | ||
483 | if (pid <= 0) | ||
484 | return -EAGAIN; | ||
485 | else | ||
486 | wait_for_completion(&startup); | ||
487 | |||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | static void __exit irda_thread_join(void) | ||
492 | { | ||
493 | if (irda_rq_queue.thread) { | ||
494 | flush_irda_queue(); | ||
495 | init_completion(&irda_rq_queue.exit); | ||
496 | irda_rq_queue.thread = NULL; | ||
497 | wake_up(&irda_rq_queue.kick); | ||
498 | wait_for_completion(&irda_rq_queue.exit); | ||
499 | } | ||
500 | } | ||
501 | |||
502 | module_init(irda_thread_create); | ||
503 | module_exit(irda_thread_join); | ||
504 | |||
505 | MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>"); | ||
506 | MODULE_DESCRIPTION("IrDA SIR core"); | ||
507 | MODULE_LICENSE("GPL"); | ||
508 | |||
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 58f76cefbc83..a4674044bd6f 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include <linux/rtnetlink.h> | 54 | #include <linux/rtnetlink.h> |
55 | #include <linux/serial_reg.h> | 55 | #include <linux/serial_reg.h> |
56 | #include <linux/dma-mapping.h> | 56 | #include <linux/dma-mapping.h> |
57 | #include <linux/pnp.h> | ||
57 | #include <linux/platform_device.h> | 58 | #include <linux/platform_device.h> |
58 | 59 | ||
59 | #include <asm/io.h> | 60 | #include <asm/io.h> |
@@ -358,6 +359,16 @@ static inline void register_bank(int iobase, int bank) | |||
358 | iobase + IRCC_MASTER); | 359 | iobase + IRCC_MASTER); |
359 | } | 360 | } |
360 | 361 | ||
362 | #ifdef CONFIG_PNP | ||
363 | /* PNP hotplug support */ | ||
364 | static const struct pnp_device_id smsc_ircc_pnp_table[] = { | ||
365 | { .id = "SMCf010", .driver_data = 0 }, | ||
366 | /* and presumably others */ | ||
367 | { } | ||
368 | }; | ||
369 | MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table); | ||
370 | #endif | ||
371 | |||
361 | 372 | ||
362 | /******************************************************************************* | 373 | /******************************************************************************* |
363 | * | 374 | * |
@@ -2072,7 +2083,8 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self) | |||
2072 | 2083 | ||
2073 | /* PROBING | 2084 | /* PROBING |
2074 | * | 2085 | * |
2075 | * | 2086 | * REVISIT we can be told about the device by PNP, and should use that info |
2087 | * instead of probing hardware and creating a platform_device ... | ||
2076 | */ | 2088 | */ |
2077 | 2089 | ||
2078 | static int __init smsc_ircc_look_for_chips(void) | 2090 | static int __init smsc_ircc_look_for_chips(void) |
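The hunk above only adds the PNP ID table and a REVISIT note; it does not yet register a PNP driver. A hedged sketch of what such a follow-up could look like, using the standard pnp_driver interface - the probe body, remove handling and error paths are omitted, and every name except smsc_ircc_pnp_table is an assumption:

static int smsc_ircc_pnp_probe(struct pnp_dev *dev,
			       const struct pnp_device_id *dev_id)
{
	/* would pick up I/O, IRQ and DMA resources from 'dev' here and
	 * hand them to the existing chip setup code instead of probing */
	return 0;
}

static struct pnp_driver smsc_ircc_pnp_driver = {
	.name		= "smsc-ircc2",
	.id_table	= smsc_ircc_pnp_table,
	.probe		= smsc_ircc_pnp_probe,
};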
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index ea62a3e7d586..411f4d809c47 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -1419,6 +1419,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1419 | mv643xx_eth_update_pscr(dev, &cmd); | 1419 | mv643xx_eth_update_pscr(dev, &cmd); |
1420 | mv643xx_set_settings(dev, &cmd); | 1420 | mv643xx_set_settings(dev, &cmd); |
1421 | 1421 | ||
1422 | SET_MODULE_OWNER(dev); | ||
1423 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1422 | err = register_netdev(dev); | 1424 | err = register_netdev(dev); |
1423 | if (err) | 1425 | if (err) |
1424 | goto out; | 1426 | goto out; |
diff --git a/drivers/net/ne.c b/drivers/net/ne.c index 93c494bcd18d..b32765215f75 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c | |||
@@ -139,8 +139,9 @@ bad_clone_list[] __initdata = { | |||
139 | 139 | ||
140 | #if defined(CONFIG_PLAT_MAPPI) | 140 | #if defined(CONFIG_PLAT_MAPPI) |
141 | # define DCR_VAL 0x4b | 141 | # define DCR_VAL 0x4b |
142 | #elif defined(CONFIG_PLAT_OAKS32R) | 142 | #elif defined(CONFIG_PLAT_OAKS32R) || \ |
143 | # define DCR_VAL 0x48 | 143 | defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) |
144 | # define DCR_VAL 0x48 /* 8-bit mode */ | ||
144 | #else | 145 | #else |
145 | # define DCR_VAL 0x49 | 146 | # define DCR_VAL 0x49 |
146 | #endif | 147 | #endif |
@@ -396,10 +397,22 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) | |||
396 | /* We must set the 8390 for word mode. */ | 397 | /* We must set the 8390 for word mode. */ |
397 | outb_p(DCR_VAL, ioaddr + EN0_DCFG); | 398 | outb_p(DCR_VAL, ioaddr + EN0_DCFG); |
398 | start_page = NESM_START_PG; | 399 | start_page = NESM_START_PG; |
399 | stop_page = NESM_STOP_PG; | 400 | |
401 | /* | ||
402 | * Realtek RTL8019AS datasheet says that the PSTOP register | ||
403 | * shouldn't exceed 0x60 in 8-bit mode. | ||
404 | * This chip can be identified by reading the signature from | ||
405 | * the remote byte count registers (otherwise write-only)... | ||
406 | */ | ||
407 | if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */ | ||
408 | inb(ioaddr + EN0_RCNTLO) == 0x50 && | ||
409 | inb(ioaddr + EN0_RCNTHI) == 0x70) | ||
410 | stop_page = 0x60; | ||
411 | else | ||
412 | stop_page = NESM_STOP_PG; | ||
400 | } else { | 413 | } else { |
401 | start_page = NE1SM_START_PG; | 414 | start_page = NE1SM_START_PG; |
402 | stop_page = NE1SM_STOP_PG; | 415 | stop_page = NE1SM_STOP_PG; |
403 | } | 416 | } |
404 | 417 | ||
405 | #if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R) | 418 | #if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R) |
@@ -509,15 +522,9 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) | |||
509 | ei_status.name = name; | 522 | ei_status.name = name; |
510 | ei_status.tx_start_page = start_page; | 523 | ei_status.tx_start_page = start_page; |
511 | ei_status.stop_page = stop_page; | 524 | ei_status.stop_page = stop_page; |
512 | #if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) | ||
513 | wordlength = 1; | ||
514 | #endif | ||
515 | 525 | ||
516 | #ifdef CONFIG_PLAT_OAKS32R | 526 | /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */ |
517 | ei_status.word16 = 0; | 527 | ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01)); |
518 | #else | ||
519 | ei_status.word16 = (wordlength == 2); | ||
520 | #endif | ||
521 | 528 | ||
522 | ei_status.rx_start_page = start_page + TX_PAGES; | 529 | ei_status.rx_start_page = start_page + TX_PAGES; |
523 | #ifdef PACKETBUF_MEMSIZE | 530 | #ifdef PACKETBUF_MEMSIZE |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 459443b572ce..1b236bdf6b92 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -60,8 +60,10 @@ int mdiobus_register(struct mii_bus *bus) | |||
60 | for (i = 0; i < PHY_MAX_ADDR; i++) { | 60 | for (i = 0; i < PHY_MAX_ADDR; i++) { |
61 | struct phy_device *phydev; | 61 | struct phy_device *phydev; |
62 | 62 | ||
63 | if (bus->phy_mask & (1 << i)) | 63 | if (bus->phy_mask & (1 << i)) { |
64 | bus->phy_map[i] = NULL; | ||
64 | continue; | 65 | continue; |
66 | } | ||
65 | 67 | ||
66 | phydev = get_phy_device(bus, i); | 68 | phydev = get_phy_device(bus, i); |
67 | 69 | ||
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index b82191d2bee1..f5a3bf4d959a 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -127,6 +127,7 @@ static const struct mii_chip_info { | |||
127 | } mii_chip_table[] = { | 127 | } mii_chip_table[] = { |
128 | { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, | 128 | { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, |
129 | { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, | 129 | { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, |
130 | { "SiS 900 on Foxconn 661 7MI", 0x0143, 0xBC70, LAN }, | ||
130 | { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN }, | 131 | { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN }, |
131 | { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN }, | 132 | { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN }, |
132 | { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN }, | 133 | { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN }, |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 227df9876a2c..ffd267fab21d 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -51,7 +51,7 @@ | |||
51 | #include "sky2.h" | 51 | #include "sky2.h" |
52 | 52 | ||
53 | #define DRV_NAME "sky2" | 53 | #define DRV_NAME "sky2" |
54 | #define DRV_VERSION "1.2" | 54 | #define DRV_VERSION "1.3" |
55 | #define PFX DRV_NAME " " | 55 | #define PFX DRV_NAME " " |
56 | 56 | ||
57 | /* | 57 | /* |
@@ -79,6 +79,8 @@ | |||
79 | #define NAPI_WEIGHT 64 | 79 | #define NAPI_WEIGHT 64 |
80 | #define PHY_RETRIES 1000 | 80 | #define PHY_RETRIES 1000 |
81 | 81 | ||
82 | #define RING_NEXT(x,s) (((x)+1) & ((s)-1)) | ||
83 | |||
82 | static const u32 default_msg = | 84 | static const u32 default_msg = |
83 | NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | 85 | NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
84 | | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | 86 | | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR |
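RING_NEXT replaces the modulo wrap-around used elsewhere in this driver with a mask, which is only valid because the ring sizes are powers of two. A small self-contained check of that equivalence (the size value is illustrative, not taken from sky2.h):

#include <assert.h>

int main(void)
{
	unsigned int size = 512;	/* any power of two */
	unsigned int x;

	for (x = 0; x < 4 * size; x++)
		assert(((x + 1) & (size - 1)) == (x + 1) % size);
	return 0;
}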
@@ -96,6 +98,10 @@ static int disable_msi = 0; | |||
96 | module_param(disable_msi, int, 0); | 98 | module_param(disable_msi, int, 0); |
97 | MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); | 99 | MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); |
98 | 100 | ||
101 | static int idle_timeout = 100; | ||
102 | module_param(idle_timeout, int, 0); | ||
103 | MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)"); | ||
104 | |||
99 | static const struct pci_device_id sky2_id_table[] = { | 105 | static const struct pci_device_id sky2_id_table[] = { |
100 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, | 106 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, |
101 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, | 107 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, |
@@ -122,6 +128,7 @@ MODULE_DEVICE_TABLE(pci, sky2_id_table); | |||
122 | /* Avoid conditionals by using array */ | 128 | /* Avoid conditionals by using array */ |
123 | static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; | 129 | static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; |
124 | static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; | 130 | static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; |
131 | static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 }; | ||
125 | 132 | ||
126 | /* This driver supports yukon2 chipset only */ | 133 | /* This driver supports yukon2 chipset only */ |
127 | static const char *yukon2_name[] = { | 134 | static const char *yukon2_name[] = { |
@@ -298,7 +305,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
298 | struct sky2_port *sky2 = netdev_priv(hw->dev[port]); | 305 | struct sky2_port *sky2 = netdev_priv(hw->dev[port]); |
299 | u16 ctrl, ct1000, adv, pg, ledctrl, ledover; | 306 | u16 ctrl, ct1000, adv, pg, ledctrl, ledover; |
300 | 307 | ||
301 | if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) { | 308 | if (sky2->autoneg == AUTONEG_ENABLE && |
309 | !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { | ||
302 | u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); | 310 | u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); |
303 | 311 | ||
304 | ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | | 312 | ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | |
@@ -326,7 +334,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
326 | ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); | 334 | ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); |
327 | 335 | ||
328 | if (sky2->autoneg == AUTONEG_ENABLE && | 336 | if (sky2->autoneg == AUTONEG_ENABLE && |
329 | hw->chip_id == CHIP_ID_YUKON_XL) { | 337 | (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { |
330 | ctrl &= ~PHY_M_PC_DSC_MSK; | 338 | ctrl &= ~PHY_M_PC_DSC_MSK; |
331 | ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; | 339 | ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; |
332 | } | 340 | } |
@@ -442,10 +450,11 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
442 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); | 450 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); |
443 | 451 | ||
444 | /* set LED Function Control register */ | 452 | /* set LED Function Control register */ |
445 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ | 453 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, |
446 | PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ | 454 | (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ |
447 | PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ | 455 | PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ |
448 | PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ | 456 | PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ |
457 | PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ | ||
449 | 458 | ||
450 | /* set Polarity Control register */ | 459 | /* set Polarity Control register */ |
451 | gm_phy_write(hw, port, PHY_MARV_PHY_STAT, | 460 | gm_phy_write(hw, port, PHY_MARV_PHY_STAT, |
@@ -459,6 +468,25 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
459 | /* restore page register */ | 468 | /* restore page register */ |
460 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); | 469 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); |
461 | break; | 470 | break; |
471 | case CHIP_ID_YUKON_EC_U: | ||
472 | pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); | ||
473 | |||
474 | /* select page 3 to access LED control register */ | ||
475 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); | ||
476 | |||
477 | /* set LED Function Control register */ | ||
478 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, | ||
479 | (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ | ||
480 | PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */ | ||
481 | PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ | ||
482 | PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */ | ||
483 | |||
484 | /* set Blink Rate in LED Timer Control Register */ | ||
485 | gm_phy_write(hw, port, PHY_MARV_INT_MASK, | ||
486 | ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS)); | ||
487 | /* restore page register */ | ||
488 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); | ||
489 | break; | ||
462 | 490 | ||
463 | default: | 491 | default: |
464 | /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ | 492 | /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ |
@@ -467,19 +495,21 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
467 | ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); | 495 | ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); |
468 | } | 496 | } |
469 | 497 | ||
470 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) { | 498 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) { |
471 | /* apply fixes in PHY AFE */ | 499 | /* apply fixes in PHY AFE */ |
472 | gm_phy_write(hw, port, 22, 255); | 500 | pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); |
501 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255); | ||
502 | |||
473 | /* increase differential signal amplitude in 10BASE-T */ | 503 | /* increase differential signal amplitude in 10BASE-T */ |
474 | gm_phy_write(hw, port, 24, 0xaa99); | 504 | gm_phy_write(hw, port, 0x18, 0xaa99); |
475 | gm_phy_write(hw, port, 23, 0x2011); | 505 | gm_phy_write(hw, port, 0x17, 0x2011); |
476 | 506 | ||
477 | /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ | 507 | /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ |
478 | gm_phy_write(hw, port, 24, 0xa204); | 508 | gm_phy_write(hw, port, 0x18, 0xa204); |
479 | gm_phy_write(hw, port, 23, 0x2002); | 509 | gm_phy_write(hw, port, 0x17, 0x2002); |
480 | 510 | ||
481 | /* set page register to 0 */ | 511 | /* set page register to 0 */ |
482 | gm_phy_write(hw, port, 22, 0); | 512 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); |
483 | } else { | 513 | } else { |
484 | gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); | 514 | gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); |
485 | 515 | ||
@@ -553,6 +583,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
553 | 583 | ||
554 | if (sky2->duplex == DUPLEX_FULL) | 584 | if (sky2->duplex == DUPLEX_FULL) |
555 | reg |= GM_GPCR_DUP_FULL; | 585 | reg |= GM_GPCR_DUP_FULL; |
586 | |||
587 | /* turn off pause in 10/100mbps half duplex */ | ||
588 | else if (sky2->speed != SPEED_1000 && | ||
589 | hw->chip_id != CHIP_ID_YUKON_EC_U) | ||
590 | sky2->tx_pause = sky2->rx_pause = 0; | ||
556 | } else | 591 | } else |
557 | reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; | 592 | reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; |
558 | 593 | ||
@@ -719,7 +754,7 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) | |||
719 | { | 754 | { |
720 | struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod; | 755 | struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod; |
721 | 756 | ||
722 | sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE; | 757 | sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE); |
723 | return le; | 758 | return le; |
724 | } | 759 | } |
725 | 760 | ||
@@ -735,7 +770,7 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) | |||
735 | static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) | 770 | static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) |
736 | { | 771 | { |
737 | struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; | 772 | struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; |
738 | sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE; | 773 | sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE); |
739 | return le; | 774 | return le; |
740 | } | 775 | } |
741 | 776 | ||
@@ -1050,7 +1085,7 @@ static int sky2_up(struct net_device *dev) | |||
1050 | 1085 | ||
1051 | /* Enable interrupts from phy/mac for port */ | 1086 | /* Enable interrupts from phy/mac for port */ |
1052 | imask = sky2_read32(hw, B0_IMSK); | 1087 | imask = sky2_read32(hw, B0_IMSK); |
1053 | imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; | 1088 | imask |= portirq_msk[port]; |
1054 | sky2_write32(hw, B0_IMSK, imask); | 1089 | sky2_write32(hw, B0_IMSK, imask); |
1055 | 1090 | ||
1056 | return 0; | 1091 | return 0; |
@@ -1078,7 +1113,7 @@ err_out: | |||
1078 | /* Modular subtraction in ring */ | 1113 | /* Modular subtraction in ring */ |
1079 | static inline int tx_dist(unsigned tail, unsigned head) | 1114 | static inline int tx_dist(unsigned tail, unsigned head) |
1080 | { | 1115 | { |
1081 | return (head - tail) % TX_RING_SIZE; | 1116 | return (head - tail) & (TX_RING_SIZE - 1); |
1082 | } | 1117 | } |
1083 | 1118 | ||
1084 | /* Number of list elements available for next tx */ | 1119 | /* Number of list elements available for next tx */ |
@@ -1255,7 +1290,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1255 | le->opcode = OP_BUFFER | HW_OWNER; | 1290 | le->opcode = OP_BUFFER | HW_OWNER; |
1256 | 1291 | ||
1257 | fre = sky2->tx_ring | 1292 | fre = sky2->tx_ring |
1258 | + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE; | 1293 | + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE); |
1259 | pci_unmap_addr_set(fre, mapaddr, mapping); | 1294 | pci_unmap_addr_set(fre, mapaddr, mapping); |
1260 | } | 1295 | } |
1261 | 1296 | ||
@@ -1315,7 +1350,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
1315 | 1350 | ||
1316 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 1351 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
1317 | struct tx_ring_info *fre; | 1352 | struct tx_ring_info *fre; |
1318 | fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; | 1353 | fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE); |
1319 | pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), | 1354 | pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), |
1320 | skb_shinfo(skb)->frags[i].size, | 1355 | skb_shinfo(skb)->frags[i].size, |
1321 | PCI_DMA_TODEVICE); | 1356 | PCI_DMA_TODEVICE); |
@@ -1401,7 +1436,7 @@ static int sky2_down(struct net_device *dev) | |||
1401 | 1436 | ||
1402 | /* Disable port IRQ */ | 1437 | /* Disable port IRQ */ |
1403 | imask = sky2_read32(hw, B0_IMSK); | 1438 | imask = sky2_read32(hw, B0_IMSK); |
1404 | imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; | 1439 | imask &= ~portirq_msk[port]; |
1405 | sky2_write32(hw, B0_IMSK, imask); | 1440 | sky2_write32(hw, B0_IMSK, imask); |
1406 | 1441 | ||
1407 | /* turn off LED's */ | 1442 | /* turn off LED's */ |
@@ -1498,17 +1533,26 @@ static void sky2_link_up(struct sky2_port *sky2) | |||
1498 | sky2_write8(hw, SK_REG(port, LNK_LED_REG), | 1533 | sky2_write8(hw, SK_REG(port, LNK_LED_REG), |
1499 | LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); | 1534 | LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); |
1500 | 1535 | ||
1501 | if (hw->chip_id == CHIP_ID_YUKON_XL) { | 1536 | if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) { |
1502 | u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); | 1537 | u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); |
1538 | u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */ | ||
1539 | |||
1540 | switch(sky2->speed) { | ||
1541 | case SPEED_10: | ||
1542 | led |= PHY_M_LEDC_INIT_CTRL(7); | ||
1543 | break; | ||
1544 | |||
1545 | case SPEED_100: | ||
1546 | led |= PHY_M_LEDC_STA1_CTRL(7); | ||
1547 | break; | ||
1548 | |||
1549 | case SPEED_1000: | ||
1550 | led |= PHY_M_LEDC_STA0_CTRL(7); | ||
1551 | break; | ||
1552 | } | ||
1503 | 1553 | ||
1504 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); | 1554 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); |
1505 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ | 1555 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, led); |
1506 | PHY_M_LEDC_INIT_CTRL(sky2->speed == | ||
1507 | SPEED_10 ? 7 : 0) | | ||
1508 | PHY_M_LEDC_STA1_CTRL(sky2->speed == | ||
1509 | SPEED_100 ? 7 : 0) | | ||
1510 | PHY_M_LEDC_STA0_CTRL(sky2->speed == | ||
1511 | SPEED_1000 ? 7 : 0)); | ||
1512 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); | 1556 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); |
1513 | } | 1557 | } |
1514 | 1558 | ||
@@ -1583,7 +1627,7 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) | |||
1583 | sky2->speed = sky2_phy_speed(hw, aux); | 1627 | sky2->speed = sky2_phy_speed(hw, aux); |
1584 | 1628 | ||
1585 | /* Pause bits are offset (9..8) */ | 1629 | /* Pause bits are offset (9..8) */ |
1586 | if (hw->chip_id == CHIP_ID_YUKON_XL) | 1630 | if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) |
1587 | aux >>= 6; | 1631 | aux >>= 6; |
1588 | 1632 | ||
1589 | sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0; | 1633 | sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0; |
@@ -1859,35 +1903,28 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) | |||
1859 | static int sky2_status_intr(struct sky2_hw *hw, int to_do) | 1903 | static int sky2_status_intr(struct sky2_hw *hw, int to_do) |
1860 | { | 1904 | { |
1861 | int work_done = 0; | 1905 | int work_done = 0; |
1906 | u16 hwidx = sky2_read16(hw, STAT_PUT_IDX); | ||
1862 | 1907 | ||
1863 | rmb(); | 1908 | rmb(); |
1864 | 1909 | ||
1865 | for(;;) { | 1910 | while (hw->st_idx != hwidx) { |
1866 | struct sky2_status_le *le = hw->st_le + hw->st_idx; | 1911 | struct sky2_status_le *le = hw->st_le + hw->st_idx; |
1867 | struct net_device *dev; | 1912 | struct net_device *dev; |
1868 | struct sky2_port *sky2; | 1913 | struct sky2_port *sky2; |
1869 | struct sk_buff *skb; | 1914 | struct sk_buff *skb; |
1870 | u32 status; | 1915 | u32 status; |
1871 | u16 length; | 1916 | u16 length; |
1872 | u8 link, opcode; | ||
1873 | 1917 | ||
1874 | opcode = le->opcode; | 1918 | hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE); |
1875 | if (!opcode) | ||
1876 | break; | ||
1877 | opcode &= ~HW_OWNER; | ||
1878 | |||
1879 | hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; | ||
1880 | le->opcode = 0; | ||
1881 | 1919 | ||
1882 | link = le->link; | 1920 | BUG_ON(le->link >= 2); |
1883 | BUG_ON(link >= 2); | 1921 | dev = hw->dev[le->link]; |
1884 | dev = hw->dev[link]; | ||
1885 | 1922 | ||
1886 | sky2 = netdev_priv(dev); | 1923 | sky2 = netdev_priv(dev); |
1887 | length = le->length; | 1924 | length = le->length; |
1888 | status = le->status; | 1925 | status = le->status; |
1889 | 1926 | ||
1890 | switch (opcode) { | 1927 | switch (le->opcode & ~HW_OWNER) { |
1891 | case OP_RXSTAT: | 1928 | case OP_RXSTAT: |
1892 | skb = sky2_receive(sky2, length, status); | 1929 | skb = sky2_receive(sky2, length, status); |
1893 | if (!skb) | 1930 | if (!skb) |
@@ -1927,7 +1964,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
1927 | 1964 | ||
1928 | case OP_TXINDEXLE: | 1965 | case OP_TXINDEXLE: |
1929 | /* TX index reports status for both ports */ | 1966 | /* TX index reports status for both ports */ |
1930 | sky2_tx_done(hw->dev[0], status & 0xffff); | 1967 | BUILD_BUG_ON(TX_RING_SIZE > 0x1000); |
1968 | sky2_tx_done(hw->dev[0], status & 0xfff); | ||
1931 | if (hw->dev[1]) | 1969 | if (hw->dev[1]) |
1932 | sky2_tx_done(hw->dev[1], | 1970 | sky2_tx_done(hw->dev[1], |
1933 | ((status >> 24) & 0xff) | 1971 | ((status >> 24) & 0xff) |
@@ -1937,8 +1975,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
1937 | default: | 1975 | default: |
1938 | if (net_ratelimit()) | 1976 | if (net_ratelimit()) |
1939 | printk(KERN_WARNING PFX | 1977 | printk(KERN_WARNING PFX |
1940 | "unknown status opcode 0x%x\n", opcode); | 1978 | "unknown status opcode 0x%x\n", le->opcode); |
1941 | break; | 1979 | goto exit_loop; |
1942 | } | 1980 | } |
1943 | } | 1981 | } |
1944 | 1982 | ||
@@ -2089,12 +2127,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port, | |||
2089 | */ | 2127 | */ |
2090 | static void sky2_idle(unsigned long arg) | 2128 | static void sky2_idle(unsigned long arg) |
2091 | { | 2129 | { |
2092 | struct net_device *dev = (struct net_device *) arg; | 2130 | struct sky2_hw *hw = (struct sky2_hw *) arg; |
2131 | struct net_device *dev = hw->dev[0]; | ||
2093 | 2132 | ||
2094 | local_irq_disable(); | ||
2095 | if (__netif_rx_schedule_prep(dev)) | 2133 | if (__netif_rx_schedule_prep(dev)) |
2096 | __netif_rx_schedule(dev); | 2134 | __netif_rx_schedule(dev); |
2097 | local_irq_enable(); | 2135 | |
2136 | mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout)); | ||
2098 | } | 2137 | } |
2099 | 2138 | ||
2100 | 2139 | ||
@@ -2105,65 +2144,46 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
2105 | int work_done = 0; | 2144 | int work_done = 0; |
2106 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); | 2145 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); |
2107 | 2146 | ||
2108 | restart_poll: | 2147 | if (status & Y2_IS_HW_ERR) |
2109 | if (unlikely(status & ~Y2_IS_STAT_BMU)) { | 2148 | sky2_hw_intr(hw); |
2110 | if (status & Y2_IS_HW_ERR) | ||
2111 | sky2_hw_intr(hw); | ||
2112 | |||
2113 | if (status & Y2_IS_IRQ_PHY1) | ||
2114 | sky2_phy_intr(hw, 0); | ||
2115 | |||
2116 | if (status & Y2_IS_IRQ_PHY2) | ||
2117 | sky2_phy_intr(hw, 1); | ||
2118 | 2149 | ||
2119 | if (status & Y2_IS_IRQ_MAC1) | 2150 | if (status & Y2_IS_IRQ_PHY1) |
2120 | sky2_mac_intr(hw, 0); | 2151 | sky2_phy_intr(hw, 0); |
2121 | 2152 | ||
2122 | if (status & Y2_IS_IRQ_MAC2) | 2153 | if (status & Y2_IS_IRQ_PHY2) |
2123 | sky2_mac_intr(hw, 1); | 2154 | sky2_phy_intr(hw, 1); |
2124 | 2155 | ||
2125 | if (status & Y2_IS_CHK_RX1) | 2156 | if (status & Y2_IS_IRQ_MAC1) |
2126 | sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); | 2157 | sky2_mac_intr(hw, 0); |
2127 | 2158 | ||
2128 | if (status & Y2_IS_CHK_RX2) | 2159 | if (status & Y2_IS_IRQ_MAC2) |
2129 | sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); | 2160 | sky2_mac_intr(hw, 1); |
2130 | 2161 | ||
2131 | if (status & Y2_IS_CHK_TXA1) | 2162 | if (status & Y2_IS_CHK_RX1) |
2132 | sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); | 2163 | sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); |
2133 | 2164 | ||
2134 | if (status & Y2_IS_CHK_TXA2) | 2165 | if (status & Y2_IS_CHK_RX2) |
2135 | sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); | 2166 | sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); |
2136 | } | ||
2137 | 2167 | ||
2138 | if (status & Y2_IS_STAT_BMU) { | 2168 | if (status & Y2_IS_CHK_TXA1) |
2139 | work_done += sky2_status_intr(hw, work_limit - work_done); | 2169 | sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); |
2140 | *budget -= work_done; | ||
2141 | dev0->quota -= work_done; | ||
2142 | 2170 | ||
2143 | if (work_done >= work_limit) | 2171 | if (status & Y2_IS_CHK_TXA2) |
2144 | return 1; | 2172 | sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); |
2145 | 2173 | ||
2174 | if (status & Y2_IS_STAT_BMU) | ||
2146 | sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); | 2175 | sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); |
2147 | } | ||
2148 | |||
2149 | mod_timer(&hw->idle_timer, jiffies + HZ); | ||
2150 | 2176 | ||
2151 | local_irq_disable(); | 2177 | work_done = sky2_status_intr(hw, work_limit); |
2152 | __netif_rx_complete(dev0); | 2178 | *budget -= work_done; |
2179 | dev0->quota -= work_done; | ||
2153 | 2180 | ||
2154 | status = sky2_read32(hw, B0_Y2_SP_LISR); | 2181 | if (work_done >= work_limit) |
2182 | return 1; | ||
2155 | 2183 | ||
2156 | if (unlikely(status)) { | 2184 | netif_rx_complete(dev0); |
2157 | /* More work pending, try and keep going */ | ||
2158 | if (__netif_rx_schedule_prep(dev0)) { | ||
2159 | __netif_rx_reschedule(dev0, work_done); | ||
2160 | status = sky2_read32(hw, B0_Y2_SP_EISR); | ||
2161 | local_irq_enable(); | ||
2162 | goto restart_poll; | ||
2163 | } | ||
2164 | } | ||
2165 | 2185 | ||
2166 | local_irq_enable(); | 2186 | status = sky2_read32(hw, B0_Y2_SP_LISR); |
2167 | return 0; | 2187 | return 0; |
2168 | } | 2188 | } |
2169 | 2189 | ||
@@ -2244,13 +2264,6 @@ static int __devinit sky2_reset(struct sky2_hw *hw) | |||
2244 | return -EOPNOTSUPP; | 2264 | return -EOPNOTSUPP; |
2245 | } | 2265 | } |
2246 | 2266 | ||
2247 | /* This chip is new and not tested yet */ | ||
2248 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { | ||
2249 | pr_info(PFX "%s: is a version of Yukon 2 chipset that has not been tested yet.\n", | ||
2250 | pci_name(hw->pdev)); | ||
2251 | pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n"); | ||
2252 | } | ||
2253 | |||
2254 | /* disable ASF */ | 2267 | /* disable ASF */ |
2255 | if (hw->chip_id <= CHIP_ID_YUKON_EC) { | 2268 | if (hw->chip_id <= CHIP_ID_YUKON_EC) { |
2256 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); | 2269 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); |
@@ -3302,7 +3315,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
3302 | 3315 | ||
3303 | sky2_write32(hw, B0_IMSK, Y2_IS_BASE); | 3316 | sky2_write32(hw, B0_IMSK, Y2_IS_BASE); |
3304 | 3317 | ||
3305 | setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) dev); | 3318 | setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw); |
3319 | if (idle_timeout > 0) | ||
3320 | mod_timer(&hw->idle_timer, | ||
3321 | jiffies + msecs_to_jiffies(idle_timeout)); | ||
3306 | 3322 | ||
3307 | pci_set_drvdata(pdev, hw); | 3323 | pci_set_drvdata(pdev, hw); |
3308 | 3324 | ||
@@ -3342,6 +3358,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev) | |||
3342 | del_timer_sync(&hw->idle_timer); | 3358 | del_timer_sync(&hw->idle_timer); |
3343 | 3359 | ||
3344 | sky2_write32(hw, B0_IMSK, 0); | 3360 | sky2_write32(hw, B0_IMSK, 0); |
3361 | synchronize_irq(hw->pdev->irq); | ||
3362 | |||
3345 | dev0 = hw->dev[0]; | 3363 | dev0 = hw->dev[0]; |
3346 | dev1 = hw->dev[1]; | 3364 | dev1 = hw->dev[1]; |
3347 | if (dev1) | 3365 | if (dev1) |
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index b026f5653f04..8012994c9b93 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -378,6 +378,9 @@ enum { | |||
378 | CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ | 378 | CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ |
379 | CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ | 379 | CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ |
380 | CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ | 380 | CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ |
381 | |||
382 | CHIP_REV_YU_EC_U_A0 = 0, | ||
383 | CHIP_REV_YU_EC_U_A1 = 1, | ||
381 | }; | 384 | }; |
382 | 385 | ||
383 | /* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ | 386 | /* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ |
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 43f5e86fc559..394339d5e87c 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -1652,6 +1652,8 @@ spider_net_enable_card(struct spider_net_card *card) | |||
1652 | { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE }, | 1652 | { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE }, |
1653 | 1653 | ||
1654 | { SPIDER_NET_GMRWOLCTRL, 0 }, | 1654 | { SPIDER_NET_GMRWOLCTRL, 0 }, |
1655 | { SPIDER_NET_GTESTMD, 0x10000000 }, | ||
1656 | { SPIDER_NET_GTTQMSK, 0x00400040 }, | ||
1655 | { SPIDER_NET_GTESTMD, 0 }, | 1657 | { SPIDER_NET_GTESTMD, 0 }, |
1656 | 1658 | ||
1657 | { SPIDER_NET_GMACINTEN, 0 }, | 1659 | { SPIDER_NET_GMACINTEN, 0 }, |
@@ -1792,15 +1794,7 @@ spider_net_setup_phy(struct spider_net_card *card) | |||
1792 | if (phy->def->ops->setup_forced) | 1794 | if (phy->def->ops->setup_forced) |
1793 | phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL); | 1795 | phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL); |
1794 | 1796 | ||
1795 | /* the following two writes could be moved to sungem_phy.c */ | 1797 | phy->def->ops->enable_fiber(phy); |
1796 | /* enable fiber mode */ | ||
1797 | spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x9020); | ||
1798 | /* LEDs active in both modes, autosense prio = fiber */ | ||
1799 | spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f); | ||
1800 | |||
1801 | /* switch off fibre autoneg */ | ||
1802 | spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01); | ||
1803 | spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004); | ||
1804 | 1798 | ||
1805 | phy->def->ops->read_link(phy); | 1799 | phy->def->ops->read_link(phy); |
1806 | pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name, | 1800 | pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name, |
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index 5922b529a048..3b8d951cf73c 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h | |||
@@ -120,6 +120,8 @@ extern char spider_net_driver_name[]; | |||
120 | #define SPIDER_NET_GMRUAFILnR 0x00000500 | 120 | #define SPIDER_NET_GMRUAFILnR 0x00000500 |
121 | #define SPIDER_NET_GMRUA0FIL15R 0x00000578 | 121 | #define SPIDER_NET_GMRUA0FIL15R 0x00000578 |
122 | 122 | ||
123 | #define SPIDER_NET_GTTQMSK 0x00000934 | ||
124 | |||
123 | /* RX DMA controller registers, all 0x00000a.. are for DMA controller A, | 125 | /* RX DMA controller registers, all 0x00000a.. are for DMA controller A, |
124 | * 0x00000b.. for DMA controller B, etc. */ | 126 | * 0x00000b.. for DMA controller B, etc. */ |
125 | #define SPIDER_NET_GDADCHA 0x00000a00 | 127 | #define SPIDER_NET_GDADCHA 0x00000a00 |
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index 046371ee5bbe..b2ddd5e79303 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c | |||
@@ -329,6 +329,30 @@ static int bcm5421_init(struct mii_phy* phy) | |||
329 | return 0; | 329 | return 0; |
330 | } | 330 | } |
331 | 331 | ||
332 | static int bcm5421_enable_fiber(struct mii_phy* phy) | ||
333 | { | ||
334 | /* enable fiber mode */ | ||
335 | phy_write(phy, MII_NCONFIG, 0x9020); | ||
336 | /* LEDs active in both modes, autosense prio = fiber */ | ||
337 | phy_write(phy, MII_NCONFIG, 0x945f); | ||
338 | |||
339 | /* switch off fibre autoneg */ | ||
340 | phy_write(phy, MII_NCONFIG, 0xfc01); | ||
341 | phy_write(phy, 0x0b, 0x0004); | ||
342 | |||
343 | return 0; | ||
344 | } | ||
345 | |||
346 | static int bcm5461_enable_fiber(struct mii_phy* phy) | ||
347 | { | ||
348 | phy_write(phy, MII_NCONFIG, 0xfc0c); | ||
349 | phy_write(phy, MII_BMCR, 0x4140); | ||
350 | phy_write(phy, MII_NCONFIG, 0xfc0b); | ||
351 | phy_write(phy, MII_BMCR, 0x0140); | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
332 | static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) | 356 | static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) |
333 | { | 357 | { |
334 | u16 ctl, adv; | 358 | u16 ctl, adv; |
@@ -762,6 +786,7 @@ static struct mii_phy_ops bcm5421_phy_ops = { | |||
762 | .setup_forced = bcm54xx_setup_forced, | 786 | .setup_forced = bcm54xx_setup_forced, |
763 | .poll_link = genmii_poll_link, | 787 | .poll_link = genmii_poll_link, |
764 | .read_link = bcm54xx_read_link, | 788 | .read_link = bcm54xx_read_link, |
789 | .enable_fiber = bcm5421_enable_fiber, | ||
765 | }; | 790 | }; |
766 | 791 | ||
767 | static struct mii_phy_def bcm5421_phy_def = { | 792 | static struct mii_phy_def bcm5421_phy_def = { |
@@ -792,6 +817,25 @@ static struct mii_phy_def bcm5421k2_phy_def = { | |||
792 | .ops = &bcm5421k2_phy_ops | 817 | .ops = &bcm5421k2_phy_ops |
793 | }; | 818 | }; |
794 | 819 | ||
820 | static struct mii_phy_ops bcm5461_phy_ops = { | ||
821 | .init = bcm5421_init, | ||
822 | .suspend = generic_suspend, | ||
823 | .setup_aneg = bcm54xx_setup_aneg, | ||
824 | .setup_forced = bcm54xx_setup_forced, | ||
825 | .poll_link = genmii_poll_link, | ||
826 | .read_link = bcm54xx_read_link, | ||
827 | .enable_fiber = bcm5461_enable_fiber, | ||
828 | }; | ||
829 | |||
830 | static struct mii_phy_def bcm5461_phy_def = { | ||
831 | .phy_id = 0x002060c0, | ||
832 | .phy_id_mask = 0xfffffff0, | ||
833 | .name = "BCM5461", | ||
834 | .features = MII_GBIT_FEATURES, | ||
835 | .magic_aneg = 1, | ||
836 | .ops = &bcm5461_phy_ops | ||
837 | }; | ||
838 | |||
795 | /* Broadcom BCM 5462 built-in Vesta */ | 839 | /* Broadcom BCM 5462 built-in Vesta */ |
796 | static struct mii_phy_ops bcm5462V_phy_ops = { | 840 | static struct mii_phy_ops bcm5462V_phy_ops = { |
797 | .init = bcm5421_init, | 841 | .init = bcm5421_init, |
@@ -857,6 +901,7 @@ static struct mii_phy_def* mii_phy_table[] = { | |||
857 | &bcm5411_phy_def, | 901 | &bcm5411_phy_def, |
858 | &bcm5421_phy_def, | 902 | &bcm5421_phy_def, |
859 | &bcm5421k2_phy_def, | 903 | &bcm5421k2_phy_def, |
904 | &bcm5461_phy_def, | ||
860 | &bcm5462V_phy_def, | 905 | &bcm5462V_phy_def, |
861 | &marvell_phy_def, | 906 | &marvell_phy_def, |
862 | &genmii_phy_def, | 907 | &genmii_phy_def, |
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h index 430544496c52..69e125197fcf 100644 --- a/drivers/net/sungem_phy.h +++ b/drivers/net/sungem_phy.h | |||
@@ -12,6 +12,7 @@ struct mii_phy_ops | |||
12 | int (*setup_forced)(struct mii_phy *phy, int speed, int fd); | 12 | int (*setup_forced)(struct mii_phy *phy, int speed, int fd); |
13 | int (*poll_link)(struct mii_phy *phy); | 13 | int (*poll_link)(struct mii_phy *phy); |
14 | int (*read_link)(struct mii_phy *phy); | 14 | int (*read_link)(struct mii_phy *phy); |
15 | int (*enable_fiber)(struct mii_phy *phy); | ||
15 | }; | 16 | }; |
16 | 17 | ||
17 | /* Structure used to statically define an mii/gii based PHY */ | 18 | /* Structure used to statically define an mii/gii based PHY */ |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 73e271e59c6a..e1b33a25a25f 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -69,8 +69,8 @@ | |||
69 | 69 | ||
70 | #define DRV_MODULE_NAME "tg3" | 70 | #define DRV_MODULE_NAME "tg3" |
71 | #define PFX DRV_MODULE_NAME ": " | 71 | #define PFX DRV_MODULE_NAME ": " |
72 | #define DRV_MODULE_VERSION "3.56" | 72 | #define DRV_MODULE_VERSION "3.57" |
73 | #define DRV_MODULE_RELDATE "Apr 1, 2006" | 73 | #define DRV_MODULE_RELDATE "Apr 28, 2006" |
74 | 74 | ||
75 | #define TG3_DEF_MAC_MODE 0 | 75 | #define TG3_DEF_MAC_MODE 0 |
76 | #define TG3_DEF_RX_MODE 0 | 76 | #define TG3_DEF_RX_MODE 0 |
@@ -974,6 +974,8 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) | |||
974 | return err; | 974 | return err; |
975 | } | 975 | } |
976 | 976 | ||
977 | static void tg3_link_report(struct tg3 *); | ||
978 | |||
977 | /* This will reset the tigon3 PHY if there is no valid | 979 | /* This will reset the tigon3 PHY if there is no valid |
978 | * link unless the FORCE argument is non-zero. | 980 | * link unless the FORCE argument is non-zero. |
979 | */ | 981 | */ |
@@ -987,6 +989,11 @@ static int tg3_phy_reset(struct tg3 *tp) | |||
987 | if (err != 0) | 989 | if (err != 0) |
988 | return -EBUSY; | 990 | return -EBUSY; |
989 | 991 | ||
992 | if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) { | ||
993 | netif_carrier_off(tp->dev); | ||
994 | tg3_link_report(tp); | ||
995 | } | ||
996 | |||
990 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 997 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || |
991 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 998 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || |
992 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 999 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { |
@@ -1023,6 +1030,12 @@ out: | |||
1023 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); | 1030 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); |
1024 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | 1031 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); |
1025 | } | 1032 | } |
1033 | else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { | ||
1034 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); | ||
1035 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); | ||
1036 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); | ||
1037 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | ||
1038 | } | ||
1026 | /* Set Extended packet length bit (bit 14) on all chips that */ | 1039 | /* Set Extended packet length bit (bit 14) on all chips that */ |
1027 | /* support jumbo frames */ | 1040 | /* support jumbo frames */ |
1028 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { | 1041 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { |
@@ -3531,7 +3544,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id, | |||
3531 | return IRQ_RETVAL(0); | 3544 | return IRQ_RETVAL(0); |
3532 | } | 3545 | } |
3533 | 3546 | ||
3534 | static int tg3_init_hw(struct tg3 *); | 3547 | static int tg3_init_hw(struct tg3 *, int); |
3535 | static int tg3_halt(struct tg3 *, int, int); | 3548 | static int tg3_halt(struct tg3 *, int, int); |
3536 | 3549 | ||
3537 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3550 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -3567,7 +3580,7 @@ static void tg3_reset_task(void *_data) | |||
3567 | tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; | 3580 | tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; |
3568 | 3581 | ||
3569 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); | 3582 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); |
3570 | tg3_init_hw(tp); | 3583 | tg3_init_hw(tp, 1); |
3571 | 3584 | ||
3572 | tg3_netif_start(tp); | 3585 | tg3_netif_start(tp); |
3573 | 3586 | ||
@@ -4042,7 +4055,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) | |||
4042 | 4055 | ||
4043 | tg3_set_mtu(dev, tp, new_mtu); | 4056 | tg3_set_mtu(dev, tp, new_mtu); |
4044 | 4057 | ||
4045 | tg3_init_hw(tp); | 4058 | tg3_init_hw(tp, 0); |
4046 | 4059 | ||
4047 | tg3_netif_start(tp); | 4060 | tg3_netif_start(tp); |
4048 | 4061 | ||
@@ -5719,9 +5732,23 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5719 | if (!netif_running(dev)) | 5732 | if (!netif_running(dev)) |
5720 | return 0; | 5733 | return 0; |
5721 | 5734 | ||
5722 | spin_lock_bh(&tp->lock); | 5735 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { |
5723 | __tg3_set_mac_addr(tp); | 5736 | /* Reset chip so that ASF can re-init any MAC addresses it |
5724 | spin_unlock_bh(&tp->lock); | 5737 | * needs. |
5738 | */ | ||
5739 | tg3_netif_stop(tp); | ||
5740 | tg3_full_lock(tp, 1); | ||
5741 | |||
5742 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | ||
5743 | tg3_init_hw(tp, 0); | ||
5744 | |||
5745 | tg3_netif_start(tp); | ||
5746 | tg3_full_unlock(tp); | ||
5747 | } else { | ||
5748 | spin_lock_bh(&tp->lock); | ||
5749 | __tg3_set_mac_addr(tp); | ||
5750 | spin_unlock_bh(&tp->lock); | ||
5751 | } | ||
5725 | 5752 | ||
5726 | return 0; | 5753 | return 0; |
5727 | } | 5754 | } |
@@ -5771,7 +5798,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | |||
5771 | } | 5798 | } |
5772 | 5799 | ||
5773 | /* tp->lock is held. */ | 5800 | /* tp->lock is held. */ |
5774 | static int tg3_reset_hw(struct tg3 *tp) | 5801 | static int tg3_reset_hw(struct tg3 *tp, int reset_phy) |
5775 | { | 5802 | { |
5776 | u32 val, rdmac_mode; | 5803 | u32 val, rdmac_mode; |
5777 | int i, err, limit; | 5804 | int i, err, limit; |
@@ -5786,7 +5813,7 @@ static int tg3_reset_hw(struct tg3 *tp) | |||
5786 | tg3_abort_hw(tp, 1); | 5813 | tg3_abort_hw(tp, 1); |
5787 | } | 5814 | } |
5788 | 5815 | ||
5789 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) | 5816 | if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy) |
5790 | tg3_phy_reset(tp); | 5817 | tg3_phy_reset(tp); |
5791 | 5818 | ||
5792 | err = tg3_chip_reset(tp); | 5819 | err = tg3_chip_reset(tp); |
@@ -6327,7 +6354,7 @@ static int tg3_reset_hw(struct tg3 *tp) | |||
6327 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | 6354 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); |
6328 | } | 6355 | } |
6329 | 6356 | ||
6330 | err = tg3_setup_phy(tp, 1); | 6357 | err = tg3_setup_phy(tp, reset_phy); |
6331 | if (err) | 6358 | if (err) |
6332 | return err; | 6359 | return err; |
6333 | 6360 | ||
@@ -6400,7 +6427,7 @@ static int tg3_reset_hw(struct tg3 *tp) | |||
6400 | /* Called at device open time to get the chip ready for | 6427 | /* Called at device open time to get the chip ready for |
6401 | * packet processing. Invoked with tp->lock held. | 6428 | * packet processing. Invoked with tp->lock held. |
6402 | */ | 6429 | */ |
6403 | static int tg3_init_hw(struct tg3 *tp) | 6430 | static int tg3_init_hw(struct tg3 *tp, int reset_phy) |
6404 | { | 6431 | { |
6405 | int err; | 6432 | int err; |
6406 | 6433 | ||
@@ -6413,7 +6440,7 @@ static int tg3_init_hw(struct tg3 *tp) | |||
6413 | 6440 | ||
6414 | tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); | 6441 | tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); |
6415 | 6442 | ||
6416 | err = tg3_reset_hw(tp); | 6443 | err = tg3_reset_hw(tp, reset_phy); |
6417 | 6444 | ||
6418 | out: | 6445 | out: |
6419 | return err; | 6446 | return err; |
@@ -6683,7 +6710,7 @@ static int tg3_test_msi(struct tg3 *tp) | |||
6683 | tg3_full_lock(tp, 1); | 6710 | tg3_full_lock(tp, 1); |
6684 | 6711 | ||
6685 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 6712 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
6686 | err = tg3_init_hw(tp); | 6713 | err = tg3_init_hw(tp, 1); |
6687 | 6714 | ||
6688 | tg3_full_unlock(tp); | 6715 | tg3_full_unlock(tp); |
6689 | 6716 | ||
@@ -6748,7 +6775,7 @@ static int tg3_open(struct net_device *dev) | |||
6748 | 6775 | ||
6749 | tg3_full_lock(tp, 0); | 6776 | tg3_full_lock(tp, 0); |
6750 | 6777 | ||
6751 | err = tg3_init_hw(tp); | 6778 | err = tg3_init_hw(tp, 1); |
6752 | if (err) { | 6779 | if (err) { |
6753 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 6780 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
6754 | tg3_free_rings(tp); | 6781 | tg3_free_rings(tp); |
@@ -7626,21 +7653,23 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
7626 | cmd->supported |= (SUPPORTED_1000baseT_Half | | 7653 | cmd->supported |= (SUPPORTED_1000baseT_Half | |
7627 | SUPPORTED_1000baseT_Full); | 7654 | SUPPORTED_1000baseT_Full); |
7628 | 7655 | ||
7629 | if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) | 7656 | if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { |
7630 | cmd->supported |= (SUPPORTED_100baseT_Half | | 7657 | cmd->supported |= (SUPPORTED_100baseT_Half | |
7631 | SUPPORTED_100baseT_Full | | 7658 | SUPPORTED_100baseT_Full | |
7632 | SUPPORTED_10baseT_Half | | 7659 | SUPPORTED_10baseT_Half | |
7633 | SUPPORTED_10baseT_Full | | 7660 | SUPPORTED_10baseT_Full | |
7634 | SUPPORTED_MII); | 7661 | SUPPORTED_MII); |
7635 | else | 7662 | cmd->port = PORT_TP; |
7663 | } else { | ||
7636 | cmd->supported |= SUPPORTED_FIBRE; | 7664 | cmd->supported |= SUPPORTED_FIBRE; |
7665 | cmd->port = PORT_FIBRE; | ||
7666 | } | ||
7637 | 7667 | ||
7638 | cmd->advertising = tp->link_config.advertising; | 7668 | cmd->advertising = tp->link_config.advertising; |
7639 | if (netif_running(dev)) { | 7669 | if (netif_running(dev)) { |
7640 | cmd->speed = tp->link_config.active_speed; | 7670 | cmd->speed = tp->link_config.active_speed; |
7641 | cmd->duplex = tp->link_config.active_duplex; | 7671 | cmd->duplex = tp->link_config.active_duplex; |
7642 | } | 7672 | } |
7643 | cmd->port = 0; | ||
7644 | cmd->phy_address = PHY_ADDR; | 7673 | cmd->phy_address = PHY_ADDR; |
7645 | cmd->transceiver = 0; | 7674 | cmd->transceiver = 0; |
7646 | cmd->autoneg = tp->link_config.autoneg; | 7675 | cmd->autoneg = tp->link_config.autoneg; |
@@ -7839,7 +7868,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e | |||
7839 | 7868 | ||
7840 | if (netif_running(dev)) { | 7869 | if (netif_running(dev)) { |
7841 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 7870 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
7842 | tg3_init_hw(tp); | 7871 | tg3_init_hw(tp, 1); |
7843 | tg3_netif_start(tp); | 7872 | tg3_netif_start(tp); |
7844 | } | 7873 | } |
7845 | 7874 | ||
@@ -7884,7 +7913,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
7884 | 7913 | ||
7885 | if (netif_running(dev)) { | 7914 | if (netif_running(dev)) { |
7886 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 7915 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
7887 | tg3_init_hw(tp); | 7916 | tg3_init_hw(tp, 1); |
7888 | tg3_netif_start(tp); | 7917 | tg3_netif_start(tp); |
7889 | } | 7918 | } |
7890 | 7919 | ||
@@ -8427,6 +8456,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
8427 | 8456 | ||
8428 | tx_len = 1514; | 8457 | tx_len = 1514; |
8429 | skb = dev_alloc_skb(tx_len); | 8458 | skb = dev_alloc_skb(tx_len); |
8459 | if (!skb) | ||
8460 | return -ENOMEM; | ||
8461 | |||
8430 | tx_data = skb_put(skb, tx_len); | 8462 | tx_data = skb_put(skb, tx_len); |
8431 | memcpy(tx_data, tp->dev->dev_addr, 6); | 8463 | memcpy(tx_data, tp->dev->dev_addr, 6); |
8432 | memset(tx_data + 6, 0x0, 8); | 8464 | memset(tx_data + 6, 0x0, 8); |
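The loopback self-test used the skb returned by dev_alloc_skb() without checking it, so an allocation failure under memory pressure would have dereferenced a NULL pointer in skb_put(). The added check simply fails the test with -ENOMEM. The pattern in isolation:

    struct sk_buff *skb = dev_alloc_skb(tx_len);
    if (!skb)
        return -ENOMEM;             /* fail the self-test instead of oopsing */
    tx_data = skb_put(skb, tx_len); /* safe: skb is known to be valid */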
@@ -8522,7 +8554,7 @@ static int tg3_test_loopback(struct tg3 *tp) | |||
8522 | if (!netif_running(tp->dev)) | 8554 | if (!netif_running(tp->dev)) |
8523 | return TG3_LOOPBACK_FAILED; | 8555 | return TG3_LOOPBACK_FAILED; |
8524 | 8556 | ||
8525 | tg3_reset_hw(tp); | 8557 | tg3_reset_hw(tp, 1); |
8526 | 8558 | ||
8527 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) | 8559 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) |
8528 | err |= TG3_MAC_LOOPBACK_FAILED; | 8560 | err |= TG3_MAC_LOOPBACK_FAILED; |
@@ -8596,7 +8628,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
8596 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8628 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
8597 | if (netif_running(dev)) { | 8629 | if (netif_running(dev)) { |
8598 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 8630 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
8599 | tg3_init_hw(tp); | 8631 | tg3_init_hw(tp, 1); |
8600 | tg3_netif_start(tp); | 8632 | tg3_netif_start(tp); |
8601 | } | 8633 | } |
8602 | 8634 | ||
@@ -9377,7 +9409,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, | |||
9377 | 9409 | ||
9378 | if ((page_off == 0) || (i == 0)) | 9410 | if ((page_off == 0) || (i == 0)) |
9379 | nvram_cmd |= NVRAM_CMD_FIRST; | 9411 | nvram_cmd |= NVRAM_CMD_FIRST; |
9380 | else if (page_off == (tp->nvram_pagesize - 4)) | 9412 | if (page_off == (tp->nvram_pagesize - 4)) |
9381 | nvram_cmd |= NVRAM_CMD_LAST; | 9413 | nvram_cmd |= NVRAM_CMD_LAST; |
9382 | 9414 | ||
9383 | if (i == (len - 4)) | 9415 | if (i == (len - 4)) |
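Turning the `else if` into a plain `if` lets NVRAM_CMD_FIRST and NVRAM_CMD_LAST be set on the same word: a word that begins the buffered transfer (page_off == 0 or i == 0) and also occupies the last slot of a flash page previously lost its LAST flag. The selection now reads:

    if ((page_off == 0) || (i == 0))
        nvram_cmd |= NVRAM_CMD_FIRST;   /* first word of a page or of the transfer */
    if (page_off == (tp->nvram_pagesize - 4))   /* was "else if" */
        nvram_cmd |= NVRAM_CMD_LAST;    /* last word of the flash page */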
@@ -10353,10 +10385,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
10353 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) | 10385 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) |
10354 | tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; | 10386 | tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; |
10355 | 10387 | ||
10356 | if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 10388 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
10357 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && | 10389 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
10358 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)) | 10390 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) |
10359 | tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; | 10391 | tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; |
10392 | else | ||
10393 | tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; | ||
10394 | } | ||
10360 | 10395 | ||
10361 | tp->coalesce_mode = 0; | 10396 | tp->coalesce_mode = 0; |
10362 | if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && | 10397 | if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && |
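The single PHY_BER_BUG workaround is split per chip: on 5705-class silicon the 5755 and 5787 now get the new TG3_FLG2_PHY_JITTER_BUG (added to tg3.h below), while every other 5705-plus chip keeps TG3_FLG2_PHY_BER_BUG; previously the 5755/5787 received no workaround flag at all. Condensed:

    if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
            tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;  /* new flag */
        else
            tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;     /* as before */
    }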
@@ -11569,7 +11604,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | |||
11569 | tg3_full_lock(tp, 0); | 11604 | tg3_full_lock(tp, 0); |
11570 | 11605 | ||
11571 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 11606 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
11572 | tg3_init_hw(tp); | 11607 | tg3_init_hw(tp, 1); |
11573 | 11608 | ||
11574 | tp->timer.expires = jiffies + tp->timer_offset; | 11609 | tp->timer.expires = jiffies + tp->timer_offset; |
11575 | add_timer(&tp->timer); | 11610 | add_timer(&tp->timer); |
@@ -11603,7 +11638,7 @@ static int tg3_resume(struct pci_dev *pdev) | |||
11603 | tg3_full_lock(tp, 0); | 11638 | tg3_full_lock(tp, 0); |
11604 | 11639 | ||
11605 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 11640 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
11606 | tg3_init_hw(tp); | 11641 | tg3_init_hw(tp, 1); |
11607 | 11642 | ||
11608 | tp->timer.expires = jiffies + tp->timer_offset; | 11643 | tp->timer.expires = jiffies + tp->timer_offset; |
11609 | add_timer(&tp->timer); | 11644 | add_timer(&tp->timer); |
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 8c8b987d1250..0e29b885d449 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
@@ -2215,6 +2215,7 @@ struct tg3 { | |||
2215 | #define TG3_FLG2_HW_TSO_2 0x08000000 | 2215 | #define TG3_FLG2_HW_TSO_2 0x08000000 |
2216 | #define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) | 2216 | #define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) |
2217 | #define TG3_FLG2_1SHOT_MSI 0x10000000 | 2217 | #define TG3_FLG2_1SHOT_MSI 0x10000000 |
2218 | #define TG3_FLG2_PHY_JITTER_BUG 0x20000000 | ||
2218 | 2219 | ||
2219 | u32 split_mode_max_reqs; | 2220 | u32 split_mode_max_reqs; |
2220 | #define SPLIT_MODE_5704_MAX_REQ 3 | 2221 | #define SPLIT_MODE_5704_MAX_REQ 3 |
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 6a23964c1317..a6dc53b4250d 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -129,6 +129,7 @@ | |||
129 | - Massive clean-up | 129 | - Massive clean-up |
130 | - Rewrite PHY, media handling (remove options, full_duplex, backoff) | 130 | - Rewrite PHY, media handling (remove options, full_duplex, backoff) |
131 | - Fix Tx engine race for good | 131 | - Fix Tx engine race for good |
132 | - Craig Brind: Zero padded aligned buffers for short packets. | ||
132 | 133 | ||
133 | */ | 134 | */ |
134 | 135 | ||
@@ -1326,7 +1327,12 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1326 | rp->stats.tx_dropped++; | 1327 | rp->stats.tx_dropped++; |
1327 | return 0; | 1328 | return 0; |
1328 | } | 1329 | } |
1330 | |||
1331 | /* Padding is not copied and so must be redone. */ | ||
1329 | skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); | 1332 | skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); |
1333 | if (skb->len < ETH_ZLEN) | ||
1334 | memset(rp->tx_buf[entry] + skb->len, 0, | ||
1335 | ETH_ZLEN - skb->len); | ||
1330 | rp->tx_skbuff_dma[entry] = 0; | 1336 | rp->tx_skbuff_dma[entry] = 0; |
1331 | rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + | 1337 | rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + |
1332 | (rp->tx_buf[entry] - | 1338 | (rp->tx_buf[entry] - |
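via-rhine copies frames that need realignment into a driver-owned bounce buffer with skb_copy_and_csum_dev(), which copies exactly skb->len bytes. Any padding arranged on the original skb is therefore not copied, and a frame shorter than ETH_ZLEN (60 bytes) would otherwise go out carrying whatever stale bytes were left in the bounce buffer by the previous packet. Zeroing the tail fixes both the information leak and the runt-frame padding. The pattern in isolation:

    skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);  /* copies only skb->len bytes */
    if (skb->len < ETH_ZLEN)                        /* minimum Ethernet frame: 60 bytes */
        memset(rp->tx_buf[entry] + skb->len, 0, ETH_ZLEN - skb->len);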
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index 9a06e61df0a2..e2982a83ae42 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c | |||
@@ -939,9 +939,9 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm) | |||
939 | return 0; | 939 | return 0; |
940 | } | 940 | } |
941 | 941 | ||
942 | static void bcm43xx_geo_init(struct bcm43xx_private *bcm) | 942 | static int bcm43xx_geo_init(struct bcm43xx_private *bcm) |
943 | { | 943 | { |
944 | struct ieee80211_geo geo; | 944 | struct ieee80211_geo *geo; |
945 | struct ieee80211_channel *chan; | 945 | struct ieee80211_channel *chan; |
946 | int have_a = 0, have_bg = 0; | 946 | int have_a = 0, have_bg = 0; |
947 | int i; | 947 | int i; |
@@ -949,7 +949,10 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm) | |||
949 | struct bcm43xx_phyinfo *phy; | 949 | struct bcm43xx_phyinfo *phy; |
950 | const char *iso_country; | 950 | const char *iso_country; |
951 | 951 | ||
952 | memset(&geo, 0, sizeof(geo)); | 952 | geo = kzalloc(sizeof(*geo), GFP_KERNEL); |
953 | if (!geo) | ||
954 | return -ENOMEM; | ||
955 | |||
953 | for (i = 0; i < bcm->nr_80211_available; i++) { | 956 | for (i = 0; i < bcm->nr_80211_available; i++) { |
954 | phy = &(bcm->core_80211_ext[i].phy); | 957 | phy = &(bcm->core_80211_ext[i].phy); |
955 | switch (phy->type) { | 958 | switch (phy->type) { |
@@ -967,31 +970,36 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm) | |||
967 | iso_country = bcm43xx_locale_iso(bcm->sprom.locale); | 970 | iso_country = bcm43xx_locale_iso(bcm->sprom.locale); |
968 | 971 | ||
969 | if (have_a) { | 972 | if (have_a) { |
970 | for (i = 0, channel = 0; channel < 201; channel++) { | 973 | for (i = 0, channel = IEEE80211_52GHZ_MIN_CHANNEL; |
971 | chan = &geo.a[i++]; | 974 | channel <= IEEE80211_52GHZ_MAX_CHANNEL; channel++) { |
975 | chan = &geo->a[i++]; | ||
972 | chan->freq = bcm43xx_channel_to_freq_a(channel); | 976 | chan->freq = bcm43xx_channel_to_freq_a(channel); |
973 | chan->channel = channel; | 977 | chan->channel = channel; |
974 | } | 978 | } |
975 | geo.a_channels = i; | 979 | geo->a_channels = i; |
976 | } | 980 | } |
977 | if (have_bg) { | 981 | if (have_bg) { |
978 | for (i = 0, channel = 1; channel < 15; channel++) { | 982 | for (i = 0, channel = IEEE80211_24GHZ_MIN_CHANNEL; |
979 | chan = &geo.bg[i++]; | 983 | channel <= IEEE80211_24GHZ_MAX_CHANNEL; channel++) { |
984 | chan = &geo->bg[i++]; | ||
980 | chan->freq = bcm43xx_channel_to_freq_bg(channel); | 985 | chan->freq = bcm43xx_channel_to_freq_bg(channel); |
981 | chan->channel = channel; | 986 | chan->channel = channel; |
982 | } | 987 | } |
983 | geo.bg_channels = i; | 988 | geo->bg_channels = i; |
984 | } | 989 | } |
985 | memcpy(geo.name, iso_country, 2); | 990 | memcpy(geo->name, iso_country, 2); |
986 | if (0 /*TODO: Outdoor use only */) | 991 | if (0 /*TODO: Outdoor use only */) |
987 | geo.name[2] = 'O'; | 992 | geo->name[2] = 'O'; |
988 | else if (0 /*TODO: Indoor use only */) | 993 | else if (0 /*TODO: Indoor use only */) |
989 | geo.name[2] = 'I'; | 994 | geo->name[2] = 'I'; |
990 | else | 995 | else |
991 | geo.name[2] = ' '; | 996 | geo->name[2] = ' '; |
992 | geo.name[3] = '\0'; | 997 | geo->name[3] = '\0'; |
998 | |||
999 | ieee80211_set_geo(bcm->ieee, geo); | ||
1000 | kfree(geo); | ||
993 | 1001 | ||
994 | ieee80211_set_geo(bcm->ieee, &geo); | 1002 | return 0; |
995 | } | 1003 | } |
996 | 1004 | ||
997 | /* DummyTransmission function, as documented on | 1005 | /* DummyTransmission function, as documented on |
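struct ieee80211_geo embeds the full per-band channel tables, so keeping one on the limited kernel stack of bcm43xx_geo_init() is risky. The function now kzalloc()s the structure, hands it to ieee80211_set_geo(), frees it, and returns an int so an allocation failure can propagate to the caller. The channel loops also switch from magic numbers to the IEEE80211_{24,52}GHZ_{MIN,MAX}_CHANNEL constants. The allocate/fill/free shape, reduced to its essentials:

    /* Reduced sketch of the new pattern; the fill step is elided. */
    static int bcm43xx_geo_init(struct bcm43xx_private *bcm)
    {
        struct ieee80211_geo *geo;

        geo = kzalloc(sizeof(*geo), GFP_KERNEL);  /* off the stack, zero-filled */
        if (!geo)
            return -ENOMEM;

        /* ... fill geo->a[], geo->bg[] and geo->name as in the hunk ... */

        ieee80211_set_geo(bcm->ieee, geo);  /* hand the tables to ieee80211 */
        kfree(geo);                         /* freed right away, as the patch does */
        return 0;
    }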
@@ -3479,16 +3487,17 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm) | |||
3479 | goto err_80211_unwind; | 3487 | goto err_80211_unwind; |
3480 | bcm43xx_wireless_core_disable(bcm); | 3488 | bcm43xx_wireless_core_disable(bcm); |
3481 | } | 3489 | } |
3490 | err = bcm43xx_geo_init(bcm); | ||
3491 | if (err) | ||
3492 | goto err_80211_unwind; | ||
3482 | bcm43xx_pctl_set_crystal(bcm, 0); | 3493 | bcm43xx_pctl_set_crystal(bcm, 0); |
3483 | 3494 | ||
3484 | /* Set the MAC address in the networking subsystem */ | 3495 | /* Set the MAC address in the networking subsystem */ |
3485 | if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A) | 3496 | if (is_valid_ether_addr(bcm->sprom.et1macaddr)) |
3486 | memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6); | 3497 | memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6); |
3487 | else | 3498 | else |
3488 | memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6); | 3499 | memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6); |
3489 | 3500 | ||
3490 | bcm43xx_geo_init(bcm); | ||
3491 | |||
3492 | snprintf(bcm->nick, IW_ESSID_MAX_SIZE, | 3501 | snprintf(bcm->nick, IW_ESSID_MAX_SIZE, |
3493 | "Broadcom %04X", bcm->chip_id); | 3502 | "Broadcom %04X", bcm->chip_id); |
3494 | 3503 | ||
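Two changes in attach_board: bcm43xx_geo_init() is now called (and its new error code checked) before the crystal is powered down, and MAC address selection no longer keys off the PHY type but off whether the SPROM's et1macaddr looks usable (is_valid_ether_addr() rejects all-zero and multicast addresses), falling back to il0macaddr otherwise. Condensed:

    err = bcm43xx_geo_init(bcm);
    if (err)
        goto err_80211_unwind;              /* propagate -ENOMEM from geo_init */
    bcm43xx_pctl_set_crystal(bcm, 0);

    if (is_valid_ether_addr(bcm->sprom.et1macaddr))     /* prefer et1 if sane */
        memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6);
    else                                                /* otherwise fall back */
        memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6);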
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h index eca79a38594a..30a202b258b5 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h | |||
@@ -118,12 +118,14 @@ int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm, | |||
118 | static inline | 118 | static inline |
119 | int bcm43xx_is_valid_channel_a(u8 channel) | 119 | int bcm43xx_is_valid_channel_a(u8 channel) |
120 | { | 120 | { |
121 | return (channel <= 200); | 121 | return (channel >= IEEE80211_52GHZ_MIN_CHANNEL |
122 | && channel <= IEEE80211_52GHZ_MAX_CHANNEL); | ||
122 | } | 123 | } |
123 | static inline | 124 | static inline |
124 | int bcm43xx_is_valid_channel_bg(u8 channel) | 125 | int bcm43xx_is_valid_channel_bg(u8 channel) |
125 | { | 126 | { |
126 | return (channel >= 1 && channel <= 14); | 127 | return (channel >= IEEE80211_24GHZ_MIN_CHANNEL |
128 | && channel <= IEEE80211_24GHZ_MAX_CHANNEL); | ||
127 | } | 129 | } |
128 | static inline | 130 | static inline |
129 | int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm, | 131 | int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm, |
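The channel-validity helpers drop their hard-coded bounds (previously 0..200 for the A band and 1..14 for B/G) in favour of the shared IEEE80211_*_CHANNEL constants, keeping them in step with the geo tables built in bcm43xx_geo_init() above. A hypothetical caller (the function name below is illustrative, not from the driver):

    /* Hypothetical caller, for illustration only. */
    static int example_tune_bg(u8 channel)
    {
        if (!bcm43xx_is_valid_channel_bg(channel))
            return -EINVAL;         /* outside the 2.4 GHz range */
        /* ... proceed to tune the radio ... */
        return 0;
    }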
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c index 33137165727f..b0abac515530 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c | |||
@@ -1287,7 +1287,7 @@ static void bcm43xx_phy_initg(struct bcm43xx_private *bcm) | |||
1287 | if (radio->revision == 8) | 1287 | if (radio->revision == 8) |
1288 | bcm43xx_phy_write(bcm, 0x0805, 0x3230); | 1288 | bcm43xx_phy_write(bcm, 0x0805, 0x3230); |
1289 | bcm43xx_phy_init_pctl(bcm); | 1289 | bcm43xx_phy_init_pctl(bcm); |
1290 | if (bcm->chip_id == 0x4306 && bcm->chip_package != 2) { | 1290 | if (bcm->chip_id == 0x4306 && bcm->chip_package == 2) { |
1291 | bcm43xx_phy_write(bcm, 0x0429, | 1291 | bcm43xx_phy_write(bcm, 0x0429, |
1292 | bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF); | 1292 | bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF); |
1293 | bcm43xx_phy_write(bcm, 0x04C3, | 1293 | bcm43xx_phy_write(bcm, 0x04C3, |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c index 3edbb481a0a0..b45063974ae9 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c | |||
@@ -182,8 +182,11 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev, | |||
182 | mode = BCM43xx_INITIAL_IWMODE; | 182 | mode = BCM43xx_INITIAL_IWMODE; |
183 | 183 | ||
184 | bcm43xx_lock_mmio(bcm, flags); | 184 | bcm43xx_lock_mmio(bcm, flags); |
185 | if (bcm->ieee->iw_mode != mode) | 185 | if (bcm->initialized) { |
186 | bcm43xx_set_iwmode(bcm, mode); | 186 | if (bcm->ieee->iw_mode != mode) |
187 | bcm43xx_set_iwmode(bcm, mode); | ||
188 | } else | ||
189 | bcm->ieee->iw_mode = mode; | ||
187 | bcm43xx_unlock_mmio(bcm, flags); | 190 | bcm43xx_unlock_mmio(bcm, flags); |
188 | 191 | ||
189 | return 0; | 192 | return 0; |
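Previously SIOCSIWMODE always went through bcm43xx_set_iwmode(), which writes MMIO registers; if the interface has not been brought up yet the hardware is not initialized, so that path is unsafe. Now the register write happens only when bcm->initialized is set; otherwise just the requested mode is stored in bcm->ieee->iw_mode, presumably to be applied when the device is initialized. Schematically:

    /* Schematic of the new behaviour inside the MMIO lock. */
    if (bcm->initialized) {
        if (bcm->ieee->iw_mode != mode)
            bcm43xx_set_iwmode(bcm, mode);  /* safe: registers are live */
    } else {
        bcm->ieee->iw_mode = mode;          /* defer the hardware update */
    }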