Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/au1000_eth.c | 18
-rw-r--r--  drivers/net/b44.c | 28
-rw-r--r--  drivers/net/bnx2.c | 20
-rw-r--r--  drivers/net/dl2k.c | 13
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 5
-rw-r--r--  drivers/net/e1000/e1000_main.c | 19
-rw-r--r--  drivers/net/forcedeth.c | 473
-rw-r--r--  drivers/net/gianfar.c | 56
-rw-r--r--  drivers/net/gianfar.h | 67
-rw-r--r--  drivers/net/gianfar_ethtool.c | 20
-rw-r--r--  drivers/net/gianfar_sysfs.c | 24
-rw-r--r--  drivers/net/hamradio/dmascc.c | 1
-rw-r--r--  drivers/net/hamradio/scc.c | 1
-rw-r--r--  drivers/net/hamradio/yam.c | 1
-rw-r--r--  drivers/net/irda/Kconfig | 20
-rw-r--r--  drivers/net/irda/Makefile | 2
-rw-r--r--  drivers/net/irda/irda-usb.c | 8
-rw-r--r--  drivers/net/irda/sir-dev.h | 13
-rw-r--r--  drivers/net/irda/sir_dev.c | 315
-rw-r--r--  drivers/net/irda/sir_kthread.c | 508
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 330
-rw-r--r--  drivers/net/ixp2000/enp2611.c | 13
-rw-r--r--  drivers/net/ixp2000/pm3386.c | 30
-rw-r--r--  drivers/net/ixp2000/pm3386.h | 1
-rw-r--r--  drivers/net/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/ne.c | 33
-rw-r--r--  drivers/net/netconsole.c | 2
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 13
-rw-r--r--  drivers/net/pcmcia/nmclan_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r--  drivers/net/pcnet32.c | 2
-rw-r--r--  drivers/net/phy/mdio_bus.c | 4
-rw-r--r--  drivers/net/pppoe.c | 3
-rw-r--r--  drivers/net/sis900.c | 1
-rw-r--r--  drivers/net/skge.c | 8
-rw-r--r--  drivers/net/sky2.c | 282
-rw-r--r--  drivers/net/sky2.h | 7
-rw-r--r--  drivers/net/spider_net.c | 12
-rw-r--r--  drivers/net/spider_net.h | 2
-rw-r--r--  drivers/net/sungem_phy.c | 56
-rw-r--r--  drivers/net/sungem_phy.h | 1
-rw-r--r--  drivers/net/tg3.c | 97
-rw-r--r--  drivers/net/tg3.h | 1
-rw-r--r--  drivers/net/tulip/winbond-840.c | 4
-rw-r--r--  drivers/net/via-rhine.c | 40
-rw-r--r--  drivers/net/wireless/Kconfig | 2
-rw-r--r--  drivers/net/wireless/airo.c | 46
-rw-r--r--  drivers/net/wireless/arlan-main.c | 4
-rw-r--r--  drivers/net/wireless/atmel.c | 11
-rw-r--r--  drivers/net/wireless/bcm43xx/Kconfig | 3
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx.h | 17
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c | 8
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c | 44
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.h | 8
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 53
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.h | 6
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_phy.c | 3
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_pio.c | 92
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_pio.h | 16
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_power.c | 115
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_power.h | 9
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c | 115
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_sysfs.h | 16
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 15
-rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c | 4
-rw-r--r--  drivers/net/wireless/orinoco.c | 6
-rw-r--r--  drivers/net/wireless/wavelan.c | 2
67 files changed, 1829 insertions, 1325 deletions
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 1363083b4d83..14dbad14afb6 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -52,6 +52,7 @@
 #include <linux/mii.h>
 #include <linux/skbuff.h>
 #include <linux/delay.h>
+#include <linux/crc32.h>
 #include <asm/mipsregs.h>
 #include <asm/irq.h>
 #include <asm/io.h>
@@ -2070,23 +2071,6 @@ static void au1000_tx_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
-
-static unsigned const ethernet_polynomial = 0x04c11db7U;
-static inline u32 ether_crc(int length, unsigned char *data)
-{
-	int crc = -1;
-
-	while(--length >= 0) {
-		unsigned char current_octet = *data++;
-		int bit;
-		for (bit = 0; bit < 8; bit++, current_octet >>= 1)
-			crc = (crc << 1) ^
-				((crc < 0) ^ (current_octet & 1) ?
-				 ethernet_polynomial : 0);
-	}
-	return crc;
-}
-
 static void set_rx_mode(struct net_device *dev)
 {
 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 3d306681919e..d8233e0b7899 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -650,9 +650,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
-	if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+	if (dma_mapping_error(mapping) ||
+		mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
 		/* Sigh... */
-		pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+		if (!dma_mapping_error(mapping))
+			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
 		if (skb == NULL)
@@ -660,8 +662,10 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 		mapping = pci_map_single(bp->pdev, skb->data,
 					 RX_PKT_BUF_SZ,
 					 PCI_DMA_FROMDEVICE);
-		if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
-			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(mapping) ||
+			mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+			if (!dma_mapping_error(mapping))
+				pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
@@ -967,9 +971,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-	if (mapping + len > B44_DMA_MASK) {
+	if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
-		pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
+		if (!dma_mapping_error(mapping))
+			pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
 
 		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
 					     GFP_ATOMIC|GFP_DMA);
@@ -978,8 +983,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		mapping = pci_map_single(bp->pdev, bounce_skb->data,
 					 len, PCI_DMA_TODEVICE);
-		if (mapping + len > B44_DMA_MASK) {
-			pci_unmap_single(bp->pdev, mapping,
+		if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
+			if (!dma_mapping_error(mapping))
+				pci_unmap_single(bp->pdev, mapping,
 					 len, PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
@@ -1203,7 +1209,8 @@ static int b44_alloc_consistent(struct b44 *bp)
 					     DMA_TABLE_BYTES,
 					     DMA_BIDIRECTIONAL);
 
-		if (rx_ring_dma + size > B44_DMA_MASK) {
+		if (dma_mapping_error(rx_ring_dma) ||
+			rx_ring_dma + size > B44_DMA_MASK) {
 			kfree(rx_ring);
 			goto out_err;
 		}
@@ -1229,7 +1236,8 @@ static int b44_alloc_consistent(struct b44 *bp)
 					     DMA_TABLE_BYTES,
 					     DMA_TO_DEVICE);
 
-		if (tx_ring_dma + size > B44_DMA_MASK) {
+		if (dma_mapping_error(tx_ring_dma) ||
+			tx_ring_dma + size > B44_DMA_MASK) {
 			kfree(tx_ring);
 			goto out_err;
 		}
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 5ca99e26660a..54161aef3cac 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -55,8 +55,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.4.39"
-#define DRV_MODULE_RELDATE	"March 22, 2006"
+#define DRV_MODULE_VERSION	"1.4.40"
+#define DRV_MODULE_RELDATE	"May 22, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -2945,7 +2945,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 	       int buf_size)
 {
 	u32 written, offset32, len32;
-	u8 *buf, start[4], end[4];
+	u8 *buf, start[4], end[4], *flash_buffer = NULL;
 	int rc = 0;
 	int align_start, align_end;
 
@@ -2985,12 +2985,19 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 		memcpy(buf + align_start, data_buf, buf_size);
 	}
 
+	if (bp->flash_info->buffered == 0) {
+		flash_buffer = kmalloc(264, GFP_KERNEL);
+		if (flash_buffer == NULL) {
+			rc = -ENOMEM;
+			goto nvram_write_end;
+		}
+	}
+
 	written = 0;
 	while ((written < len32) && (rc == 0)) {
 		u32 page_start, page_end, data_start, data_end;
 		u32 addr, cmd_flags;
 		int i;
-		u8 flash_buffer[264];
 
 		/* Find the page_start addr */
 		page_start = offset32 + written;
@@ -3061,7 +3068,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 		}
 
 		/* Loop to write the new data from data_start to data_end */
-		for (addr = data_start; addr < data_end; addr += 4, i++) {
+		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
 			if ((addr == page_end - 4) ||
 			    ((bp->flash_info->buffered) &&
 			     (addr == data_end - 4))) {
@@ -3109,6 +3116,9 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 	}
 
 nvram_write_end:
+	if (bp->flash_info->buffered == 0)
+		kfree(flash_buffer);
+
 	if (align_start || align_end)
 		kfree(buf);
 	return rc;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 1f3627470c95..038447fb5c5e 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -53,6 +53,7 @@
 #define DRV_VERSION	"v1.17b"
 #define DRV_RELDATE	"2006/03/10"
 #include "dl2k.h"
+#include <linux/dma-mapping.h>
 
 static char version[] __devinitdata =
       KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
@@ -765,7 +766,7 @@ rio_free_tx (struct net_device *dev, int irq)
 			break;
 		skb = np->tx_skbuff[entry];
 		pci_unmap_single (np->pdev,
-				  np->tx_ring[entry].fraginfo & 0xffffffffffff,
+				  np->tx_ring[entry].fraginfo & DMA_48BIT_MASK,
 				  skb->len, PCI_DMA_TODEVICE);
 		if (irq)
 			dev_kfree_skb_irq (skb);
@@ -893,7 +894,7 @@ receive_packet (struct net_device *dev)
 			/* Small skbuffs for short packets */
 			if (pkt_len > copy_thresh) {
 				pci_unmap_single (np->pdev,
-						  desc->fraginfo & 0xffffffffffff,
+						  desc->fraginfo & DMA_48BIT_MASK,
 						  np->rx_buf_sz,
 						  PCI_DMA_FROMDEVICE);
 				skb_put (skb = np->rx_skbuff[entry], pkt_len);
@@ -901,7 +902,7 @@ receive_packet (struct net_device *dev)
 			} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
 				pci_dma_sync_single_for_cpu(np->pdev,
 							    desc->fraginfo &
-							    0xffffffffffff,
+							    DMA_48BIT_MASK,
 							    np->rx_buf_sz,
 							    PCI_DMA_FROMDEVICE);
 				skb->dev = dev;
@@ -913,7 +914,7 @@ receive_packet (struct net_device *dev)
 				skb_put (skb, pkt_len);
 				pci_dma_sync_single_for_device(np->pdev,
 							       desc->fraginfo &
-							       0xffffffffffff,
+							       DMA_48BIT_MASK,
 							       np->rx_buf_sz,
 							       PCI_DMA_FROMDEVICE);
 			}
@@ -1800,7 +1801,7 @@ rio_close (struct net_device *dev)
 		skb = np->rx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pdev,
-					 np->rx_ring[i].fraginfo & 0xffffffffffff,
+					 np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
 					 skb->len, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
@@ -1810,7 +1811,7 @@ rio_close (struct net_device *dev)
 		skb = np->tx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pdev,
-					 np->tx_ring[i].fraginfo & 0xffffffffffff,
+					 np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
 					 skb->len, PCI_DMA_TODEVICE);
 			dev_kfree_skb (skb);
 			np->tx_skbuff[i] = NULL;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index ecccca35c6f4..d1c705b412c2 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -870,13 +870,16 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 	*data = 0;
 
 	/* Hook up test interrupt handler just for this test */
-	if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+	if (!request_irq(irq, &e1000_test_intr, SA_PROBEIRQ, netdev->name,
+	                 netdev)) {
 		shared_int = FALSE;
 	} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
 			      netdev->name, netdev)){
 		*data = 1;
 		return -1;
 	}
+	DPRINTK(PROBE,INFO, "testing %s interrupt\n",
+	        (shared_int ? "shared" : "unshared"));
 
 	/* Disable all the interrupts */
 	E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index add8dc4aa7b0..97e71a4fe8eb 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -220,6 +220,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter);
 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 static int e1000_resume(struct pci_dev *pdev);
 #endif
+static void e1000_shutdown(struct pci_dev *pdev);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /* for netdump / net console */
@@ -235,8 +236,9 @@ static struct pci_driver e1000_driver = {
 	/* Power Managment Hooks */
 #ifdef CONFIG_PM
 	.suspend  = e1000_suspend,
-	.resume   = e1000_resume
+	.resume   = e1000_resume,
 #endif
+	.shutdown = e1000_shutdown
 };
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -3517,7 +3519,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	buffer_info = &rx_ring->buffer_info[i];
 
 	while (rx_desc->status & E1000_RXD_STAT_DD) {
-		struct sk_buff *skb, *next_skb;
+		struct sk_buff *skb;
 		u8 status;
 #ifdef CONFIG_E1000_NAPI
 		if (*work_done >= work_to_do)
@@ -3535,8 +3537,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		prefetch(next_rxd);
 
 		next_buffer = &rx_ring->buffer_info[i];
-		next_skb = next_buffer->skb;
-		prefetch(next_skb->data - NET_IP_ALIGN);
 
 		cleaned = TRUE;
 		cleaned_count++;
@@ -3666,7 +3666,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info, *next_buffer;
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
-	struct sk_buff *skb, *next_skb;
+	struct sk_buff *skb;
 	unsigned int i, j;
 	uint32_t length, staterr;
 	int cleaned_count = 0;
@@ -3695,8 +3695,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		prefetch(next_rxd);
 
 		next_buffer = &rx_ring->buffer_info[i];
-		next_skb = next_buffer->skb;
-		prefetch(next_skb->data - NET_IP_ALIGN);
 
 		cleaned = TRUE;
 		cleaned_count++;
@@ -3768,6 +3766,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 			ps_page->ps_page[j] = NULL;
 			skb->len += length;
 			skb->data_len += length;
+			skb->truesize += length;
 		}
 
 copydone:
@@ -4610,6 +4609,12 @@ e1000_resume(struct pci_dev *pdev)
 	return 0;
 }
 #endif
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+	e1000_suspend(pdev, PMSG_SUSPEND);
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Polling 'interrupt' - used by things like netconsole to send skbs
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 7627a75f4f7c..feb5b223cd60 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -105,6 +105,8 @@
  * 0.50: 20 Jan 2006: Add 8021pq tagging support.
  * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  * 0.52: 20 Jan 2006: Add MSI/MSIX support.
+ * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
+ * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -116,7 +118,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.52"
+#define FORCEDETH_VERSION		"0.54"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -160,6 +162,7 @@
 #define DEV_HAS_VLAN            0x0020  /* device supports vlan tagging and striping */
 #define DEV_HAS_MSI             0x0040  /* device supports MSI */
 #define DEV_HAS_MSI_X           0x0080  /* device supports MSI-X */
+#define DEV_HAS_POWER_CNTRL     0x0100  /* device supports power savings */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -203,6 +206,8 @@ enum {
 #define NVREG_MISC1_HD		0x02
 #define NVREG_MISC1_FORCE	0x3b0f3c
 
+	NvRegMacReset = 0x3c,
+#define NVREG_MAC_RESET_ASSERT	0x0F3
 	NvRegTransmitterControl = 0x084,
 #define NVREG_XMITCTL_START	0x01
 	NvRegTransmitterStatus = 0x088,
@@ -326,6 +331,10 @@ enum {
 	NvRegMSIXMap0 = 0x3e0,
 	NvRegMSIXMap1 = 0x3e4,
 	NvRegMSIXIrqStatus = 0x3f0,
+
+	NvRegPowerState2 = 0x600,
+#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
+#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
 };
 
 /* Big endian: should work, but is untested */
@@ -414,7 +423,8 @@ typedef union _ring_type {
 #define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
 
 /* Miscelaneous hardware related defines: */
-#define NV_PCI_REGSZ		0x270
+#define NV_PCI_REGSZ_VER1	0x270
+#define NV_PCI_REGSZ_VER2	0x604
 
 /* various timeout delays: all in usec */
 #define NV_TXRX_RESET_DELAY	4
@@ -431,6 +441,7 @@ typedef union _ring_type {
 #define NV_MIIBUSY_DELAY	50
 #define NV_MIIPHY_DELAY	10
 #define NV_MIIPHY_DELAYMAX	10000
+#define NV_MAC_RESET_DELAY	64
 
 #define NV_WAKEUPPATTERNS	5
 #define NV_WAKEUPMASKENTRIES	4
@@ -552,6 +563,8 @@ struct fe_priv {
 	u32 desc_ver;
 	u32 txrxctl_bits;
 	u32 vlanctl_bits;
+	u32 driver_data;
+	u32 register_size;
 
 	void __iomem *base;
 
@@ -698,6 +711,72 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	}
 }
 
+static int using_multi_irqs(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+		return 0;
+	else
+		return 1;
+}
+
+static void nv_enable_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
+	} else {
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
+}
+
+static void nv_disable_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
+}
+
+/* In MSIX mode, a write to irqmask behaves as XOR */
+static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+
+	writel(mask, base + NvRegIrqMask);
+}
+
+static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(mask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
+}
+
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -919,6 +998,24 @@ static void nv_txrx_reset(struct net_device *dev)
 	pci_push(base);
 }
 
+static void nv_mac_reset(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
+	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
+	pci_push(base);
+	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
+	pci_push(base);
+	udelay(NV_MAC_RESET_DELAY);
+	writel(0, base + NvRegMacReset);
+	pci_push(base);
+	udelay(NV_MAC_RESET_DELAY);
+	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
+	pci_push(base);
+}
+
 /*
  * nv_get_stats: dev->get_stats function
  * Get latest stats value from the nic.
@@ -989,24 +1086,25 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 	if (nv_alloc_rx(dev)) {
-		spin_lock(&np->lock);
+		spin_lock_irq(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock(&np->lock);
+		spin_unlock_irq(&np->lock);
 	}
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		enable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -1331,7 +1429,7 @@ static void nv_tx_timeout(struct net_device *dev)
 		       dev->name, (unsigned long)np->ring_addr,
 		       np->next_tx, np->nic_tx);
 		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
-		for (i=0;i<0x400;i+= 32) {
+		for (i=0;i<=np->register_size;i+= 32) {
 			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 					i,
 					readl(base + i + 0), readl(base + i + 4),
@@ -1638,15 +1736,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
+	nv_disable_irq(dev);
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1679,15 +1769,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-			enable_irq(dev->irq);
-		} else {
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-		}
+		nv_enable_irq(dev);
 	}
 	return 0;
 }
@@ -2078,16 +2160,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock(&np->lock);
+		spin_lock_irq(&np->lock);
 		nv_tx_done(dev);
-		spin_unlock(&np->lock);
+		spin_unlock_irq(&np->lock);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2097,7 +2179,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2127,14 +2209,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2144,7 +2226,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2173,14 +2255,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			nv_link_irq(dev);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			nv_linkchange(dev);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2188,7 +2270,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2198,7 +2280,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2221,10 +2303,11 @@ static void nv_do_nic_poll(unsigned long data)
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
 
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2247,11 +2330,12 @@ static void nv_do_nic_poll(unsigned long data)
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+	if (!using_multi_irqs(dev)) {
 		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-		enable_irq(dev->irq);
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
 			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2488,11 +2572,11 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 }
 
 #define FORCEDETH_REGS_VER	1
-#define FORCEDETH_REGS_SIZE	0x400 /* 256 32-bit registers */
 
 static int nv_get_regs_len(struct net_device *dev)
 {
-	return FORCEDETH_REGS_SIZE;
+	struct fe_priv *np = netdev_priv(dev);
+	return np->register_size;
 }
 
 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
@@ -2504,7 +2588,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
 	regs->version = FORCEDETH_REGS_VER;
 	spin_lock_irq(&np->lock);
-	for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
+	for (i = 0;i <= np->register_size/sizeof(u32); i++)
 		rbuf[i] = readl(base + i*sizeof(u32));
 	spin_unlock_irq(&np->lock);
 }
@@ -2531,6 +2615,18 @@ static int nv_nway_reset(struct net_device *dev)
 	return ret;
 }
 
+#ifdef NETIF_F_TSO
+static int nv_set_tso(struct net_device *dev, u32 value)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if ((np->driver_data & DEV_HAS_CHECKSUM))
+		return ethtool_op_set_tso(dev, value);
+	else
+		return value ? -EOPNOTSUPP : 0;
+}
+#endif
+
 static struct ethtool_ops ops = {
 	.get_drvinfo = nv_get_drvinfo,
 	.get_link = ethtool_op_get_link,
@@ -2542,6 +2638,10 @@ static struct ethtool_ops ops = {
 	.get_regs = nv_get_regs,
 	.nway_reset = nv_nway_reset,
 	.get_perm_addr = ethtool_op_get_perm_addr,
+#ifdef NETIF_F_TSO
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = nv_set_tso
+#endif
 };
 
 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
@@ -2598,6 +2698,113 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
+static int nv_request_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	int ret = 1;
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_CAPABLE) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			np->msi_x_entry[i].entry = i;
+		}
+		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+			np->msi_flags |= NV_MSI_X_ENABLED;
+			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+				/* Request irq for rx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_err;
+				}
+				/* Request irq for tx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_free_rx;
+				}
+				/* Request irq for link and timer handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_free_tx;
+				}
+				/* map interrupts to their respective vector */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+			} else {
+				/* Request irq for all interrupts */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_err;
+				}
+
+				/* map interrupts to vector 0 */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+			}
+		}
+	}
+	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+			np->msi_flags |= NV_MSI_ENABLED;
+			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+				pci_disable_msi(np->pci_dev);
+				np->msi_flags &= ~NV_MSI_ENABLED;
+				goto out_err;
+			}
+
+			/* map interrupts to vector 0 */
+			writel(0, base + NvRegMSIMap0);
+			writel(0, base + NvRegMSIMap1);
+			/* enable msi vector 0 */
+			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+		}
+	}
+	if (ret != 0) {
+		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+			goto out_err;
+	}
+
+	return 0;
+out_free_tx:
+	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+out_err:
+	return 1;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
+}
+
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2608,6 +2815,8 @@ static int nv_open(struct net_device *dev)
 	dprintk(KERN_DEBUG "nv_open: begin\n");
 
 	/* 1) erase previous misconfiguration */
+	if (np->driver_data & DEV_HAS_POWER_CNTRL)
+		nv_mac_reset(dev);
 	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
 	writel(0, base + NvRegMulticastAddrB);
@@ -2688,86 +2897,18 @@ static int nv_open(struct net_device *dev)
 	udelay(10);
 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-	writel(0, base + NvRegIrqMask);
+	nv_disable_hw_interrupts(dev, np->irqmask);
 	pci_push(base);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-				/* Request irq for rx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-				/* Request irq for tx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-				/* Request irq for link and timer handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-
-				/* map interrupts to their respective vector */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-			} else {
-				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-
-				/* map interrupts to vector 0 */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-			}
-		}
-	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-				pci_disable_msi(np->pci_dev);
-				np->msi_flags &= ~NV_MSI_ENABLED;
-				goto out_drain;
-			}
-
-			/* map interrupts to vector 0 */
-			writel(0, base + NvRegMSIMap0);
-			writel(0, base + NvRegMSIMap1);
-			/* enable msi vector 0 */
-			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-		}
-	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-			goto out_drain;
+	if (nv_request_irq(dev)) {
+		goto out_drain;
 	}
 
 	/* ask for interrupts */
-	writel(np->irqmask, base + NvRegIrqMask);
+	nv_enable_hw_interrupts(dev, np->irqmask);
 
 	spin_lock_irq(&np->lock);
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -2811,7 +2952,6 @@ static int nv_close(struct net_device *dev)
2811{ 2952{
2812 struct fe_priv *np = netdev_priv(dev); 2953 struct fe_priv *np = netdev_priv(dev);
2813 u8 __iomem *base; 2954 u8 __iomem *base;
2814 int i;
2815 2955
2816 spin_lock_irq(&np->lock); 2956 spin_lock_irq(&np->lock);
2817 np->in_shutdown = 1; 2957 np->in_shutdown = 1;
@@ -2829,31 +2969,13 @@ static int nv_close(struct net_device *dev)
2829 2969
2830 /* disable interrupts on the nic or we will lock up */ 2970 /* disable interrupts on the nic or we will lock up */
2831 base = get_hwbase(dev); 2971 base = get_hwbase(dev);
2832 if (np->msi_flags & NV_MSI_X_ENABLED) { 2972 nv_disable_hw_interrupts(dev, np->irqmask);
2833 writel(np->irqmask, base + NvRegIrqMask);
2834 } else {
2835 if (np->msi_flags & NV_MSI_ENABLED)
2836 writel(0, base + NvRegMSIIrqMask);
2837 writel(0, base + NvRegIrqMask);
2838 }
2839 pci_push(base); 2973 pci_push(base);
2840 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 2974 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
2841 2975
2842 spin_unlock_irq(&np->lock); 2976 spin_unlock_irq(&np->lock);
2843 2977
2844 if (np->msi_flags & NV_MSI_X_ENABLED) { 2978 nv_free_irq(dev);
2845 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2846 free_irq(np->msi_x_entry[i].vector, dev);
2847 }
2848 pci_disable_msix(np->pci_dev);
2849 np->msi_flags &= ~NV_MSI_X_ENABLED;
2850 } else {
2851 free_irq(np->pci_dev->irq, dev);
2852 if (np->msi_flags & NV_MSI_ENABLED) {
2853 pci_disable_msi(np->pci_dev);
2854 np->msi_flags &= ~NV_MSI_ENABLED;
2855 }
2856 }
2857 2979
2858 drain_ring(dev); 2980 drain_ring(dev);
2859 2981
@@ -2878,6 +3000,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2878 unsigned long addr; 3000 unsigned long addr;
2879 u8 __iomem *base; 3001 u8 __iomem *base;
2880 int err, i; 3002 int err, i;
3003 u32 powerstate;
2881 3004
2882 dev = alloc_etherdev(sizeof(struct fe_priv)); 3005 dev = alloc_etherdev(sizeof(struct fe_priv));
2883 err = -ENOMEM; 3006 err = -ENOMEM;
@@ -2910,6 +3033,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2910 if (err < 0) 3033 if (err < 0)
2911 goto out_disable; 3034 goto out_disable;
2912 3035
3036 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL))
3037 np->register_size = NV_PCI_REGSZ_VER2;
3038 else
3039 np->register_size = NV_PCI_REGSZ_VER1;
3040
2913 err = -EINVAL; 3041 err = -EINVAL;
2914 addr = 0; 3042 addr = 0;
2915 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 3043 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2918,7 +3046,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2918 pci_resource_len(pci_dev, i), 3046 pci_resource_len(pci_dev, i),
2919 pci_resource_flags(pci_dev, i)); 3047 pci_resource_flags(pci_dev, i));
2920 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 3048 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
2921 pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) { 3049 pci_resource_len(pci_dev, i) >= np->register_size) {
2922 addr = pci_resource_start(pci_dev, i); 3050 addr = pci_resource_start(pci_dev, i);
2923 break; 3051 break;
2924 } 3052 }
@@ -2929,24 +3057,25 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2929 goto out_relreg; 3057 goto out_relreg;
2930 } 3058 }
2931 3059
3060 /* copy of driver data */
3061 np->driver_data = id->driver_data;
3062
2932 /* handle different descriptor versions */ 3063 /* handle different descriptor versions */
2933 if (id->driver_data & DEV_HAS_HIGH_DMA) { 3064 if (id->driver_data & DEV_HAS_HIGH_DMA) {
2934 /* packet format 3: supports 40-bit addressing */ 3065 /* packet format 3: supports 40-bit addressing */
2935 np->desc_ver = DESC_VER_3; 3066 np->desc_ver = DESC_VER_3;
3067 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
2936 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { 3068 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
2937 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 3069 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2938 pci_name(pci_dev)); 3070 pci_name(pci_dev));
2939 } else { 3071 } else {
2940 if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) { 3072 dev->features |= NETIF_F_HIGHDMA;
2941 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n", 3073 printk(KERN_INFO "forcedeth: using HIGHDMA\n");
2942 pci_name(pci_dev)); 3074 }
2943 goto out_relreg; 3075 if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
2944 } else { 3076 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
2945 dev->features |= NETIF_F_HIGHDMA; 3077 pci_name(pci_dev));
2946 printk(KERN_INFO "forcedeth: using HIGHDMA\n");
2947 }
2948 } 3078 }
2949 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
2950 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 3079 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
2951 /* packet format 2: supports jumbo frames */ 3080 /* packet format 2: supports jumbo frames */
2952 np->desc_ver = DESC_VER_2; 3081 np->desc_ver = DESC_VER_2;
@@ -2986,7 +3115,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2986 } 3115 }
2987 3116
2988 err = -ENOMEM; 3117 err = -ENOMEM;
2989 np->base = ioremap(addr, NV_PCI_REGSZ); 3118 np->base = ioremap(addr, np->register_size);
2990 if (!np->base) 3119 if (!np->base)
2991 goto out_relreg; 3120 goto out_relreg;
2992 dev->base_addr = (unsigned long)np->base; 3121 dev->base_addr = (unsigned long)np->base;
@@ -3062,6 +3191,20 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
3062 writel(0, base + NvRegWakeUpFlags); 3191 writel(0, base + NvRegWakeUpFlags);
3063 np->wolenabled = 0; 3192 np->wolenabled = 0;
3064 3193
3194 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
3195 u8 revision_id;
3196 pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
3197
3198 /* take phy and nic out of low power mode */
3199 powerstate = readl(base + NvRegPowerState2);
3200 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
3201 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
3202 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
3203 revision_id >= 0xA3)
3204 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
3205 writel(powerstate, base + NvRegPowerState2);
3206 }
3207
3065 if (np->desc_ver == DESC_VER_1) { 3208 if (np->desc_ver == DESC_VER_1) {
3066 np->tx_flags = NV_TX_VALID; 3209 np->tx_flags = NV_TX_VALID;
3067 } else { 3210 } else {
@@ -3223,19 +3366,19 @@ static struct pci_device_id pci_tbl[] = {
3223 }, 3366 },
3224 { /* MCP51 Ethernet Controller */ 3367 { /* MCP51 Ethernet Controller */
3225 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), 3368 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
3226 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA, 3369 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
3227 }, 3370 },
3228 { /* MCP51 Ethernet Controller */ 3371 { /* MCP51 Ethernet Controller */
3229 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), 3372 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
3230 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA, 3373 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
3231 }, 3374 },
3232 { /* MCP55 Ethernet Controller */ 3375 { /* MCP55 Ethernet Controller */
3233 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 3376 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
3234 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X, 3377 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
3235 }, 3378 },
3236 { /* MCP55 Ethernet Controller */ 3379 { /* MCP55 Ethernet Controller */
3237 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 3380 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
3238 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X, 3381 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
3239 }, 3382 },
3240 {0,}, 3383 {0,},
3241}; 3384};
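
The new DEV_HAS_POWER_CNTRL flag travels in the .driver_data word of the PCI ID table, exactly like the existing DEV_HAS_* capability bits, and is tested once in nv_probe(). Below is a minimal sketch of that pattern, assuming the register and mask names from the hunk above; the helper function itself is illustrative and does not exist in the driver.

/* Sketch: probe-time setup gated on a capability bit carried in
 * pci_device_id.driver_data.  Register/mask names follow the hunk above;
 * the function wrapper is illustrative only. */
static void example_power_up(struct pci_dev *pci_dev, void __iomem *base,
                             const struct pci_device_id *id)
{
        u32 powerstate;
        u8 rev;

        if (!(id->driver_data & DEV_HAS_POWER_CNTRL))
                return;

        pci_read_config_byte(pci_dev, PCI_REVISION_ID, &rev);

        /* read-modify-write of the power state register, with an extra
         * bit required only on newer chip revisions */
        powerstate = readl(base + NvRegPowerState2);
        powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
        if (rev >= 0xA3)
                powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
        writel(powerstate, base + NvRegPowerState2);
}
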
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 771e25d8c417..218d31764c52 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -210,7 +210,8 @@ static int gfar_probe(struct platform_device *pdev)
210 goto regs_fail; 210 goto regs_fail;
211 } 211 }
212 212
213 spin_lock_init(&priv->lock); 213 spin_lock_init(&priv->txlock);
214 spin_lock_init(&priv->rxlock);
214 215
215 platform_set_drvdata(pdev, dev); 216 platform_set_drvdata(pdev, dev);
216 217
@@ -515,11 +516,13 @@ void stop_gfar(struct net_device *dev)
515 phy_stop(priv->phydev); 516 phy_stop(priv->phydev);
516 517
517 /* Lock it down */ 518 /* Lock it down */
518 spin_lock_irqsave(&priv->lock, flags); 519 spin_lock_irqsave(&priv->txlock, flags);
520 spin_lock(&priv->rxlock);
519 521
520 gfar_halt(dev); 522 gfar_halt(dev);
521 523
522 spin_unlock_irqrestore(&priv->lock, flags); 524 spin_unlock(&priv->rxlock);
525 spin_unlock_irqrestore(&priv->txlock, flags);
523 526
524 /* Free the IRQs */ 527 /* Free the IRQs */
525 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 528 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -605,14 +608,15 @@ void gfar_start(struct net_device *dev)
605 tempval |= DMACTRL_INIT_SETTINGS; 608 tempval |= DMACTRL_INIT_SETTINGS;
606 gfar_write(&priv->regs->dmactrl, tempval); 609 gfar_write(&priv->regs->dmactrl, tempval);
607 610
608 /* Clear THLT, so that the DMA starts polling now */
609 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
610
611 /* Make sure we aren't stopped */ 611 /* Make sure we aren't stopped */
612 tempval = gfar_read(&priv->regs->dmactrl); 612 tempval = gfar_read(&priv->regs->dmactrl);
613 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 613 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
614 gfar_write(&priv->regs->dmactrl, tempval); 614 gfar_write(&priv->regs->dmactrl, tempval);
615 615
616 /* Clear THLT/RHLT, so that the DMA starts polling now */
617 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
618 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);
619
616 /* Unmask the interrupts we look for */ 620 /* Unmask the interrupts we look for */
617 gfar_write(&regs->imask, IMASK_DEFAULT); 621 gfar_write(&regs->imask, IMASK_DEFAULT);
618} 622}
@@ -928,12 +932,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
928 struct txfcb *fcb = NULL; 932 struct txfcb *fcb = NULL;
929 struct txbd8 *txbdp; 933 struct txbd8 *txbdp;
930 u16 status; 934 u16 status;
935 unsigned long flags;
931 936
932 /* Update transmit stats */ 937 /* Update transmit stats */
933 priv->stats.tx_bytes += skb->len; 938 priv->stats.tx_bytes += skb->len;
934 939
935 /* Lock priv now */ 940 /* Lock priv now */
936 spin_lock_irq(&priv->lock); 941 spin_lock_irqsave(&priv->txlock, flags);
937 942
938 /* Point at the first free tx descriptor */ 943 /* Point at the first free tx descriptor */
939 txbdp = priv->cur_tx; 944 txbdp = priv->cur_tx;
@@ -1004,7 +1009,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1004 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 1009 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1005 1010
1006 /* Unlock priv */ 1011 /* Unlock priv */
1007 spin_unlock_irq(&priv->lock); 1012 spin_unlock_irqrestore(&priv->txlock, flags);
1008 1013
1009 return 0; 1014 return 0;
1010} 1015}
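
Switching gfar_start_xmit() from spin_lock_irq() to spin_lock_irqsave() matters because hard_start_xmit can be entered with interrupts already disabled (netpoll/netconsole is the usual case); unconditionally re-enabling interrupts on unlock would corrupt the caller's context. A minimal sketch of the idiom, with an illustrative private structure:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct example_priv {
        spinlock_t txlock;              /* protects the tx ring */
};

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);
        unsigned long flags;

        /* irqsave/irqrestore preserves whatever interrupt state the
         * caller had, unlike spin_lock_irq()/spin_unlock_irq() */
        spin_lock_irqsave(&priv->txlock, flags);
        /* ... place skb on the tx ring, kick the hardware ... */
        spin_unlock_irqrestore(&priv->txlock, flags);

        return 0;
}
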
@@ -1049,7 +1054,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1049 unsigned long flags; 1054 unsigned long flags;
1050 u32 tempval; 1055 u32 tempval;
1051 1056
1052 spin_lock_irqsave(&priv->lock, flags); 1057 spin_lock_irqsave(&priv->rxlock, flags);
1053 1058
1054 priv->vlgrp = grp; 1059 priv->vlgrp = grp;
1055 1060
@@ -1076,7 +1081,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1076 gfar_write(&priv->regs->rctrl, tempval); 1081 gfar_write(&priv->regs->rctrl, tempval);
1077 } 1082 }
1078 1083
1079 spin_unlock_irqrestore(&priv->lock, flags); 1084 spin_unlock_irqrestore(&priv->rxlock, flags);
1080} 1085}
1081 1086
1082 1087
@@ -1085,12 +1090,12 @@ static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
1085 struct gfar_private *priv = netdev_priv(dev); 1090 struct gfar_private *priv = netdev_priv(dev);
1086 unsigned long flags; 1091 unsigned long flags;
1087 1092
1088 spin_lock_irqsave(&priv->lock, flags); 1093 spin_lock_irqsave(&priv->rxlock, flags);
1089 1094
1090 if (priv->vlgrp) 1095 if (priv->vlgrp)
1091 priv->vlgrp->vlan_devices[vid] = NULL; 1096 priv->vlgrp->vlan_devices[vid] = NULL;
1092 1097
1093 spin_unlock_irqrestore(&priv->lock, flags); 1098 spin_unlock_irqrestore(&priv->rxlock, flags);
1094} 1099}
1095 1100
1096 1101
@@ -1179,7 +1184,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
1179 gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); 1184 gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
1180 1185
1181 /* Lock priv */ 1186 /* Lock priv */
1182 spin_lock(&priv->lock); 1187 spin_lock(&priv->txlock);
1183 bdp = priv->dirty_tx; 1188 bdp = priv->dirty_tx;
1184 while ((bdp->status & TXBD_READY) == 0) { 1189 while ((bdp->status & TXBD_READY) == 0) {
1185 /* If dirty_tx and cur_tx are the same, then either the */ 1190 /* If dirty_tx and cur_tx are the same, then either the */
@@ -1224,7 +1229,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
1224 else 1229 else
1225 gfar_write(&priv->regs->txic, 0); 1230 gfar_write(&priv->regs->txic, 0);
1226 1231
1227 spin_unlock(&priv->lock); 1232 spin_unlock(&priv->txlock);
1228 1233
1229 return IRQ_HANDLED; 1234 return IRQ_HANDLED;
1230} 1235}
@@ -1305,9 +1310,10 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
1305{ 1310{
1306 struct net_device *dev = (struct net_device *) dev_id; 1311 struct net_device *dev = (struct net_device *) dev_id;
1307 struct gfar_private *priv = netdev_priv(dev); 1312 struct gfar_private *priv = netdev_priv(dev);
1308
1309#ifdef CONFIG_GFAR_NAPI 1313#ifdef CONFIG_GFAR_NAPI
1310 u32 tempval; 1314 u32 tempval;
1315#else
1316 unsigned long flags;
1311#endif 1317#endif
1312 1318
1313 /* Clear IEVENT, so rx interrupt isn't called again 1319 /* Clear IEVENT, so rx interrupt isn't called again
@@ -1330,7 +1336,7 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
1330 } 1336 }
1331#else 1337#else
1332 1338
1333 spin_lock(&priv->lock); 1339 spin_lock_irqsave(&priv->rxlock, flags);
1334 gfar_clean_rx_ring(dev, priv->rx_ring_size); 1340 gfar_clean_rx_ring(dev, priv->rx_ring_size);
1335 1341
1336 /* If we are coalescing interrupts, update the timer */ 1342 /* If we are coalescing interrupts, update the timer */
@@ -1341,7 +1347,7 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
1341 else 1347 else
1342 gfar_write(&priv->regs->rxic, 0); 1348 gfar_write(&priv->regs->rxic, 0);
1343 1349
1344 spin_unlock(&priv->lock); 1350 spin_unlock_irqrestore(&priv->rxlock, flags);
1345#endif 1351#endif
1346 1352
1347 return IRQ_HANDLED; 1353 return IRQ_HANDLED;
@@ -1490,13 +1496,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1490 /* Update the current rxbd pointer to be the next one */ 1496 /* Update the current rxbd pointer to be the next one */
1491 priv->cur_rx = bdp; 1497 priv->cur_rx = bdp;
1492 1498
1493 /* If no packets have arrived since the
1494 * last one we processed, clear the IEVENT RX and
1495 * BSY bits so that another interrupt won't be
1496 * generated when we set IMASK */
1497 if (bdp->status & RXBD_EMPTY)
1498 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1499
1500 return howmany; 1499 return howmany;
1501} 1500}
1502 1501
@@ -1516,7 +1515,7 @@ static int gfar_poll(struct net_device *dev, int *budget)
1516 rx_work_limit -= howmany; 1515 rx_work_limit -= howmany;
1517 *budget -= howmany; 1516 *budget -= howmany;
1518 1517
1519 if (rx_work_limit >= 0) { 1518 if (rx_work_limit > 0) {
1520 netif_rx_complete(dev); 1519 netif_rx_complete(dev);
1521 1520
1522 /* Clear the halt bit in RSTAT */ 1521 /* Clear the halt bit in RSTAT */
@@ -1533,7 +1532,8 @@ static int gfar_poll(struct net_device *dev, int *budget)
1533 gfar_write(&priv->regs->rxic, 0); 1532 gfar_write(&priv->regs->rxic, 0);
1534 } 1533 }
1535 1534
1536 return (rx_work_limit < 0) ? 1 : 0; 1535 /* Return 1 if there's more work to do */
1536 return (rx_work_limit > 0) ? 0 : 1;
1537} 1537}
1538#endif 1538#endif
1539 1539
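
The two gfar_poll() hunks fix the accounting for the old ->poll() contract: a driver returns 0 (after netif_rx_complete() and re-enabling receive interrupts) only when it has genuinely drained the ring, and returns 1 to stay on the poll list. Comparing against "> 0" rather than ">= 0" avoids completing NAPI when the budget was exactly consumed and frames may still be pending. A hedged sketch of that contract as it looked in this kernel generation; the clean/enable helpers are illustrative, not gianfar's:

static int example_poll(struct net_device *dev, int *budget)
{
        int limit = min(dev->quota, *budget);
        int done = example_clean_rx(dev, limit);        /* frames processed */

        *budget -= done;
        dev->quota -= done;

        if (done < limit) {
                /* ring drained before the budget ran out */
                netif_rx_complete(dev);
                example_enable_rx_irq(dev);
                return 0;               /* done: leave the poll list */
        }
        return 1;                       /* more work: poll again */
}
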
@@ -1629,7 +1629,7 @@ static void adjust_link(struct net_device *dev)
1629 struct phy_device *phydev = priv->phydev; 1629 struct phy_device *phydev = priv->phydev;
1630 int new_state = 0; 1630 int new_state = 0;
1631 1631
1632 spin_lock_irqsave(&priv->lock, flags); 1632 spin_lock_irqsave(&priv->txlock, flags);
1633 if (phydev->link) { 1633 if (phydev->link) {
1634 u32 tempval = gfar_read(&regs->maccfg2); 1634 u32 tempval = gfar_read(&regs->maccfg2);
1635 u32 ecntrl = gfar_read(&regs->ecntrl); 1635 u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -1694,7 +1694,7 @@ static void adjust_link(struct net_device *dev)
1694 if (new_state && netif_msg_link(priv)) 1694 if (new_state && netif_msg_link(priv))
1695 phy_print_status(phydev); 1695 phy_print_status(phydev);
1696 1696
1697 spin_unlock_irqrestore(&priv->lock, flags); 1697 spin_unlock_irqrestore(&priv->txlock, flags);
1698} 1698}
1699 1699
1700/* Update the hash table based on the current list of multicast 1700/* Update the hash table based on the current list of multicast
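
Throughout gianfar.c the single priv->lock is split into txlock and rxlock, so the transmit and receive paths no longer serialize against each other; paths that really must freeze both directions (stop_gfar() above, and the ethtool ring/checksum paths below) now nest the two locks, always in the same order. A minimal sketch of that nesting, with illustrative names:

#include <linux/spinlock.h>

struct example_priv {
        spinlock_t txlock;              /* protects tx ring state */
        spinlock_t rxlock;              /* protects rx ring state */
};

static void example_halt_both(struct example_priv *priv)
{
        unsigned long flags;

        /* Fixed order: txlock first, then rxlock.  Every dual-lock path
         * must take them in the same order or it can deadlock. */
        spin_lock_irqsave(&priv->txlock, flags);
        spin_lock(&priv->rxlock);

        /* ... halt DMA and drain both rings ... */

        spin_unlock(&priv->rxlock);
        spin_unlock_irqrestore(&priv->txlock, flags);
}
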
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index d37d5401be6e..127c98cf3336 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -656,43 +656,62 @@ struct gfar {
656 * the buffer descriptor determines the actual condition. 656 * the buffer descriptor determines the actual condition.
657 */ 657 */
658struct gfar_private { 658struct gfar_private {
659 /* pointers to arrays of skbuffs for tx and rx */ 659 /* Fields controlled by TX lock */
660 spinlock_t txlock;
661
662 /* Pointer to the array of skbuffs */
660 struct sk_buff ** tx_skbuff; 663 struct sk_buff ** tx_skbuff;
661 struct sk_buff ** rx_skbuff;
662 664
663 /* indices pointing to the next free sbk in skb arrays */ 665 /* next free skb in the array */
664 u16 skb_curtx; 666 u16 skb_curtx;
665 u16 skb_currx;
666 667
667 /* index of the first skb which hasn't been transmitted 668 /* First skb in line to be transmitted */
668 * yet. */
669 u16 skb_dirtytx; 669 u16 skb_dirtytx;
670 670
671 /* Configuration info for the coalescing features */ 671 /* Configuration info for the coalescing features */
672 unsigned char txcoalescing; 672 unsigned char txcoalescing;
673 unsigned short txcount; 673 unsigned short txcount;
674 unsigned short txtime; 674 unsigned short txtime;
675
676 /* Buffer descriptor pointers */
677 struct txbd8 *tx_bd_base; /* First tx buffer descriptor */
678 struct txbd8 *cur_tx; /* Next free ring entry */
679 struct txbd8 *dirty_tx; /* First buffer in line
680 to be transmitted */
681 unsigned int tx_ring_size;
682
683 /* RX Locked fields */
684 spinlock_t rxlock;
685
686 /* skb array and index */
687 struct sk_buff ** rx_skbuff;
688 u16 skb_currx;
689
690 /* RX Coalescing values */
675 unsigned char rxcoalescing; 691 unsigned char rxcoalescing;
676 unsigned short rxcount; 692 unsigned short rxcount;
677 unsigned short rxtime; 693 unsigned short rxtime;
678 694
679 /* GFAR addresses */ 695 struct rxbd8 *rx_bd_base; /* First Rx buffers */
680 struct rxbd8 *rx_bd_base; /* Base addresses of Rx and Tx Buffers */
681 struct txbd8 *tx_bd_base;
682 struct rxbd8 *cur_rx; /* Next free rx ring entry */ 696 struct rxbd8 *cur_rx; /* Next free rx ring entry */
683 struct txbd8 *cur_tx; /* Next free ring entry */ 697
684 struct txbd8 *dirty_tx; /* The Ring entry to be freed. */ 698 /* RX parameters */
685 struct gfar __iomem *regs; /* Pointer to the GFAR memory mapped Registers */ 699 unsigned int rx_ring_size;
686 u32 __iomem *hash_regs[16];
687 int hash_width;
688 struct net_device_stats stats; /* linux network statistics */
689 struct gfar_extra_stats extra_stats;
690 spinlock_t lock;
691 unsigned int rx_buffer_size; 700 unsigned int rx_buffer_size;
692 unsigned int rx_stash_size; 701 unsigned int rx_stash_size;
693 unsigned int rx_stash_index; 702 unsigned int rx_stash_index;
694 unsigned int tx_ring_size; 703
695 unsigned int rx_ring_size; 704 struct vlan_group *vlgrp;
705
706 /* Unprotected fields */
707 /* Pointer to the GFAR memory mapped Registers */
708 struct gfar __iomem *regs;
709
710 /* Hash registers and their width */
711 u32 __iomem *hash_regs[16];
712 int hash_width;
713
714 /* global parameters */
696 unsigned int fifo_threshold; 715 unsigned int fifo_threshold;
697 unsigned int fifo_starve; 716 unsigned int fifo_starve;
698 unsigned int fifo_starve_off; 717 unsigned int fifo_starve_off;
@@ -702,13 +721,15 @@ struct gfar_private {
702 extended_hash:1, 721 extended_hash:1,
703 bd_stash_en:1; 722 bd_stash_en:1;
704 unsigned short padding; 723 unsigned short padding;
705 struct vlan_group *vlgrp; 724
706 /* Info structure initialized by board setup code */
707 unsigned int interruptTransmit; 725 unsigned int interruptTransmit;
708 unsigned int interruptReceive; 726 unsigned int interruptReceive;
709 unsigned int interruptError; 727 unsigned int interruptError;
728
729 /* info structure initialized by platform code */
710 struct gianfar_platform_data *einfo; 730 struct gianfar_platform_data *einfo;
711 731
732 /* PHY stuff */
712 struct phy_device *phydev; 733 struct phy_device *phydev;
713 struct mii_bus *mii_bus; 734 struct mii_bus *mii_bus;
714 int oldspeed; 735 int oldspeed;
@@ -716,6 +737,10 @@ struct gfar_private {
716 int oldlink; 737 int oldlink;
717 738
718 uint32_t msg_enable; 739 uint32_t msg_enable;
740
741 /* Network Statistics */
742 struct net_device_stats stats;
743 struct gfar_extra_stats extra_stats;
719}; 744};
720 745
721static inline u32 gfar_read(volatile unsigned __iomem *addr) 746static inline u32 gfar_read(volatile unsigned __iomem *addr)
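
The struct reshuffle above is not cosmetic: fields are regrouped under the lock that guards them (tx state behind txlock, rx state behind rxlock, register pointers and read-mostly parameters left unlocked), which makes the locking rules self-documenting. A generic sketch of that layout convention, with illustrative fields:

struct example_priv {
        /* Fields protected by txlock */
        spinlock_t txlock;
        struct sk_buff **tx_skbuff;
        unsigned int tx_ring_size;

        /* Fields protected by rxlock */
        spinlock_t rxlock;
        struct sk_buff **rx_skbuff;
        unsigned int rx_ring_size;

        /* Unlocked: written once at probe time, read-mostly afterwards */
        void __iomem *regs;
};
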
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5de7b2e259dc..d69698c695ef 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -455,10 +455,14 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
455 455
456 /* Halt TX and RX, and process the frames which 456 /* Halt TX and RX, and process the frames which
457 * have already been received */ 457 * have already been received */
458 spin_lock_irqsave(&priv->lock, flags); 458 spin_lock_irqsave(&priv->txlock, flags);
459 spin_lock(&priv->rxlock);
460
459 gfar_halt(dev); 461 gfar_halt(dev);
460 gfar_clean_rx_ring(dev, priv->rx_ring_size); 462 gfar_clean_rx_ring(dev, priv->rx_ring_size);
461 spin_unlock_irqrestore(&priv->lock, flags); 463
464 spin_unlock(&priv->rxlock);
465 spin_unlock_irqrestore(&priv->txlock, flags);
462 466
463 /* Now we take down the rings to rebuild them */ 467 /* Now we take down the rings to rebuild them */
464 stop_gfar(dev); 468 stop_gfar(dev);
@@ -488,10 +492,14 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
488 492
489 /* Halt TX and RX, and process the frames which 493 /* Halt TX and RX, and process the frames which
490 * have already been received */ 494 * have already been received */
491 spin_lock_irqsave(&priv->lock, flags); 495 spin_lock_irqsave(&priv->txlock, flags);
496 spin_lock(&priv->rxlock);
497
492 gfar_halt(dev); 498 gfar_halt(dev);
493 gfar_clean_rx_ring(dev, priv->rx_ring_size); 499 gfar_clean_rx_ring(dev, priv->rx_ring_size);
494 spin_unlock_irqrestore(&priv->lock, flags); 500
501 spin_unlock(&priv->rxlock);
502 spin_unlock_irqrestore(&priv->txlock, flags);
495 503
496 /* Now we take down the rings to rebuild them */ 504 /* Now we take down the rings to rebuild them */
497 stop_gfar(dev); 505 stop_gfar(dev);
@@ -523,7 +531,7 @@ static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
523 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) 531 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
524 return -EOPNOTSUPP; 532 return -EOPNOTSUPP;
525 533
526 spin_lock_irqsave(&priv->lock, flags); 534 spin_lock_irqsave(&priv->txlock, flags);
527 gfar_halt(dev); 535 gfar_halt(dev);
528 536
529 if (data) 537 if (data)
@@ -532,7 +540,7 @@ static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
532 dev->features &= ~NETIF_F_IP_CSUM; 540 dev->features &= ~NETIF_F_IP_CSUM;
533 541
534 gfar_start(dev); 542 gfar_start(dev);
535 spin_unlock_irqrestore(&priv->lock, flags); 543 spin_unlock_irqrestore(&priv->txlock, flags);
536 544
537 return 0; 545 return 0;
538} 546}
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 51ef181b1368..a6d5c43199cb 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -82,7 +82,7 @@ static ssize_t gfar_set_bd_stash(struct class_device *cdev,
82 else 82 else
83 return count; 83 return count;
84 84
85 spin_lock_irqsave(&priv->lock, flags); 85 spin_lock_irqsave(&priv->rxlock, flags);
86 86
87 /* Set the new stashing value */ 87 /* Set the new stashing value */
88 priv->bd_stash_en = new_setting; 88 priv->bd_stash_en = new_setting;
@@ -96,7 +96,7 @@ static ssize_t gfar_set_bd_stash(struct class_device *cdev,
96 96
97 gfar_write(&priv->regs->attr, temp); 97 gfar_write(&priv->regs->attr, temp);
98 98
99 spin_unlock_irqrestore(&priv->lock, flags); 99 spin_unlock_irqrestore(&priv->rxlock, flags);
100 100
101 return count; 101 return count;
102} 102}
@@ -118,7 +118,7 @@ static ssize_t gfar_set_rx_stash_size(struct class_device *cdev,
118 u32 temp; 118 u32 temp;
119 unsigned long flags; 119 unsigned long flags;
120 120
121 spin_lock_irqsave(&priv->lock, flags); 121 spin_lock_irqsave(&priv->rxlock, flags);
122 if (length > priv->rx_buffer_size) 122 if (length > priv->rx_buffer_size)
123 return count; 123 return count;
124 124
@@ -142,7 +142,7 @@ static ssize_t gfar_set_rx_stash_size(struct class_device *cdev,
142 142
143 gfar_write(&priv->regs->attr, temp); 143 gfar_write(&priv->regs->attr, temp);
144 144
145 spin_unlock_irqrestore(&priv->lock, flags); 145 spin_unlock_irqrestore(&priv->rxlock, flags);
146 146
147 return count; 147 return count;
148} 148}
@@ -166,7 +166,7 @@ static ssize_t gfar_set_rx_stash_index(struct class_device *cdev,
166 u32 temp; 166 u32 temp;
167 unsigned long flags; 167 unsigned long flags;
168 168
169 spin_lock_irqsave(&priv->lock, flags); 169 spin_lock_irqsave(&priv->rxlock, flags);
170 if (index > priv->rx_stash_size) 170 if (index > priv->rx_stash_size)
171 return count; 171 return count;
172 172
@@ -180,7 +180,7 @@ static ssize_t gfar_set_rx_stash_index(struct class_device *cdev,
180 temp |= ATTRELI_EI(index); 180 temp |= ATTRELI_EI(index);
181 gfar_write(&priv->regs->attreli, flags); 181 gfar_write(&priv->regs->attreli, flags);
182 182
183 spin_unlock_irqrestore(&priv->lock, flags); 183 spin_unlock_irqrestore(&priv->rxlock, flags);
184 184
185 return count; 185 return count;
186} 186}
@@ -205,7 +205,7 @@ static ssize_t gfar_set_fifo_threshold(struct class_device *cdev,
205 if (length > GFAR_MAX_FIFO_THRESHOLD) 205 if (length > GFAR_MAX_FIFO_THRESHOLD)
206 return count; 206 return count;
207 207
208 spin_lock_irqsave(&priv->lock, flags); 208 spin_lock_irqsave(&priv->txlock, flags);
209 209
210 priv->fifo_threshold = length; 210 priv->fifo_threshold = length;
211 211
@@ -214,7 +214,7 @@ static ssize_t gfar_set_fifo_threshold(struct class_device *cdev,
214 temp |= length; 214 temp |= length;
215 gfar_write(&priv->regs->fifo_tx_thr, temp); 215 gfar_write(&priv->regs->fifo_tx_thr, temp);
216 216
217 spin_unlock_irqrestore(&priv->lock, flags); 217 spin_unlock_irqrestore(&priv->txlock, flags);
218 218
219 return count; 219 return count;
220} 220}
@@ -240,7 +240,7 @@ static ssize_t gfar_set_fifo_starve(struct class_device *cdev,
240 if (num > GFAR_MAX_FIFO_STARVE) 240 if (num > GFAR_MAX_FIFO_STARVE)
241 return count; 241 return count;
242 242
243 spin_lock_irqsave(&priv->lock, flags); 243 spin_lock_irqsave(&priv->txlock, flags);
244 244
245 priv->fifo_starve = num; 245 priv->fifo_starve = num;
246 246
@@ -249,7 +249,7 @@ static ssize_t gfar_set_fifo_starve(struct class_device *cdev,
249 temp |= num; 249 temp |= num;
250 gfar_write(&priv->regs->fifo_tx_starve, temp); 250 gfar_write(&priv->regs->fifo_tx_starve, temp);
251 251
252 spin_unlock_irqrestore(&priv->lock, flags); 252 spin_unlock_irqrestore(&priv->txlock, flags);
253 253
254 return count; 254 return count;
255} 255}
@@ -274,7 +274,7 @@ static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev,
274 if (num > GFAR_MAX_FIFO_STARVE_OFF) 274 if (num > GFAR_MAX_FIFO_STARVE_OFF)
275 return count; 275 return count;
276 276
277 spin_lock_irqsave(&priv->lock, flags); 277 spin_lock_irqsave(&priv->txlock, flags);
278 278
279 priv->fifo_starve_off = num; 279 priv->fifo_starve_off = num;
280 280
@@ -283,7 +283,7 @@ static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev,
283 temp |= num; 283 temp |= num;
284 gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp); 284 gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp);
285 285
286 spin_unlock_irqrestore(&priv->lock, flags); 286 spin_unlock_irqrestore(&priv->txlock, flags);
287 287
288 return count; 288 return count;
289} 289}
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 79a8fbcf5f93..0d5fccc984bb 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -582,7 +582,6 @@ static int __init setup_adapter(int card_base, int type, int n)
582 INIT_WORK(&priv->rx_work, rx_bh, priv); 582 INIT_WORK(&priv->rx_work, rx_bh, priv);
583 dev->priv = priv; 583 dev->priv = priv;
584 sprintf(dev->name, "dmascc%i", 2 * n + i); 584 sprintf(dev->name, "dmascc%i", 2 * n + i);
585 SET_MODULE_OWNER(dev);
586 dev->base_addr = card_base; 585 dev->base_addr = card_base;
587 dev->irq = irq; 586 dev->irq = irq;
588 dev->open = scc_open; 587 dev->open = scc_open;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 6ace0e914fd1..5927784df3f9 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1550,7 +1550,6 @@ static unsigned char ax25_nocall[AX25_ADDR_LEN] =
1550 1550
1551static void scc_net_setup(struct net_device *dev) 1551static void scc_net_setup(struct net_device *dev)
1552{ 1552{
1553 SET_MODULE_OWNER(dev);
1554 dev->tx_queue_len = 16; /* should be enough... */ 1553 dev->tx_queue_len = 16; /* should be enough... */
1555 1554
1556 dev->open = scc_net_open; 1555 dev->open = scc_net_open;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index fe22479eb202..b49884048caa 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1098,7 +1098,6 @@ static void yam_setup(struct net_device *dev)
1098 1098
1099 dev->base_addr = yp->iobase; 1099 dev->base_addr = yp->iobase;
1100 dev->irq = yp->irq; 1100 dev->irq = yp->irq;
1101 SET_MODULE_OWNER(dev);
1102 1101
1103 dev->open = yam_open; 1102 dev->open = yam_open;
1104 dev->stop = yam_close; 1103 dev->stop = yam_close;
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 5e6d00752990..cff8598aa800 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -33,7 +33,7 @@ config DONGLE
33 33
34config ESI_DONGLE 34config ESI_DONGLE
35 tristate "ESI JetEye PC dongle" 35 tristate "ESI JetEye PC dongle"
36 depends on DONGLE && IRDA 36 depends on IRTTY_SIR && DONGLE && IRDA
37 help 37 help
38 Say Y here if you want to build support for the Extended Systems 38 Say Y here if you want to build support for the Extended Systems
39 JetEye PC dongle. To compile it as a module, choose M here. The ESI 39 JetEye PC dongle. To compile it as a module, choose M here. The ESI
@@ -44,7 +44,7 @@ config ESI_DONGLE
44 44
45config ACTISYS_DONGLE 45config ACTISYS_DONGLE
46 tristate "ACTiSYS IR-220L and IR220L+ dongle" 46 tristate "ACTiSYS IR-220L and IR220L+ dongle"
47 depends on DONGLE && IRDA 47 depends on IRTTY_SIR && DONGLE && IRDA
48 help 48 help
49 Say Y here if you want to build support for the ACTiSYS IR-220L and 49 Say Y here if you want to build support for the ACTiSYS IR-220L and
50 IR220L+ dongles. To compile it as a module, choose M here. The 50 IR220L+ dongles. To compile it as a module, choose M here. The
@@ -55,7 +55,7 @@ config ACTISYS_DONGLE
55 55
56config TEKRAM_DONGLE 56config TEKRAM_DONGLE
57 tristate "Tekram IrMate 210B dongle" 57 tristate "Tekram IrMate 210B dongle"
58 depends on DONGLE && IRDA 58 depends on IRTTY_SIR && DONGLE && IRDA
59 help 59 help
60 Say Y here if you want to build support for the Tekram IrMate 210B 60 Say Y here if you want to build support for the Tekram IrMate 210B
61 dongle. To compile it as a module, choose M here. The Tekram dongle 61 dongle. To compile it as a module, choose M here. The Tekram dongle
@@ -66,7 +66,7 @@ config TEKRAM_DONGLE
66 66
67config TOIM3232_DONGLE 67config TOIM3232_DONGLE
68 tristate "TOIM3232 IrDa dongle" 68 tristate "TOIM3232 IrDa dongle"
69 depends on DONGLE && IRDA 69 depends on IRTTY_SIR && DONGLE && IRDA
70 help 70 help
71 Say Y here if you want to build support for the Vishay/Temic 71 Say Y here if you want to build support for the Vishay/Temic
72 TOIM3232 and TOIM4232 based dongles. 72 TOIM3232 and TOIM4232 based dongles.
@@ -74,7 +74,7 @@ config TOIM3232_DONGLE
74 74
75config LITELINK_DONGLE 75config LITELINK_DONGLE
76 tristate "Parallax LiteLink dongle" 76 tristate "Parallax LiteLink dongle"
77 depends on DONGLE && IRDA 77 depends on IRTTY_SIR && DONGLE && IRDA
78 help 78 help
79 Say Y here if you want to build support for the Parallax Litelink 79 Say Y here if you want to build support for the Parallax Litelink
80 dongle. To compile it as a module, choose M here. The Parallax 80 dongle. To compile it as a module, choose M here. The Parallax
@@ -85,7 +85,7 @@ config LITELINK_DONGLE
85 85
86config MA600_DONGLE 86config MA600_DONGLE
87 tristate "Mobile Action MA600 dongle" 87 tristate "Mobile Action MA600 dongle"
88 depends on DONGLE && IRDA && EXPERIMENTAL 88 depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
89 help 89 help
90 Say Y here if you want to build support for the Mobile Action MA600 90 Say Y here if you want to build support for the Mobile Action MA600
91 dongle. To compile it as a module, choose M here. The MA600 dongle 91 dongle. To compile it as a module, choose M here. The MA600 dongle
@@ -98,7 +98,7 @@ config MA600_DONGLE
98 98
99config GIRBIL_DONGLE 99config GIRBIL_DONGLE
100 tristate "Greenwich GIrBIL dongle" 100 tristate "Greenwich GIrBIL dongle"
101 depends on DONGLE && IRDA && EXPERIMENTAL 101 depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
102 help 102 help
103 Say Y here if you want to build support for the Greenwich GIrBIL 103 Say Y here if you want to build support for the Greenwich GIrBIL
104 dongle. If you want to compile it as a module, choose M here. 104 dongle. If you want to compile it as a module, choose M here.
@@ -109,7 +109,7 @@ config GIRBIL_DONGLE
109 109
110config MCP2120_DONGLE 110config MCP2120_DONGLE
111 tristate "Microchip MCP2120" 111 tristate "Microchip MCP2120"
112 depends on DONGLE && IRDA && EXPERIMENTAL 112 depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
113 help 113 help
114 Say Y here if you want to build support for the Microchip MCP2120 114 Say Y here if you want to build support for the Microchip MCP2120
115 dongle. If you want to compile it as a module, choose M here. 115 dongle. If you want to compile it as a module, choose M here.
@@ -123,7 +123,7 @@ config MCP2120_DONGLE
123 123
124config OLD_BELKIN_DONGLE 124config OLD_BELKIN_DONGLE
125 tristate "Old Belkin dongle" 125 tristate "Old Belkin dongle"
126 depends on DONGLE && IRDA && EXPERIMENTAL 126 depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
127 help 127 help
128 Say Y here if you want to build support for the Adaptec Airport 1000 128 Say Y here if you want to build support for the Adaptec Airport 1000
129 and 2000 dongles. If you want to compile it as a module, choose 129 and 2000 dongles. If you want to compile it as a module, choose
@@ -132,7 +132,7 @@ config OLD_BELKIN_DONGLE
132 132
133config ACT200L_DONGLE 133config ACT200L_DONGLE
134 tristate "ACTiSYS IR-200L dongle" 134 tristate "ACTiSYS IR-200L dongle"
135 depends on DONGLE && IRDA && EXPERIMENTAL 135 depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
136 help 136 help
137 Say Y here if you want to build support for the ACTiSYS IR-200L 137 Say Y here if you want to build support for the ACTiSYS IR-200L
138 dongle. If you want to compile it as a module, choose M here. 138 dongle. If you want to compile it as a module, choose M here.
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 27ab75f20799..c1ce2398efea 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -46,4 +46,4 @@ obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
46obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o 46obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
47 47
48# The SIR helper module 48# The SIR helper module
49sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o 49sir-dev-objs := sir_dev.o sir_dongle.o
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 606243d11793..cd87593e4e8a 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1778,7 +1778,7 @@ static int irda_usb_probe(struct usb_interface *intf,
1778 1778
1779 if (self->needspatch) { 1779 if (self->needspatch) {
1780 ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0), 1780 ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0),
1781 0x02, 0x40, 0, 0, 0, 0, msecs_to_jiffies(500)); 1781 0x02, 0x40, 0, 0, NULL, 0, 500);
1782 if (ret < 0) { 1782 if (ret < 0) {
1783 IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret); 1783 IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret);
1784 goto err_out_3; 1784 goto err_out_3;
@@ -1815,14 +1815,14 @@ static int irda_usb_probe(struct usb_interface *intf,
1815 self->needspatch = (ret < 0); 1815 self->needspatch = (ret < 0);
1816 if (ret < 0) { 1816 if (ret < 0) {
1817 printk("patch_device failed\n"); 1817 printk("patch_device failed\n");
1818 goto err_out_4; 1818 goto err_out_5;
1819 } 1819 }
1820 1820
1821 /* replace IrDA class descriptor with what patched device is now reporting */ 1821 /* replace IrDA class descriptor with what patched device is now reporting */
1822 irda_desc = irda_usb_find_class_desc (self->usbintf); 1822 irda_desc = irda_usb_find_class_desc (self->usbintf);
1823 if (irda_desc == NULL) { 1823 if (irda_desc == NULL) {
1824 ret = -ENODEV; 1824 ret = -ENODEV;
1825 goto err_out_4; 1825 goto err_out_5;
1826 } 1826 }
1827 if (self->irda_desc) 1827 if (self->irda_desc)
1828 kfree (self->irda_desc); 1828 kfree (self->irda_desc);
@@ -1832,6 +1832,8 @@ static int irda_usb_probe(struct usb_interface *intf,
1832 1832
1833 return 0; 1833 return 0;
1834 1834
1835err_out_5:
1836 unregister_netdev(self->netdev);
1835err_out_4: 1837err_out_4:
1836 kfree(self->speed_buff); 1838 kfree(self->speed_buff);
1837err_out_3: 1839err_out_3:
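
The new err_out_5 label exists because, by the time the firmware patching can fail, the netdev has already been registered; the unwind labels therefore have to mirror the setup order exactly, each label undoing one step and falling through to the next. A generic sketch of the idiom (labels and helpers below are illustrative, not irda-usb's):

static int example_probe(void)
{
        int ret;

        ret = step_register_netdev();           /* illustrative helper */
        if (ret)
                goto err_out_4;

        ret = step_patch_firmware();            /* illustrative helper */
        if (ret)
                goto err_out_5;                 /* must undo the registration */

        return 0;

err_out_5:
        step_unregister_netdev();
err_out_4:
        step_free_buffers();
        return ret;
}
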
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index f69fb4cec76f..9fa294a546d6 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -15,23 +15,14 @@
15#define IRDA_SIR_H 15#define IRDA_SIR_H
16 16
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/workqueue.h>
18 19
19#include <net/irda/irda.h> 20#include <net/irda/irda.h>
20#include <net/irda/irda_device.h> // iobuff_t 21#include <net/irda/irda_device.h> // iobuff_t
21 22
22/* FIXME: unify irda_request with sir_fsm! */
23
24struct irda_request {
25 struct list_head lh_request;
26 unsigned long pending;
27 void (*func)(void *);
28 void *data;
29 struct timer_list timer;
30};
31
32struct sir_fsm { 23struct sir_fsm {
33 struct semaphore sem; 24 struct semaphore sem;
34 struct irda_request rq; 25 struct work_struct work;
35 unsigned state, substate; 26 unsigned state, substate;
36 int param; 27 int param;
37 int result; 28 int result;
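
struct sir_fsm drops the hand-rolled irda_request (list head, timer, pending flag) in favour of a plain work_struct, letting the generic workqueue machinery handle queuing and delayed execution. On kernels of this vintage (before 2.6.20) INIT_WORK takes the callback plus a data pointer, and delayed submission reuses the same work_struct; a hedged sketch of that usage, with illustrative names:

#include <linux/workqueue.h>

struct example_fsm {
        struct work_struct work;
        int state;
};

static void example_fsm_run(void *data)         /* old-style: void *data */
{
        struct example_fsm *fsm = data;
        /* ... advance the state machine; may requeue itself ... */
}

static void example_kick(struct workqueue_struct *wq, struct example_fsm *fsm)
{
        INIT_WORK(&fsm->work, example_fsm_run, fsm);    /* 3-arg form on 2.6.1x */
        queue_work(wq, &fsm->work);
        /* or, to run later:
         * queue_delayed_work(wq, &fsm->work, msecs_to_jiffies(50)); */
}
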
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index ea7c9464d46a..3b5854d10c17 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -23,6 +23,298 @@
23 23
24#include "sir-dev.h" 24#include "sir-dev.h"
25 25
26
27static struct workqueue_struct *irda_sir_wq;
28
29/* STATE MACHINE */
30
31/* substate handler of the config-fsm to handle the cases where we want
32 * to wait for transmit completion before changing the port configuration
33 */
34
35static int sirdev_tx_complete_fsm(struct sir_dev *dev)
36{
37 struct sir_fsm *fsm = &dev->fsm;
38 unsigned next_state, delay;
39 unsigned bytes_left;
40
41 do {
42 next_state = fsm->substate; /* default: stay in current substate */
43 delay = 0;
44
45 switch(fsm->substate) {
46
47 case SIRDEV_STATE_WAIT_XMIT:
48 if (dev->drv->chars_in_buffer)
49 bytes_left = dev->drv->chars_in_buffer(dev);
50 else
51 bytes_left = 0;
52 if (!bytes_left) {
53 next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
54 break;
55 }
56
57 if (dev->speed > 115200)
58 delay = (bytes_left*8*10000) / (dev->speed/100);
59 else if (dev->speed > 0)
60 delay = (bytes_left*10*10000) / (dev->speed/100);
61 else
62 delay = 0;
63 /* expected delay (usec) until remaining bytes are sent */
64 if (delay < 100) {
65 udelay(delay);
66 delay = 0;
67 break;
68 }
69 /* sleep some longer delay (msec) */
70 delay = (delay+999) / 1000;
71 break;
72
73 case SIRDEV_STATE_WAIT_UNTIL_SENT:
74 /* block until underlaying hardware buffer are empty */
75 if (dev->drv->wait_until_sent)
76 dev->drv->wait_until_sent(dev);
77 next_state = SIRDEV_STATE_TX_DONE;
78 break;
79
80 case SIRDEV_STATE_TX_DONE:
81 return 0;
82
83 default:
84 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
85 return -EINVAL;
86 }
87 fsm->substate = next_state;
88 } while (delay == 0);
89 return delay;
90}
91
92/*
93 * Function sirdev_config_fsm
94 *
95 * State machine to handle the configuration of the device (and attached dongle, if any).
96 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
97 * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
98 * long. Instead, for longer delays we start a timer to reschedule us later.
99 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
100 * Both must be unlocked/restarted on completion - but only on final exit.
101 */
102
103static void sirdev_config_fsm(void *data)
104{
105 struct sir_dev *dev = data;
106 struct sir_fsm *fsm = &dev->fsm;
107 int next_state;
108 int ret = -1;
109 unsigned delay;
110
111 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
112
113 do {
114 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
115 __FUNCTION__, fsm->state, fsm->substate);
116
117 next_state = fsm->state;
118 delay = 0;
119
120 switch(fsm->state) {
121
122 case SIRDEV_STATE_DONGLE_OPEN:
123 if (dev->dongle_drv != NULL) {
124 ret = sirdev_put_dongle(dev);
125 if (ret) {
126 fsm->result = -EINVAL;
127 next_state = SIRDEV_STATE_ERROR;
128 break;
129 }
130 }
131
132 /* Initialize dongle */
133 ret = sirdev_get_dongle(dev, fsm->param);
134 if (ret) {
135 fsm->result = ret;
136 next_state = SIRDEV_STATE_ERROR;
137 break;
138 }
139
140 /* Dongles are powered through the modem control lines which
141 * were just set during open. Before resetting, let's wait for
142 * the power to stabilize. This is what some dongle drivers did
143 * in open before, while others didn't - should be safe anyway.
144 */
145
146 delay = 50;
147 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
148 next_state = SIRDEV_STATE_DONGLE_RESET;
149
150 fsm->param = 9600;
151
152 break;
153
154 case SIRDEV_STATE_DONGLE_CLOSE:
155 /* shouldn't we just treat this as success=? */
156 if (dev->dongle_drv == NULL) {
157 fsm->result = -EINVAL;
158 next_state = SIRDEV_STATE_ERROR;
159 break;
160 }
161
162 ret = sirdev_put_dongle(dev);
163 if (ret) {
164 fsm->result = ret;
165 next_state = SIRDEV_STATE_ERROR;
166 break;
167 }
168 next_state = SIRDEV_STATE_DONE;
169 break;
170
171 case SIRDEV_STATE_SET_DTR_RTS:
172 ret = sirdev_set_dtr_rts(dev,
173 (fsm->param&0x02) ? TRUE : FALSE,
174 (fsm->param&0x01) ? TRUE : FALSE);
175 next_state = SIRDEV_STATE_DONE;
176 break;
177
178 case SIRDEV_STATE_SET_SPEED:
179 fsm->substate = SIRDEV_STATE_WAIT_XMIT;
180 next_state = SIRDEV_STATE_DONGLE_CHECK;
181 break;
182
183 case SIRDEV_STATE_DONGLE_CHECK:
184 ret = sirdev_tx_complete_fsm(dev);
185 if (ret < 0) {
186 fsm->result = ret;
187 next_state = SIRDEV_STATE_ERROR;
188 break;
189 }
190 if ((delay=ret) != 0)
191 break;
192
193 if (dev->dongle_drv) {
194 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
195 next_state = SIRDEV_STATE_DONGLE_RESET;
196 }
197 else {
198 dev->speed = fsm->param;
199 next_state = SIRDEV_STATE_PORT_SPEED;
200 }
201 break;
202
203 case SIRDEV_STATE_DONGLE_RESET:
204 if (dev->dongle_drv->reset) {
205 ret = dev->dongle_drv->reset(dev);
206 if (ret < 0) {
207 fsm->result = ret;
208 next_state = SIRDEV_STATE_ERROR;
209 break;
210 }
211 }
212 else
213 ret = 0;
214 if ((delay=ret) == 0) {
215 /* set serial port according to dongle default speed */
216 if (dev->drv->set_speed)
217 dev->drv->set_speed(dev, dev->speed);
218 fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
219 next_state = SIRDEV_STATE_DONGLE_SPEED;
220 }
221 break;
222
223 case SIRDEV_STATE_DONGLE_SPEED:
224 if (dev->dongle_drv->reset) {
225 ret = dev->dongle_drv->set_speed(dev, fsm->param);
226 if (ret < 0) {
227 fsm->result = ret;
228 next_state = SIRDEV_STATE_ERROR;
229 break;
230 }
231 }
232 else
233 ret = 0;
234 if ((delay=ret) == 0)
235 next_state = SIRDEV_STATE_PORT_SPEED;
236 break;
237
238 case SIRDEV_STATE_PORT_SPEED:
239 /* Finally we are ready to change the serial port speed */
240 if (dev->drv->set_speed)
241 dev->drv->set_speed(dev, dev->speed);
242 dev->new_speed = 0;
243 next_state = SIRDEV_STATE_DONE;
244 break;
245
246 case SIRDEV_STATE_DONE:
247 /* Signal network layer so it can send more frames */
248 netif_wake_queue(dev->netdev);
249 next_state = SIRDEV_STATE_COMPLETE;
250 break;
251
252 default:
253 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
254 fsm->result = -EINVAL;
255 /* fall thru */
256
257 case SIRDEV_STATE_ERROR:
258 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
259
260#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
261 netif_stop_queue(dev->netdev);
262#else
263 netif_wake_queue(dev->netdev);
264#endif
265 /* fall thru */
266
267 case SIRDEV_STATE_COMPLETE:
268 /* config change finished, so we are not busy any longer */
269 sirdev_enable_rx(dev);
270 up(&fsm->sem);
271 return;
272 }
273 fsm->state = next_state;
274 } while(!delay);
275
276 queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
277}
278
279/* schedule some device configuration task for execution by kIrDAd
280 * on behalf of the above state machine.
281 * can be called from process or interrupt/tasklet context.
282 */
283
284int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
285{
286 struct sir_fsm *fsm = &dev->fsm;
287
288 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
289
290 if (down_trylock(&fsm->sem)) {
291 if (in_interrupt() || in_atomic() || irqs_disabled()) {
292 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
293 return -EWOULDBLOCK;
294 } else
295 down(&fsm->sem);
296 }
297
298 if (fsm->state == SIRDEV_STATE_DEAD) {
299 /* race with sirdev_close should never happen */
300 IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
301 up(&fsm->sem);
302 return -ESTALE; /* or better EPIPE? */
303 }
304
305 netif_stop_queue(dev->netdev);
306 atomic_set(&dev->enable_rx, 0);
307
308 fsm->state = initial_state;
309 fsm->param = param;
310 fsm->result = 0;
311
312 INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
313 queue_work(irda_sir_wq, &fsm->work);
314 return 0;
315}
316
317
26/***************************************************************************/ 318/***************************************************************************/
27 319
28void sirdev_enable_rx(struct sir_dev *dev) 320void sirdev_enable_rx(struct sir_dev *dev)
@@ -619,10 +911,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
619 spin_lock_init(&dev->tx_lock); 911 spin_lock_init(&dev->tx_lock);
620 init_MUTEX(&dev->fsm.sem); 912 init_MUTEX(&dev->fsm.sem);
621 913
622 INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
623 dev->fsm.rq.pending = 0;
624 init_timer(&dev->fsm.rq.timer);
625
626 dev->drv = drv; 914 dev->drv = drv;
627 dev->netdev = ndev; 915 dev->netdev = ndev;
628 916
@@ -682,3 +970,22 @@ int sirdev_put_instance(struct sir_dev *dev)
682} 970}
683EXPORT_SYMBOL(sirdev_put_instance); 971EXPORT_SYMBOL(sirdev_put_instance);
684 972
973static int __init sir_wq_init(void)
974{
975 irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
976 if (!irda_sir_wq)
977 return -ENOMEM;
978 return 0;
979}
980
981static void __exit sir_wq_exit(void)
982{
983 destroy_workqueue(irda_sir_wq);
984}
985
986module_init(sir_wq_init);
987module_exit(sir_wq_exit);
988
989MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
990MODULE_DESCRIPTION("IrDA SIR core");
991MODULE_LICENSE("GPL");
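
The module now owns a single-threaded workqueue created at init and torn down at exit. Worth noting: destroy_workqueue() flushes pending work before freeing, so the teardown above is sufficient as long as no device can still requeue its config work at that point. A minimal sketch of the lifecycle, with illustrative names:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
        example_wq = create_singlethread_workqueue("example_wq");
        return example_wq ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
        /* flushes outstanding work items, then frees the queue; callers
         * must only ensure nothing queues new work afterwards */
        destroy_workqueue(example_wq);
}
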
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
deleted file mode 100644
index e3904d6bfecd..000000000000
--- a/drivers/net/irda/sir_kthread.c
+++ /dev/null
@@ -1,508 +0,0 @@
1/*********************************************************************
2 *
3 * sir_kthread.c: dedicated thread to process scheduled
4 * sir device setup requests
5 *
6 * Copyright (c) 2002 Martin Diehl
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 ********************************************************************/
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/version.h>
18#include <linux/init.h>
19#include <linux/smp_lock.h>
20#include <linux/completion.h>
21#include <linux/delay.h>
22
23#include <net/irda/irda.h>
24
25#include "sir-dev.h"
26
27/**************************************************************************
28 *
29 * kIrDAd kernel thread and config state machine
30 *
31 */
32
33struct irda_request_queue {
34 struct list_head request_list;
35 spinlock_t lock;
36 task_t *thread;
37 struct completion exit;
38 wait_queue_head_t kick, done;
39 atomic_t num_pending;
40};
41
42static struct irda_request_queue irda_rq_queue;
43
44static int irda_queue_request(struct irda_request *rq)
45{
46 int ret = 0;
47 unsigned long flags;
48
49 if (!test_and_set_bit(0, &rq->pending)) {
50 spin_lock_irqsave(&irda_rq_queue.lock, flags);
51 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
52 wake_up(&irda_rq_queue.kick);
53 atomic_inc(&irda_rq_queue.num_pending);
54 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
55 ret = 1;
56 }
57 return ret;
58}
59
60static void irda_request_timer(unsigned long data)
61{
62 struct irda_request *rq = (struct irda_request *)data;
63 unsigned long flags;
64
65 spin_lock_irqsave(&irda_rq_queue.lock, flags);
66 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
67 wake_up(&irda_rq_queue.kick);
68 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
69}
70
71static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
72{
73 int ret = 0;
74 struct timer_list *timer = &rq->timer;
75
76 if (!test_and_set_bit(0, &rq->pending)) {
77 timer->expires = jiffies + delay;
78 timer->function = irda_request_timer;
79 timer->data = (unsigned long)rq;
80 atomic_inc(&irda_rq_queue.num_pending);
81 add_timer(timer);
82 ret = 1;
83 }
84 return ret;
85}
86
87static void run_irda_queue(void)
88{
89 unsigned long flags;
90 struct list_head *entry, *tmp;
91 struct irda_request *rq;
92
93 spin_lock_irqsave(&irda_rq_queue.lock, flags);
94 list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
95 rq = list_entry(entry, struct irda_request, lh_request);
96 list_del_init(entry);
97 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
98
99 clear_bit(0, &rq->pending);
100 rq->func(rq->data);
101
102 if (atomic_dec_and_test(&irda_rq_queue.num_pending))
103 wake_up(&irda_rq_queue.done);
104
105 spin_lock_irqsave(&irda_rq_queue.lock, flags);
106 }
107 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
108}
109
110static int irda_thread(void *startup)
111{
112 DECLARE_WAITQUEUE(wait, current);
113
114 daemonize("kIrDAd");
115
116 irda_rq_queue.thread = current;
117
118 complete((struct completion *)startup);
119
120 while (irda_rq_queue.thread != NULL) {
121
122 /* We use TASK_INTERRUPTIBLE, rather than
123 * TASK_UNINTERRUPTIBLE. Andrew Morton made this
124 * change ; he told me that it is safe, because "signal
125 * blocking is now handled in daemonize()", he added
126 * that the problem is that "uninterruptible sleep
127 * contributes to load average", making user worry.
128 * Jean II */
129 set_task_state(current, TASK_INTERRUPTIBLE);
130 add_wait_queue(&irda_rq_queue.kick, &wait);
131 if (list_empty(&irda_rq_queue.request_list))
132 schedule();
133 else
134 __set_task_state(current, TASK_RUNNING);
135 remove_wait_queue(&irda_rq_queue.kick, &wait);
136
137 /* make swsusp happy with our thread */
138 try_to_freeze();
139
140 run_irda_queue();
141 }
142
143#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
144 reparent_to_init();
145#endif
146 complete_and_exit(&irda_rq_queue.exit, 0);
147 /* never reached */
148 return 0;
149}
150
151
152static void flush_irda_queue(void)
153{
154 if (atomic_read(&irda_rq_queue.num_pending)) {
155
156 DECLARE_WAITQUEUE(wait, current);
157
158 if (!list_empty(&irda_rq_queue.request_list))
159 run_irda_queue();
160
161 set_task_state(current, TASK_UNINTERRUPTIBLE);
162 add_wait_queue(&irda_rq_queue.done, &wait);
163 if (atomic_read(&irda_rq_queue.num_pending))
164 schedule();
165 else
166 __set_task_state(current, TASK_RUNNING);
167 remove_wait_queue(&irda_rq_queue.done, &wait);
168 }
169}
170
171/* substate handler of the config-fsm to handle the cases where we want
172 * to wait for transmit completion before changing the port configuration
173 */
174
175static int irda_tx_complete_fsm(struct sir_dev *dev)
176{
177 struct sir_fsm *fsm = &dev->fsm;
178 unsigned next_state, delay;
179 unsigned bytes_left;
180
181 do {
182 next_state = fsm->substate; /* default: stay in current substate */
183 delay = 0;
184
185 switch(fsm->substate) {
186
187 case SIRDEV_STATE_WAIT_XMIT:
188 if (dev->drv->chars_in_buffer)
189 bytes_left = dev->drv->chars_in_buffer(dev);
190 else
191 bytes_left = 0;
192 if (!bytes_left) {
193 next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
194 break;
195 }
196
197 if (dev->speed > 115200)
198 delay = (bytes_left*8*10000) / (dev->speed/100);
199 else if (dev->speed > 0)
200 delay = (bytes_left*10*10000) / (dev->speed/100);
201 else
202 delay = 0;
203 /* expected delay (usec) until remaining bytes are sent */
204 if (delay < 100) {
205 udelay(delay);
206 delay = 0;
207 break;
208 }
209 /* sleep some longer delay (msec) */
210 delay = (delay+999) / 1000;
211 break;
212
213 case SIRDEV_STATE_WAIT_UNTIL_SENT:
214 /* block until underlaying hardware buffer are empty */
215 if (dev->drv->wait_until_sent)
216 dev->drv->wait_until_sent(dev);
217 next_state = SIRDEV_STATE_TX_DONE;
218 break;
219
220 case SIRDEV_STATE_TX_DONE:
221 return 0;
222
223 default:
224 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
225 return -EINVAL;
226 }
227 fsm->substate = next_state;
228 } while (delay == 0);
229 return delay;
230}
231
232/*
233 * Function irda_config_fsm
234 *
235 * State machine to handle the configuration of the device (and attached dongle, if any).
236 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
237 * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
238 * long. Instead, for longer delays we start a timer to reschedule us later.
239 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
240 * Both must be unlocked/restarted on completion - but only on final exit.
241 */
242
243static void irda_config_fsm(void *data)
244{
245 struct sir_dev *dev = data;
246 struct sir_fsm *fsm = &dev->fsm;
247 int next_state;
248 int ret = -1;
249 unsigned delay;
250
251 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
252
253 do {
254 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
255 __FUNCTION__, fsm->state, fsm->substate);
256
257 next_state = fsm->state;
258 delay = 0;
259
260 switch(fsm->state) {
261
262 case SIRDEV_STATE_DONGLE_OPEN:
263 if (dev->dongle_drv != NULL) {
264 ret = sirdev_put_dongle(dev);
265 if (ret) {
266 fsm->result = -EINVAL;
267 next_state = SIRDEV_STATE_ERROR;
268 break;
269 }
270 }
271
272 /* Initialize dongle */
273 ret = sirdev_get_dongle(dev, fsm->param);
274 if (ret) {
275 fsm->result = ret;
276 next_state = SIRDEV_STATE_ERROR;
277 break;
278 }
279
280 /* Dongles are powered through the modem control lines which
281 * were just set during open. Before resetting, let's wait for
282 * the power to stabilize. This is what some dongle drivers did
283 * in open before, while others didn't - should be safe anyway.
284 */
285
286 delay = 50;
287 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
288 next_state = SIRDEV_STATE_DONGLE_RESET;
289
290 fsm->param = 9600;
291
292 break;
293
294 case SIRDEV_STATE_DONGLE_CLOSE:
295 /* shouldn't we just treat this as success=? */
296 if (dev->dongle_drv == NULL) {
297 fsm->result = -EINVAL;
298 next_state = SIRDEV_STATE_ERROR;
299 break;
300 }
301
302 ret = sirdev_put_dongle(dev);
303 if (ret) {
304 fsm->result = ret;
305 next_state = SIRDEV_STATE_ERROR;
306 break;
307 }
308 next_state = SIRDEV_STATE_DONE;
309 break;
310
311 case SIRDEV_STATE_SET_DTR_RTS:
312 ret = sirdev_set_dtr_rts(dev,
313 (fsm->param&0x02) ? TRUE : FALSE,
314 (fsm->param&0x01) ? TRUE : FALSE);
315 next_state = SIRDEV_STATE_DONE;
316 break;
317
318 case SIRDEV_STATE_SET_SPEED:
319 fsm->substate = SIRDEV_STATE_WAIT_XMIT;
320 next_state = SIRDEV_STATE_DONGLE_CHECK;
321 break;
322
323 case SIRDEV_STATE_DONGLE_CHECK:
324 ret = irda_tx_complete_fsm(dev);
325 if (ret < 0) {
326 fsm->result = ret;
327 next_state = SIRDEV_STATE_ERROR;
328 break;
329 }
330 if ((delay=ret) != 0)
331 break;
332
333 if (dev->dongle_drv) {
334 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
335 next_state = SIRDEV_STATE_DONGLE_RESET;
336 }
337 else {
338 dev->speed = fsm->param;
339 next_state = SIRDEV_STATE_PORT_SPEED;
340 }
341 break;
342
343 case SIRDEV_STATE_DONGLE_RESET:
344 if (dev->dongle_drv->reset) {
345 ret = dev->dongle_drv->reset(dev);
346 if (ret < 0) {
347 fsm->result = ret;
348 next_state = SIRDEV_STATE_ERROR;
349 break;
350 }
351 }
352 else
353 ret = 0;
354 if ((delay=ret) == 0) {
355 /* set serial port according to dongle default speed */
356 if (dev->drv->set_speed)
357 dev->drv->set_speed(dev, dev->speed);
358 fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
359 next_state = SIRDEV_STATE_DONGLE_SPEED;
360 }
361 break;
362
363 case SIRDEV_STATE_DONGLE_SPEED:
364 if (dev->dongle_drv->reset) {
365 ret = dev->dongle_drv->set_speed(dev, fsm->param);
366 if (ret < 0) {
367 fsm->result = ret;
368 next_state = SIRDEV_STATE_ERROR;
369 break;
370 }
371 }
372 else
373 ret = 0;
374 if ((delay=ret) == 0)
375 next_state = SIRDEV_STATE_PORT_SPEED;
376 break;
377
378 case SIRDEV_STATE_PORT_SPEED:
379 /* Finally we are ready to change the serial port speed */
380 if (dev->drv->set_speed)
381 dev->drv->set_speed(dev, dev->speed);
382 dev->new_speed = 0;
383 next_state = SIRDEV_STATE_DONE;
384 break;
385
386 case SIRDEV_STATE_DONE:
387 /* Signal network layer so it can send more frames */
388 netif_wake_queue(dev->netdev);
389 next_state = SIRDEV_STATE_COMPLETE;
390 break;
391
392 default:
393 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
394 fsm->result = -EINVAL;
395 /* fall thru */
396
397 case SIRDEV_STATE_ERROR:
398 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
399
400#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
401 netif_stop_queue(dev->netdev);
402#else
403 netif_wake_queue(dev->netdev);
404#endif
405 /* fall thru */
406
407 case SIRDEV_STATE_COMPLETE:
408 /* config change finished, so we are not busy any longer */
409 sirdev_enable_rx(dev);
410 up(&fsm->sem);
411 return;
412 }
413 fsm->state = next_state;
414 } while(!delay);
415
416 irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
417}
418
419/* schedule some device configuration task for execution by kIrDAd
420 * on behalf of the above state machine.
421 * can be called from process or interrupt/tasklet context.
422 */
423
424int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
425{
426 struct sir_fsm *fsm = &dev->fsm;
427 int xmit_was_down;
428
429 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
430
431 if (down_trylock(&fsm->sem)) {
432 if (in_interrupt() || in_atomic() || irqs_disabled()) {
433 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
434 return -EWOULDBLOCK;
435 } else
436 down(&fsm->sem);
437 }
438
439 if (fsm->state == SIRDEV_STATE_DEAD) {
440 /* race with sirdev_close should never happen */
441 IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
442 up(&fsm->sem);
443 return -ESTALE; /* or better EPIPE? */
444 }
445
446 xmit_was_down = netif_queue_stopped(dev->netdev);
447 netif_stop_queue(dev->netdev);
448 atomic_set(&dev->enable_rx, 0);
449
450 fsm->state = initial_state;
451 fsm->param = param;
452 fsm->result = 0;
453
454 INIT_LIST_HEAD(&fsm->rq.lh_request);
455 fsm->rq.pending = 0;
456 fsm->rq.func = irda_config_fsm;
457 fsm->rq.data = dev;
458
459 if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */
460 atomic_set(&dev->enable_rx, 1);
461 if (!xmit_was_down)
462 netif_wake_queue(dev->netdev);
463 up(&fsm->sem);
464 return -EAGAIN;
465 }
466 return 0;
467}
468
469static int __init irda_thread_create(void)
470{
471 struct completion startup;
472 int pid;
473
474 spin_lock_init(&irda_rq_queue.lock);
475 irda_rq_queue.thread = NULL;
476 INIT_LIST_HEAD(&irda_rq_queue.request_list);
477 init_waitqueue_head(&irda_rq_queue.kick);
478 init_waitqueue_head(&irda_rq_queue.done);
479 atomic_set(&irda_rq_queue.num_pending, 0);
480
481 init_completion(&startup);
482 pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
483 if (pid <= 0)
484 return -EAGAIN;
485 else
486 wait_for_completion(&startup);
487
488 return 0;
489}
490
491static void __exit irda_thread_join(void)
492{
493 if (irda_rq_queue.thread) {
494 flush_irda_queue();
495 init_completion(&irda_rq_queue.exit);
496 irda_rq_queue.thread = NULL;
497 wake_up(&irda_rq_queue.kick);
498 wait_for_completion(&irda_rq_queue.exit);
499 }
500}
501
502module_init(irda_thread_create);
503module_exit(irda_thread_join);
504
505MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
506MODULE_DESCRIPTION("IrDA SIR core");
507MODULE_LICENSE("GPL");
508
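
Everything this deleted file did by hand, the request list and spinlock, the wake/done waitqueues, the daemonize()d kIrDAd thread and its freezer handling, is exactly the service the shared workqueue infrastructure already provides, which is why the replacement in sir_dev.c is so much smaller. Even where a dedicated thread really is wanted, the kthread API available in this kernel generation removes most of the same boilerplate; a hedged sketch for comparison, with illustrative names:

#include <linux/kthread.h>

static int example_thread(void *arg)
{
        while (!kthread_should_stop()) {
                /* ... wait for and process queued requests ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}

/* creation/teardown: no daemonize(), completion or reparenting needed */
static struct task_struct *example_start(void)
{
        return kthread_run(example_thread, NULL, "kexampled");
}

static void example_stop(struct task_struct *task)
{
        kthread_stop(task);
}
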
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index bbcfc8ec35a1..a4674044bd6f 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -54,6 +54,7 @@
54#include <linux/rtnetlink.h> 54#include <linux/rtnetlink.h>
55#include <linux/serial_reg.h> 55#include <linux/serial_reg.h>
56#include <linux/dma-mapping.h> 56#include <linux/dma-mapping.h>
57#include <linux/pnp.h>
57#include <linux/platform_device.h> 58#include <linux/platform_device.h>
58 59
59#include <asm/io.h> 60#include <asm/io.h>
@@ -225,6 +226,8 @@ static int __init smsc_superio_lpc(unsigned short cfg_base);
225#ifdef CONFIG_PCI 226#ifdef CONFIG_PCI
226static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf); 227static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
227static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); 228static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
229static void __init preconfigure_ali_port(struct pci_dev *dev,
230 unsigned short port);
228static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); 231static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
229static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, 232static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
230 unsigned short ircc_fir, 233 unsigned short ircc_fir,
@@ -356,6 +359,16 @@ static inline void register_bank(int iobase, int bank)
356 iobase + IRCC_MASTER); 359 iobase + IRCC_MASTER);
357} 360}
358 361
362#ifdef CONFIG_PNP
363/* PNP hotplug support */
364static const struct pnp_device_id smsc_ircc_pnp_table[] = {
365 { .id = "SMCf010", .driver_data = 0 },
366 /* and presumably others */
367 { }
368};
369MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
370#endif
371
359 372
360/******************************************************************************* 373/*******************************************************************************
361 * 374 *
@@ -2070,7 +2083,8 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
2070 2083
2071/* PROBING 2084/* PROBING
2072 * 2085 *
2073 * 2086 * REVISIT we can be told about the device by PNP, and should use that info
2087 * instead of probing hardware and creating a platform_device ...
2074 */ 2088 */
2075 2089
2076static int __init smsc_ircc_look_for_chips(void) 2090static int __init smsc_ircc_look_for_chips(void)
@@ -2327,9 +2341,14 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
2327 * pre-configuration not properly done by the BIOS (especially laptops) 2341 * pre-configuration not properly done by the BIOS (especially laptops)
2328 * This code is based in part on smcinit.c, tosh1800-smcinit.c 2342 * This code is based in part on smcinit.c, tosh1800-smcinit.c
2329 * and tosh2450-smcinit.c. The table lists the device entries 2343 * and tosh2450-smcinit.c. The table lists the device entries
2330 * for ISA bridges with an LPC (Local Peripheral Configurator) 2344 * for ISA bridges with an LPC (Low Pin Count) controller which
2331 * that are in turn used to configure the SMSC device with default 2345 * handles the communication with the SMSC device. After the LPC
2332 * SIR and FIR I/O ports, DMA and IRQ. 2346 * controller is initialized through PCI, the SMSC device is initialized
2347 * through a dedicated port in the ISA port-mapped I/O area, this latter
2348 * area is used to configure the SMSC device with default
2349 * SIR and FIR I/O ports, DMA and IRQ. Different vendors have
2350 * used different sets of parameters and different control port
2351 * addresses making a subsystem device table necessary.
2333 */ 2352 */
2334#ifdef CONFIG_PCI 2353#ifdef CONFIG_PCI
2335#define PCIID_VENDOR_INTEL 0x8086 2354#define PCIID_VENDOR_INTEL 0x8086
@@ -2340,9 +2359,10 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __dev
2340 .device = 0x24cc, 2359 .device = 0x24cc,
2341 .subvendor = 0x103c, 2360 .subvendor = 0x103c,
2342 .subdevice = 0x088c, 2361 .subdevice = 0x088c,
2343 .sir_io = 0x02f8, /* Quite certain these are the same for nc8000 as for nc6000 */ 2362 /* Quite certain these are the same for nc8000 as for nc6000 */
2363 .sir_io = 0x02f8,
2344 .fir_io = 0x0130, 2364 .fir_io = 0x0130,
2345 .fir_irq = 0x09, 2365 .fir_irq = 0x05,
2346 .fir_dma = 0x03, 2366 .fir_dma = 0x03,
2347 .cfg_base = 0x004e, 2367 .cfg_base = 0x004e,
2348 .preconfigure = preconfigure_through_82801, 2368 .preconfigure = preconfigure_through_82801,
@@ -2355,60 +2375,79 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __dev
2355 .subdevice = 0x0890, 2375 .subdevice = 0x0890,
2356 .sir_io = 0x02f8, 2376 .sir_io = 0x02f8,
2357 .fir_io = 0x0130, 2377 .fir_io = 0x0130,
2358 .fir_irq = 0x09, 2378 .fir_irq = 0x05,
2359 .fir_dma = 0x03, 2379 .fir_dma = 0x03,
2360 .cfg_base = 0x004e, 2380 .cfg_base = 0x004e,
2361 .preconfigure = preconfigure_through_82801, 2381 .preconfigure = preconfigure_through_82801,
2362 .name = "HP nc6000", 2382 .name = "HP nc6000",
2363 }, 2383 },
2364 { 2384 {
2365 .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */ 2385 /* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */
2386 .vendor = PCIID_VENDOR_INTEL,
2366 .device = 0x24c0, 2387 .device = 0x24c0,
2367 .subvendor = 0x1179, 2388 .subvendor = 0x1179,
2368 .subdevice = 0xffff, /* 0xffff is "any", Not sure, 0x0001 or 0x0002 */ 2389 .subdevice = 0xffff, /* 0xffff is "any" */
2369 .sir_io = 0x03f8, 2390 .sir_io = 0x03f8,
2370 .fir_io = 0x0130, 2391 .fir_io = 0x0130,
2371 .fir_irq = 0x07, 2392 .fir_irq = 0x07,
2372 .fir_dma = 0x01, 2393 .fir_dma = 0x01,
2373 .cfg_base = 0x002e, 2394 .cfg_base = 0x002e,
2374 .preconfigure = preconfigure_through_82801, 2395 .preconfigure = preconfigure_through_82801,
2375 .name = "Toshiba Satellite 2450", 2396 .name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge",
2376 }, 2397 },
2377 { 2398 {
2378 .vendor = PCIID_VENDOR_INTEL, /* Intel 82801CAM ISA bridge */ 2399 .vendor = PCIID_VENDOR_INTEL, /* Intel 82801CAM ISA bridge */
2379 .device = 0x248c, /* Some use 24cc? */ 2400 .device = 0x248c,
2401 .subvendor = 0x1179,
2402 .subdevice = 0xffff, /* 0xffff is "any" */
2403 .sir_io = 0x03f8,
2404 .fir_io = 0x0130,
2405 .fir_irq = 0x03,
2406 .fir_dma = 0x03,
2407 .cfg_base = 0x002e,
2408 .preconfigure = preconfigure_through_82801,
2409 .name = "Toshiba laptop with Intel 82801CAM ISA bridge",
2410 },
2411 {
2412 /* 82801DBM (ICH4-M) LPC Interface Bridge */
2413 .vendor = PCIID_VENDOR_INTEL,
2414 .device = 0x24cc,
2380 .subvendor = 0x1179, 2415 .subvendor = 0x1179,
2381 .subdevice = 0xffff, /* 0xffff is "any", Not sure, 0x0001 or 0x0002 */ 2416 .subdevice = 0xffff, /* 0xffff is "any" */
2382 .sir_io = 0x03f8, 2417 .sir_io = 0x03f8,
2383 .fir_io = 0x0130, 2418 .fir_io = 0x0130,
2384 .fir_irq = 0x03, 2419 .fir_irq = 0x03,
2385 .fir_dma = 0x03, 2420 .fir_dma = 0x03,
2386 .cfg_base = 0x002e, 2421 .cfg_base = 0x002e,
2387 .preconfigure = preconfigure_through_82801, 2422 .preconfigure = preconfigure_through_82801,
2388 .name = "Toshiba Satellite 5100/5200, Tecra 9100", 2423 .name = "Toshiba laptop with Intel 8281DBM LPC bridge",
2389 }, 2424 },
2390 { 2425 {
2391 .vendor = PCIID_VENDOR_ALI, /* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */ 2426 /* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */
2427 .vendor = PCIID_VENDOR_ALI,
2392 .device = 0x1533, 2428 .device = 0x1533,
2393 .subvendor = 0x1179, 2429 .subvendor = 0x1179,
2394 .subdevice = 0xffff, /* 0xffff is "any", Not sure, 0x0001 or 0x0002 */ 2430 .subdevice = 0xffff, /* 0xffff is "any" */
2395 .sir_io = 0x02e8, 2431 .sir_io = 0x02e8,
2396 .fir_io = 0x02f8, 2432 .fir_io = 0x02f8,
2397 .fir_irq = 0x07, 2433 .fir_irq = 0x07,
2398 .fir_dma = 0x03, 2434 .fir_dma = 0x03,
2399 .cfg_base = 0x002e, 2435 .cfg_base = 0x002e,
2400 .preconfigure = preconfigure_through_ali, 2436 .preconfigure = preconfigure_through_ali,
2401 .name = "Toshiba Satellite 1800", 2437 .name = "Toshiba laptop with ALi ISA bridge",
2402 }, 2438 },
2403 { } // Terminator 2439 { } // Terminator
2404}; 2440};
2405 2441
2406 2442
2407/* 2443/*
2408 * This sets up the basic SMSC parameters (FIR port, SIR port, FIR DMA, FIR IRQ) 2444 * This sets up the basic SMSC parameters
2445 * (FIR port, SIR port, FIR DMA, FIR IRQ)
2409 * through the chip configuration port. 2446 * through the chip configuration port.
2410 */ 2447 */
2411static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf) 2448static int __init preconfigure_smsc_chip(struct
2449 smsc_ircc_subsystem_configuration
2450 *conf)
2412{ 2451{
2413 unsigned short iobase = conf->cfg_base; 2452 unsigned short iobase = conf->cfg_base;
2414 unsigned char tmpbyte; 2453 unsigned char tmpbyte;
@@ -2416,7 +2455,9 @@ static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuratio
2416 outb(LPC47N227_CFGACCESSKEY, iobase); // enter configuration state 2455 outb(LPC47N227_CFGACCESSKEY, iobase); // enter configuration state
2417 outb(SMSCSIOFLAT_DEVICEID_REG, iobase); // set for device ID 2456 outb(SMSCSIOFLAT_DEVICEID_REG, iobase); // set for device ID
2418 tmpbyte = inb(iobase +1); // Read device ID 2457 tmpbyte = inb(iobase +1); // Read device ID
2419 IRDA_DEBUG(0, "Detected Chip id: 0x%02x, setting up registers...\n",tmpbyte); 2458 IRDA_DEBUG(0,
2459 "Detected Chip id: 0x%02x, setting up registers...\n",
2460 tmpbyte);
2420 2461
2421 /* Disable UART1 and set up SIR I/O port */ 2462 /* Disable UART1 and set up SIR I/O port */
2422 outb(0x24, iobase); // select CR24 - UART1 base addr 2463 outb(0x24, iobase); // select CR24 - UART1 base addr
@@ -2426,6 +2467,7 @@ static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuratio
2426 tmpbyte = inb(iobase + 1); 2467 tmpbyte = inb(iobase + 1);
2427 if (tmpbyte != (conf->sir_io >> 2) ) { 2468 if (tmpbyte != (conf->sir_io >> 2) ) {
2428 IRDA_WARNING("ERROR: could not configure SIR ioport.\n"); 2469 IRDA_WARNING("ERROR: could not configure SIR ioport.\n");
2470 IRDA_WARNING("Try to supply ircc_cfg argument.\n");
2429 return -ENXIO; 2471 return -ENXIO;
2430 } 2472 }
2431 2473
@@ -2461,7 +2503,8 @@ static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuratio
2461 2503
2462 outb(SMSCSIOFLAT_UARTMODE0C_REG, iobase); // CR0C - UART mode 2504 outb(SMSCSIOFLAT_UARTMODE0C_REG, iobase); // CR0C - UART mode
2463 tmpbyte = inb(iobase + 1); 2505 tmpbyte = inb(iobase + 1);
2464 tmpbyte &= ~SMSCSIOFLAT_UART2MODE_MASK | SMSCSIOFLAT_UART2MODE_VAL_IRDA; 2506 tmpbyte &= ~SMSCSIOFLAT_UART2MODE_MASK |
2507 SMSCSIOFLAT_UART2MODE_VAL_IRDA;
2465 outb(tmpbyte, iobase + 1); // enable IrDA (HPSIR) mode, high speed 2508 outb(tmpbyte, iobase + 1); // enable IrDA (HPSIR) mode, high speed
2466 2509
2467 outb(LPC47N227_APMBOOTDRIVE_REG, iobase); // CR07 - Auto Pwr Mgt/boot drive sel 2510 outb(LPC47N227_APMBOOTDRIVE_REG, iobase); // CR07 - Auto Pwr Mgt/boot drive sel
@@ -2486,53 +2529,226 @@ static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuratio
2486 return 0; 2529 return 0;
2487} 2530}
2488 2531
2489/* 82801CAM registers */ 2532/* 82801CAM generic registers */
2490#define VID 0x00 2533#define VID 0x00
2491#define DID 0x02 2534#define DID 0x02
2492#define PIRQA_ROUT 0x60 2535#define PIRQ_A_D_ROUT 0x60
2536#define SIRQ_CNTL 0x64
2537#define PIRQ_E_H_ROUT 0x68
2493#define PCI_DMA_C 0x90 2538#define PCI_DMA_C 0x90
2539/* LPC-specific registers */
2494#define COM_DEC 0xe0 2540#define COM_DEC 0xe0
2541#define GEN1_DEC 0xe4
2495#define LPC_EN 0xe6 2542#define LPC_EN 0xe6
2496#define GEN2_DEC 0xec 2543#define GEN2_DEC 0xec
2497/* 2544/*
2498 * Sets up the I/O range using the 82801CAM ISA bridge, 82801DBM LPC bridge or 2545 * Sets up the I/O range using the 82801CAM ISA bridge, 82801DBM LPC bridge
2499 * Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge. They all work the same way! 2546 * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
2547 * They all work the same way!
2500 */ 2548 */
2501static int __init preconfigure_through_82801(struct pci_dev *dev, 2549static int __init preconfigure_through_82801(struct pci_dev *dev,
2502 struct smsc_ircc_subsystem_configuration *conf) 2550 struct
2551 smsc_ircc_subsystem_configuration
2552 *conf)
2503{ 2553{
2504 unsigned short tmpword; 2554 unsigned short tmpword;
2505 int ret; 2555 unsigned char tmpbyte;
2506 2556
2507 IRDA_MESSAGE("Setting up the SMSC device via the 82801 controller.\n"); 2557 IRDA_MESSAGE("Setting up Intel 82801 controller and SMSC device\n");
2508 pci_write_config_byte(dev, COM_DEC, 0x10); 2558 /*
2559 * Select the range for the COMA COM port (SIR)
2560 * Register COM_DEC:
2561 * Bit 7: reserved
2562 * Bit 6-4, COMB decode range
2563 * Bit 3: reserved
2564 * Bit 2-0, COMA decode range
2565 *
2566 * Decode ranges:
2567 * 000 = 0x3f8-0x3ff (COM1)
2568 * 001 = 0x2f8-0x2ff (COM2)
2569 * 010 = 0x220-0x227
2570 * 011 = 0x228-0x22f
2571 * 100 = 0x238-0x23f
2572 * 101 = 0x2e8-0x2ef (COM4)
2573 * 110 = 0x338-0x33f
2574 * 111 = 0x3e8-0x3ef (COM3)
2575 */
2576 pci_read_config_byte(dev, COM_DEC, &tmpbyte);
2577 tmpbyte &= 0xf8; /* mask COMA bits */
2578 switch(conf->sir_io) {
2579 case 0x3f8:
2580 tmpbyte |= 0x00;
2581 break;
2582 case 0x2f8:
2583 tmpbyte |= 0x01;
2584 break;
2585 case 0x220:
2586 tmpbyte |= 0x02;
2587 break;
2588 case 0x228:
2589 tmpbyte |= 0x03;
2590 break;
2591 case 0x238:
2592 tmpbyte |= 0x04;
2593 break;
2594 case 0x2e8:
2595 tmpbyte |= 0x05;
2596 break;
2597 case 0x338:
2598 tmpbyte |= 0x06;
2599 break;
2600 case 0x3e8:
2601 tmpbyte |= 0x07;
2602 break;
2603 default:
2604 tmpbyte |= 0x01; /* COM2 default */
2605 }
2606 IRDA_DEBUG(1, "COM_DEC (write): 0x%02x\n", tmpbyte);
2607 pci_write_config_byte(dev, COM_DEC, tmpbyte);
2509 2608
2510 /* Enable LPC */ 2609 /* Enable Low Pin Count interface */
2511 pci_read_config_word(dev, LPC_EN, &tmpword); /* LPC_EN register */ 2610 pci_read_config_word(dev, LPC_EN, &tmpword);
2512 tmpword &= 0xfffd; /* mask bit 1 */ 2611 /* These seem to be set up at all times,
2513 tmpword |= 0x0001; /* set bit 0 : COMA addr range enable */ 2612 * just make sure it is properly set.
2613 */
2614 switch(conf->cfg_base) {
2615 case 0x04e:
2616 tmpword |= 0x2000;
2617 break;
2618 case 0x02e:
2619 tmpword |= 0x1000;
2620 break;
2621 case 0x062:
2622 tmpword |= 0x0800;
2623 break;
2624 case 0x060:
2625 tmpword |= 0x0400;
2626 break;
2627 default:
2628 IRDA_WARNING("Uncommon I/O base address: 0x%04x\n",
2629 conf->cfg_base);
2630 break;
2631 }
2632 tmpword &= 0xfffd; /* disable LPC COMB */
2633 tmpword |= 0x0001; /* set bit 0 : enable LPC COMA addr range (GEN2) */
2634 IRDA_DEBUG(1, "LPC_EN (write): 0x%04x\n", tmpword);
2514 pci_write_config_word(dev, LPC_EN, tmpword); 2635 pci_write_config_word(dev, LPC_EN, tmpword);
2515 2636
2516 /* Setup DMA */ 2637 /*
2517 pci_write_config_word(dev, PCI_DMA_C, 0xc0c0); /* LPC I/F DMA on, channel 3 -- rtm (?? PCI DMA ?) */ 2638 * Configure LPC DMA channel
2518 pci_write_config_word(dev, GEN2_DEC, 0x131); /* LPC I/F 2nd decode range */ 2639 * PCI_DMA_C bits:
2640 * Bit 15-14: DMA channel 7 select
2641 * Bit 13-12: DMA channel 6 select
2642 * Bit 11-10: DMA channel 5 select
2643 * Bit 9-8: Reserved
2644 * Bit 7-6: DMA channel 3 select
2645 * Bit 5-4: DMA channel 2 select
2646 * Bit 3-2: DMA channel 1 select
2647 * Bit 1-0: DMA channel 0 select
2648 * 00 = Reserved value
2649 * 01 = PC/PCI DMA
2650 * 10 = Reserved value
2651 * 11 = LPC I/F DMA
2652 */
2653 pci_read_config_word(dev, PCI_DMA_C, &tmpword);
2654 switch(conf->fir_dma) {
2655 case 0x07:
2656 tmpword |= 0xc000;
2657 break;
2658 case 0x06:
2659 tmpword |= 0x3000;
2660 break;
2661 case 0x05:
2662 tmpword |= 0x0c00;
2663 break;
2664 case 0x03:
2665 tmpword |= 0x00c0;
2666 break;
2667 case 0x02:
2668 tmpword |= 0x0030;
2669 break;
2670 case 0x01:
2671 tmpword |= 0x000c;
2672 break;
2673 case 0x00:
2674 tmpword |= 0x0003;
2675 break;
2676 default:
2677 break; /* do not change settings */
2678 }
2679 IRDA_DEBUG(1, "PCI_DMA_C (write): 0x%04x\n", tmpword);
2680 pci_write_config_word(dev, PCI_DMA_C, tmpword);
2681
2682 /*
2683 * GEN2_DEC bits:
2684 * Bit 15-4: Generic I/O range
2685 * Bit 3-1: reserved (read as 0)
2686 * Bit 0: enable GEN2 range on LPC I/F
2687 */
2688 tmpword = conf->fir_io & 0xfff8;
2689 tmpword |= 0x0001;
2690 IRDA_DEBUG(1, "GEN2_DEC (write): 0x%04x\n", tmpword);
2691 pci_write_config_word(dev, GEN2_DEC, tmpword);
2519 2692
2520 /* Pre-configure chip */ 2693 /* Pre-configure chip */
2521 ret = preconfigure_smsc_chip(conf); 2694 return preconfigure_smsc_chip(conf);
2695}
2522 2696
2523 /* Disable LPC */ 2697/*
2524 pci_read_config_word(dev, LPC_EN, &tmpword); /* LPC_EN register */ 2698 * Pre-configure a certain port on the ALi 1533 bridge.
2525 tmpword &= 0xfffc; /* mask bit 1 and bit 0, COMA addr range disable */ 2699 * This is based on reverse-engineering since ALi does not
2526 pci_write_config_word(dev, LPC_EN, tmpword); 2700 * provide any data sheet for the 1533 chip.
2527 return ret; 2701 */
2702static void __init preconfigure_ali_port(struct pci_dev *dev,
2703 unsigned short port)
2704{
2705 unsigned char reg;
2706 /* These bits obviously control the different ports */
2707 unsigned char mask;
2708 unsigned char tmpbyte;
2709
2710 switch(port) {
2711 case 0x0130:
2712 case 0x0178:
2713 reg = 0xb0;
2714 mask = 0x80;
2715 break;
2716 case 0x03f8:
2717 reg = 0xb4;
2718 mask = 0x80;
2719 break;
2720 case 0x02f8:
2721 reg = 0xb4;
2722 mask = 0x30;
2723 break;
2724 case 0x02e8:
2725 reg = 0xb4;
2726 mask = 0x08;
2727 break;
2728 default:
2729 IRDA_ERROR("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n", port);
2730 return;
2731 }
2732
2733 pci_read_config_byte(dev, reg, &tmpbyte);
2734 /* Turn on the right bits */
2735 tmpbyte |= mask;
2736 pci_write_config_byte(dev, reg, tmpbyte);
2737 IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
2738 return;
2528} 2739}
2529 2740
2530static int __init preconfigure_through_ali(struct pci_dev *dev, 2741static int __init preconfigure_through_ali(struct pci_dev *dev,
2531 struct smsc_ircc_subsystem_configuration *conf) 2742 struct
2743 smsc_ircc_subsystem_configuration
2744 *conf)
2532{ 2745{
2533 /* TODO: put in ALi 1533 configuration here. */ 2746 /* Configure the two ports on the ALi 1533 */
2534 IRDA_MESSAGE("SORRY: %s has an unsupported bridge controller (ALi): not pre-configured.\n", conf->name); 2747 preconfigure_ali_port(dev, conf->sir_io);
2535 return -ENODEV; 2748 preconfigure_ali_port(dev, conf->fir_io);
2749
2750 /* Pre-configure chip */
2751 return preconfigure_smsc_chip(conf);
2536} 2752}
2537 2753
2538static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, 2754static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
@@ -2552,9 +2768,10 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
2552 struct smsc_ircc_subsystem_configuration *conf; 2768 struct smsc_ircc_subsystem_configuration *conf;
2553 2769
2554 /* 2770 /*
2555 * Cache the subsystem vendor/device: some manufacturers fail to set 2771 * Cache the subsystem vendor/device:
2556 * this for all components, so we save it in case there is just 2772 * some manufacturers fail to set this for all components,
2557 * 0x0000 0x0000 on the device we want to check. 2773 * so we save it in case there is just 0x0000 0x0000 on the
2774 * device we want to check.
2558 */ 2775 */
2559 if (dev->subsystem_vendor != 0x0000U) { 2776 if (dev->subsystem_vendor != 0x0000U) {
2560 ss_vendor = dev->subsystem_vendor; 2777 ss_vendor = dev->subsystem_vendor;
@@ -2564,13 +2781,20 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
2564 for( ; conf->subvendor; conf++) { 2781 for( ; conf->subvendor; conf++) {
2565 if(conf->vendor == dev->vendor && 2782 if(conf->vendor == dev->vendor &&
2566 conf->device == dev->device && 2783 conf->device == dev->device &&
2567 conf->subvendor == ss_vendor && /* Sometimes these are cached values */ 2784 conf->subvendor == ss_vendor &&
2568 (conf->subdevice == ss_device || conf->subdevice == 0xffff)) { 2785 /* Sometimes these are cached values */
2569 struct smsc_ircc_subsystem_configuration tmpconf; 2786 (conf->subdevice == ss_device ||
2787 conf->subdevice == 0xffff)) {
2788 struct smsc_ircc_subsystem_configuration
2789 tmpconf;
2570 2790
2571 memcpy(&tmpconf, conf, sizeof(struct smsc_ircc_subsystem_configuration)); 2791 memcpy(&tmpconf, conf,
2792 sizeof(struct smsc_ircc_subsystem_configuration));
2572 2793
2573 /* Override the default values with anything passed in as parameter */ 2794 /*
2795 * Override the default values with anything
2796 * passed in as parameter
2797 */
2574 if (ircc_cfg != 0) 2798 if (ircc_cfg != 0)
2575 tmpconf.cfg_base = ircc_cfg; 2799 tmpconf.cfg_base = ircc_cfg;
2576 if (ircc_fir != 0) 2800 if (ircc_fir != 0)
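The reworked preconfigure_through_82801() above encodes three things into ICH/82801 config space: COM_DEC picks the COMA decode range by a table lookup on the SIR base, PCI_DMA_C places the two-bit value 11 ("LPC I/F DMA") in the field belonging to the chosen DMA channel, and GEN2_DEC takes the FIR base rounded down to 8 bytes with bit 0 as the range enable. A stand-alone sketch of that arithmetic; the helper names are made up for the sketch and nothing here touches real PCI config space:

/* Sketch: compute the 82801 LPC config values used above.
 * Pure arithmetic; constants mirror the driver's switch statements. */
#include <stdio.h>

/* Two-bit "LPC I/F DMA" field per channel; channel 4 has no field
 * (bits 9-8 are reserved), channels 0-3 and 5-7 sit at bits 2*ch+1..2*ch. */
static unsigned short pci_dma_c_bits(int channel)
{
	if (channel == 4 || channel < 0 || channel > 7)
		return 0;               /* leave the register unchanged */
	return 0x3 << (2 * channel);    /* e.g. channel 3 -> 0x00c0 */
}

/* GEN2_DEC: FIR I/O base aligned down to 8 bytes, bit 0 enables the range. */
static unsigned short gen2_dec(unsigned short fir_io)
{
	return (fir_io & 0xfff8) | 0x0001;
}

int main(void)
{
	printf("DMA ch3  -> PCI_DMA_C |= 0x%04x\n", pci_dma_c_bits(3)); /* 0x00c0 */
	printf("FIR 0x130 -> GEN2_DEC  = 0x%04x\n", gen2_dec(0x0130));  /* 0x0131 */
	return 0;
}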
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index 6f7dce8eba51..b67f586d7392 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -149,6 +149,8 @@ static void enp2611_check_link_status(unsigned long __dummy)
149 int status; 149 int status;
150 150
151 dev = nds[i]; 151 dev = nds[i];
152 if (dev == NULL)
153 continue;
152 154
153 status = pm3386_is_link_up(i); 155 status = pm3386_is_link_up(i);
154 if (status && !netif_carrier_ok(dev)) { 156 if (status && !netif_carrier_ok(dev)) {
@@ -191,6 +193,7 @@ static void enp2611_set_port_admin_status(int port, int up)
191 193
192static int __init enp2611_init_module(void) 194static int __init enp2611_init_module(void)
193{ 195{
196 int ports;
194 int i; 197 int i;
195 198
196 if (!machine_is_enp2611()) 199 if (!machine_is_enp2611())
@@ -199,7 +202,8 @@ static int __init enp2611_init_module(void)
199 caleb_reset(); 202 caleb_reset();
200 pm3386_reset(); 203 pm3386_reset();
201 204
202 for (i = 0; i < 3; i++) { 205 ports = pm3386_port_count();
206 for (i = 0; i < ports; i++) {
203 nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); 207 nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
204 if (nds[i] == NULL) { 208 if (nds[i] == NULL) {
205 while (--i >= 0) 209 while (--i >= 0)
@@ -215,9 +219,10 @@ static int __init enp2611_init_module(void)
215 219
216 ixp2400_msf_init(&enp2611_msf_parameters); 220 ixp2400_msf_init(&enp2611_msf_parameters);
217 221
218 if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) { 222 if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
219 for (i = 0; i < 3; i++) 223 for (i = 0; i < ports; i++)
220 free_netdev(nds[i]); 224 if (nds[i])
225 free_netdev(nds[i]);
221 return -EINVAL; 226 return -EINVAL;
222 } 227 }
223 228
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c
index 5c7ab7564053..5224651c9aac 100644
--- a/drivers/net/ixp2000/pm3386.c
+++ b/drivers/net/ixp2000/pm3386.c
@@ -86,40 +86,53 @@ static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
86 pm3386_reg_write(port >> 1, reg, value); 86 pm3386_reg_write(port >> 1, reg, value);
87} 87}
88 88
89int pm3386_secondary_present(void)
90{
91 return pm3386_reg_read(1, 0) == 0x3386;
92}
89 93
90void pm3386_reset(void) 94void pm3386_reset(void)
91{ 95{
92 u8 mac[3][6]; 96 u8 mac[3][6];
97 int secondary;
98
99 secondary = pm3386_secondary_present();
93 100
94 /* Save programmed MAC addresses. */ 101 /* Save programmed MAC addresses. */
95 pm3386_get_mac(0, mac[0]); 102 pm3386_get_mac(0, mac[0]);
96 pm3386_get_mac(1, mac[1]); 103 pm3386_get_mac(1, mac[1]);
97 pm3386_get_mac(2, mac[2]); 104 if (secondary)
105 pm3386_get_mac(2, mac[2]);
98 106
99 /* Assert analog and digital reset. */ 107 /* Assert analog and digital reset. */
100 pm3386_reg_write(0, 0x002, 0x0060); 108 pm3386_reg_write(0, 0x002, 0x0060);
101 pm3386_reg_write(1, 0x002, 0x0060); 109 if (secondary)
110 pm3386_reg_write(1, 0x002, 0x0060);
102 mdelay(1); 111 mdelay(1);
103 112
104 /* Deassert analog reset. */ 113 /* Deassert analog reset. */
105 pm3386_reg_write(0, 0x002, 0x0062); 114 pm3386_reg_write(0, 0x002, 0x0062);
106 pm3386_reg_write(1, 0x002, 0x0062); 115 if (secondary)
116 pm3386_reg_write(1, 0x002, 0x0062);
107 mdelay(10); 117 mdelay(10);
108 118
109 /* Deassert digital reset. */ 119 /* Deassert digital reset. */
110 pm3386_reg_write(0, 0x002, 0x0063); 120 pm3386_reg_write(0, 0x002, 0x0063);
111 pm3386_reg_write(1, 0x002, 0x0063); 121 if (secondary)
122 pm3386_reg_write(1, 0x002, 0x0063);
112 mdelay(10); 123 mdelay(10);
113 124
114 /* Restore programmed MAC addresses. */ 125 /* Restore programmed MAC addresses. */
115 pm3386_set_mac(0, mac[0]); 126 pm3386_set_mac(0, mac[0]);
116 pm3386_set_mac(1, mac[1]); 127 pm3386_set_mac(1, mac[1]);
117 pm3386_set_mac(2, mac[2]); 128 if (secondary)
129 pm3386_set_mac(2, mac[2]);
118 130
119 /* Disable carrier on all ports. */ 131 /* Disable carrier on all ports. */
120 pm3386_set_carrier(0, 0); 132 pm3386_set_carrier(0, 0);
121 pm3386_set_carrier(1, 0); 133 pm3386_set_carrier(1, 0);
122 pm3386_set_carrier(2, 0); 134 if (secondary)
135 pm3386_set_carrier(2, 0);
123} 136}
124 137
125static u16 swaph(u16 x) 138static u16 swaph(u16 x)
@@ -127,6 +140,11 @@ static u16 swaph(u16 x)
127 return ((x << 8) | (x >> 8)) & 0xffff; 140 return ((x << 8) | (x >> 8)) & 0xffff;
128} 141}
129 142
143int pm3386_port_count(void)
144{
145 return 2 + pm3386_secondary_present();
146}
147
130void pm3386_init_port(int port) 148void pm3386_init_port(int port)
131{ 149{
132 int pm = port >> 1; 150 int pm = port >> 1;
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h
index fe92bb056ac4..cc4183dca911 100644
--- a/drivers/net/ixp2000/pm3386.h
+++ b/drivers/net/ixp2000/pm3386.h
@@ -13,6 +13,7 @@
13#define __PM3386_H 13#define __PM3386_H
14 14
15void pm3386_reset(void); 15void pm3386_reset(void);
16int pm3386_port_count(void);
16void pm3386_init_port(int port); 17void pm3386_init_port(int port);
17void pm3386_get_mac(int port, u8 *mac); 18void pm3386_get_mac(int port, u8 *mac);
18void pm3386_set_mac(int port, u8 *mac); 19void pm3386_set_mac(int port, u8 *mac);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index ea62a3e7d586..411f4d809c47 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1419,6 +1419,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1419 mv643xx_eth_update_pscr(dev, &cmd); 1419 mv643xx_eth_update_pscr(dev, &cmd);
1420 mv643xx_set_settings(dev, &cmd); 1420 mv643xx_set_settings(dev, &cmd);
1421 1421
1422 SET_MODULE_OWNER(dev);
1423 SET_NETDEV_DEV(dev, &pdev->dev);
1422 err = register_netdev(dev); 1424 err = register_netdev(dev);
1423 if (err) 1425 if (err)
1424 goto out; 1426 goto out;
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 08b218c5bfbc..b32765215f75 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -139,8 +139,9 @@ bad_clone_list[] __initdata = {
139 139
140#if defined(CONFIG_PLAT_MAPPI) 140#if defined(CONFIG_PLAT_MAPPI)
141# define DCR_VAL 0x4b 141# define DCR_VAL 0x4b
142#elif defined(CONFIG_PLAT_OAKS32R) 142#elif defined(CONFIG_PLAT_OAKS32R) || \
143# define DCR_VAL 0x48 143 defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
144# define DCR_VAL 0x48 /* 8-bit mode */
144#else 145#else
145# define DCR_VAL 0x49 146# define DCR_VAL 0x49
146#endif 147#endif
@@ -226,7 +227,7 @@ struct net_device * __init ne_probe(int unit)
226 netdev_boot_setup_check(dev); 227 netdev_boot_setup_check(dev);
227 228
228#ifdef CONFIG_TOSHIBA_RBTX4938 229#ifdef CONFIG_TOSHIBA_RBTX4938
229 dev->base_addr = 0x07f20280; 230 dev->base_addr = RBTX4938_RTL_8019_BASE;
230 dev->irq = RBTX4938_RTL_8019_IRQ; 231 dev->irq = RBTX4938_RTL_8019_IRQ;
231#endif 232#endif
232 err = do_ne_probe(dev); 233 err = do_ne_probe(dev);
@@ -396,10 +397,22 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
396 /* We must set the 8390 for word mode. */ 397 /* We must set the 8390 for word mode. */
397 outb_p(DCR_VAL, ioaddr + EN0_DCFG); 398 outb_p(DCR_VAL, ioaddr + EN0_DCFG);
398 start_page = NESM_START_PG; 399 start_page = NESM_START_PG;
399 stop_page = NESM_STOP_PG; 400
401 /*
402 * Realtek RTL8019AS datasheet says that the PSTOP register
403 * shouldn't exceed 0x60 in 8-bit mode.
404 * This chip can be identified by reading the signature from
405 * the remote byte count registers (otherwise write-only)...
406 */
407 if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */
408 inb(ioaddr + EN0_RCNTLO) == 0x50 &&
409 inb(ioaddr + EN0_RCNTHI) == 0x70)
410 stop_page = 0x60;
411 else
412 stop_page = NESM_STOP_PG;
400 } else { 413 } else {
401 start_page = NE1SM_START_PG; 414 start_page = NE1SM_START_PG;
402 stop_page = NE1SM_STOP_PG; 415 stop_page = NE1SM_STOP_PG;
403 } 416 }
404 417
405#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R) 418#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R)
@@ -509,15 +522,9 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
509 ei_status.name = name; 522 ei_status.name = name;
510 ei_status.tx_start_page = start_page; 523 ei_status.tx_start_page = start_page;
511 ei_status.stop_page = stop_page; 524 ei_status.stop_page = stop_page;
512#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
513 wordlength = 1;
514#endif
515 525
516#ifdef CONFIG_PLAT_OAKS32R 526 /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */
517 ei_status.word16 = 0; 527 ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01));
518#else
519 ei_status.word16 = (wordlength == 2);
520#endif
521 528
522 ei_status.rx_start_page = start_page + TX_PAGES; 529 ei_status.rx_start_page = start_page + TX_PAGES;
523#ifdef PACKETBUF_MEMSIZE 530#ifdef PACKETBUF_MEMSIZE
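The ne.c hunk above limits the receive buffer when an RTL8019AS runs in 8-bit mode: the datasheet says PSTOP should not exceed 0x60, and the chip is recognised by the 0x50/0x70 signature readable from the otherwise write-only remote byte count registers. A compact sketch of that decision with the port accesses stubbed out; read_reg() and pick_stop_page() are stand-ins for inb()/the probe logic, not driver functions:

/* Sketch: choose the 8390 stop page as in the ne.c probe above. */
#include <stdio.h>

#define NESM_STOP_PG  0x80              /* normal NE2000 16-bit stop page */

static unsigned char read_reg(int reg)
{
	/* pretend the RTL8019AS signature was found at EN0_RCNTLO/HI */
	return reg == 0 ? 0x50 : 0x70;
}

static int pick_stop_page(unsigned char dcr_val)
{
	int eight_bit = (dcr_val & 0x01) == 0;  /* DCR word-transfer bit clear */

	if (eight_bit && read_reg(0) == 0x50 && read_reg(1) == 0x70)
		return 0x60;            /* RTL8019AS: keep PSTOP <= 0x60 */
	return NESM_STOP_PG;
}

int main(void)
{
	printf("stop page = 0x%02x\n", pick_stop_page(0x48)); /* 8-bit DCR_VAL */
	return 0;
}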
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 66e74f740261..bf58db29e2ed 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -107,7 +107,7 @@ static int init_netconsole(void)
107 107
108 if(!configured) { 108 if(!configured) {
109 printk("netconsole: not configured, aborting\n"); 109 printk("netconsole: not configured, aborting\n");
110 return -EINVAL; 110 return 0;
111 } 111 }
112 112
113 if(netpoll_setup(&np)) 113 if(netpoll_setup(&np))
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 448a09488529..2ea66aca648b 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1691,17 +1691,6 @@ static void do_set_multicast_list(struct net_device *dev)
1691 memset(ei_local->mcfilter, 0xFF, 8); 1691 memset(ei_local->mcfilter, 0xFF, 8);
1692 } 1692 }
1693 1693
1694 /*
1695 * DP8390 manuals don't specify any magic sequence for altering
1696 * the multicast regs on an already running card. To be safe, we
1697 * ensure multicast mode is off prior to loading up the new hash
1698 * table. If this proves to be not enough, we can always resort
1699 * to stopping the NIC, loading the table and then restarting.
1700 */
1701
1702 if (netif_running(dev))
1703 outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
1704
1705 outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); 1694 outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
1706 for(i = 0; i < 8; i++) 1695 for(i = 0; i < 8; i++)
1707 { 1696 {
@@ -1715,6 +1704,8 @@ static void do_set_multicast_list(struct net_device *dev)
1715 outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR); 1704 outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
1716 else 1705 else
1717 outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); 1706 outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
1707
1708 outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
1718} 1709}
1719 1710
1720/* 1711/*
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 4260c2128f47..a8f6bfc96fd2 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1204,7 +1204,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1204 1204
1205 dev->last_rx = jiffies; 1205 dev->last_rx = jiffies;
1206 lp->linux_stats.rx_packets++; 1206 lp->linux_stats.rx_packets++;
1207 lp->linux_stats.rx_bytes += skb->len; 1207 lp->linux_stats.rx_bytes += pkt_len;
1208 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ 1208 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
1209 continue; 1209 continue;
1210 } else { 1210 } else {
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 506e777c5f06..d090df413049 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1639,6 +1639,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1639 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), 1639 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
1640 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), 1640 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
1641 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), 1641 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
1642 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
1642 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), 1643 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
1643 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa), 1644 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa),
1644 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), 1645 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9),
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 07c31f19c6ba..fc08c4af506c 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1774,8 +1774,6 @@ static int pcnet32_open(struct net_device *dev)
1774 lp->rx_dma_addr[i] = 0; 1774 lp->rx_dma_addr[i] = 0;
1775 } 1775 }
1776 1776
1777 pcnet32_free_ring(dev);
1778
1779 /* 1777 /*
1780 * Switch back to 16bit mode to avoid problems with dumb 1778 * Switch back to 16bit mode to avoid problems with dumb
1781 * DOS packet driver after a warm reboot 1779 * DOS packet driver after a warm reboot
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 459443b572ce..1b236bdf6b92 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -60,8 +60,10 @@ int mdiobus_register(struct mii_bus *bus)
60 for (i = 0; i < PHY_MAX_ADDR; i++) { 60 for (i = 0; i < PHY_MAX_ADDR; i++) {
61 struct phy_device *phydev; 61 struct phy_device *phydev;
62 62
63 if (bus->phy_mask & (1 << i)) 63 if (bus->phy_mask & (1 << i)) {
64 bus->phy_map[i] = NULL;
64 continue; 65 continue;
66 }
65 67
66 phydev = get_phy_device(bus, i); 68 phydev = get_phy_device(bus, i);
67 69
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 475dc930380f..0d101a18026a 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -861,6 +861,9 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
861 * give dev_queue_xmit something it can free. 861 * give dev_queue_xmit something it can free.
862 */ 862 */
863 skb2 = skb_clone(skb, GFP_ATOMIC); 863 skb2 = skb_clone(skb, GFP_ATOMIC);
864
865 if (skb2 == NULL)
866 goto abort;
864 } 867 }
865 868
866 ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr)); 869 ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr));
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index b82191d2bee1..f5a3bf4d959a 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -127,6 +127,7 @@ static const struct mii_chip_info {
127} mii_chip_table[] = { 127} mii_chip_table[] = {
128 { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, 128 { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN },
129 { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, 129 { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN },
130 { "SiS 900 on Foxconn 661 7MI", 0x0143, 0xBC70, LAN },
130 { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN }, 131 { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN },
131 { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN }, 132 { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN },
132 { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN }, 133 { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index a70c2b0cc104..5ca5a1b546a1 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,8 +78,7 @@ static const struct pci_device_id skge_id_table[] = {
78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, 78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
79 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) }, 79 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
80 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), }, 80 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
81 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, 81 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */
82 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
83 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, 82 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
84 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ 83 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
85 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, 84 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
@@ -402,7 +401,7 @@ static int skge_set_ring_param(struct net_device *dev,
402 int err; 401 int err;
403 402
404 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || 403 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
405 p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE) 404 p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
406 return -EINVAL; 405 return -EINVAL;
407 406
408 skge->rx_ring.count = p->rx_pending; 407 skge->rx_ring.count = p->rx_pending;
@@ -2717,8 +2716,7 @@ static int skge_poll(struct net_device *dev, int *budget)
2717 if (control & BMU_OWN) 2716 if (control & BMU_OWN)
2718 break; 2717 break;
2719 2718
2720 skb = skge_rx_get(skge, e, control, rd->status, 2719 skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
2721 le16_to_cpu(rd->csum2));
2722 if (likely(skb)) { 2720 if (likely(skb)) {
2723 dev->last_rx = jiffies; 2721 dev->last_rx = jiffies;
2724 netif_receive_skb(skb); 2722 netif_receive_skb(skb);
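The skge ring-parameter check above now requires tx_pending to be at least MAX_SKB_FRAGS + 1, since one packet can need a descriptor for its linear part plus one per page fragment; a smaller ring could never accept a maximally fragmented skb. A sketch of the check; the 18 used for MAX_SKB_FRAGS (65536 / PAGE_SIZE + 2 on 4 KiB pages) and the MAX_TX_RING_SIZE value are stand-ins, not taken from skge.h:

/* Sketch: validate the tx ring size as skge_set_ring_param() does above. */
#include <stdio.h>

#define MAX_SKB_FRAGS    18             /* stand-in value */
#define MAX_TX_RING_SIZE 1024           /* illustrative upper bound */

static int tx_ring_ok(unsigned tx_pending)
{
	return tx_pending >= MAX_SKB_FRAGS + 1 &&
	       tx_pending <= MAX_TX_RING_SIZE;
}

int main(void)
{
	printf("16 entries: %s\n", tx_ring_ok(16) ? "ok" : "rejected");
	printf("64 entries: %s\n", tx_ring_ok(64) ? "ok" : "rejected");
	return 0;
}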
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 67b0eab16589..959109609d85 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
51#include "sky2.h" 51#include "sky2.h"
52 52
53#define DRV_NAME "sky2" 53#define DRV_NAME "sky2"
54#define DRV_VERSION "1.1" 54#define DRV_VERSION "1.4"
55#define PFX DRV_NAME " " 55#define PFX DRV_NAME " "
56 56
57/* 57/*
@@ -79,6 +79,8 @@
79#define NAPI_WEIGHT 64 79#define NAPI_WEIGHT 64
80#define PHY_RETRIES 1000 80#define PHY_RETRIES 1000
81 81
82#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
83
82static const u32 default_msg = 84static const u32 default_msg =
83 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 85 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
84 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR 86 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
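RING_NEXT() replaces the modulo wrap-around on ring indices with a mask, which only works because the ring sizes are powers of two; the same trick shows up later in tx_dist(), where (head - tail) & (size - 1) gives the occupied distance even across the wrap. A quick stand-alone illustration; the 16-entry ring size is chosen just for the example:

/* Sketch: power-of-two ring index arithmetic as in RING_NEXT()/tx_dist(). */
#include <stdio.h>

#define RING_SIZE 16                          /* must be a power of two */
#define RING_NEXT(x, s) (((x) + 1) & ((s) - 1))

static unsigned tx_dist(unsigned tail, unsigned head)
{
	return (head - tail) & (RING_SIZE - 1);   /* modular subtraction */
}

int main(void)
{
	unsigned idx = RING_SIZE - 1;

	printf("next after %u is %u\n", idx, RING_NEXT(idx, RING_SIZE)); /* 0 */
	printf("dist tail=14 head=2: %u\n", tx_dist(14, 2));             /* 4 */
	return 0;
}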
@@ -96,9 +98,14 @@ static int disable_msi = 0;
96module_param(disable_msi, int, 0); 98module_param(disable_msi, int, 0);
97MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); 99MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
98 100
101static int idle_timeout = 100;
102module_param(idle_timeout, int, 0);
103MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)");
104
99static const struct pci_device_id sky2_id_table[] = { 105static const struct pci_device_id sky2_id_table[] = {
100 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, 106 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
101 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, 107 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
102 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, 109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
103 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, 110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
104 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, 111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -122,6 +129,7 @@ MODULE_DEVICE_TABLE(pci, sky2_id_table);
122/* Avoid conditionals by using array */ 129/* Avoid conditionals by using array */
123static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; 130static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
124static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; 131static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
132static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
125 133
126/* This driver supports yukon2 chipset only */ 134/* This driver supports yukon2 chipset only */
127static const char *yukon2_name[] = { 135static const char *yukon2_name[] = {
@@ -228,6 +236,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
228 } 236 }
229 237
230 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 238 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
239 sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
231 sky2_pci_write32(hw, PCI_DEV_REG3, 0); 240 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
232 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); 241 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
233 reg1 &= P_ASPM_CONTROL_MSK; 242 reg1 &= P_ASPM_CONTROL_MSK;
@@ -298,7 +307,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
298 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 307 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
299 u16 ctrl, ct1000, adv, pg, ledctrl, ledover; 308 u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
300 309
301 if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) { 310 if (sky2->autoneg == AUTONEG_ENABLE &&
311 !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
302 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 312 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
303 313
304 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 314 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -326,7 +336,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
326 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); 336 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
327 337
328 if (sky2->autoneg == AUTONEG_ENABLE && 338 if (sky2->autoneg == AUTONEG_ENABLE &&
329 hw->chip_id == CHIP_ID_YUKON_XL) { 339 (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
330 ctrl &= ~PHY_M_PC_DSC_MSK; 340 ctrl &= ~PHY_M_PC_DSC_MSK;
331 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; 341 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
332 } 342 }
@@ -442,10 +452,11 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
442 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 452 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
443 453
444 /* set LED Function Control register */ 454 /* set LED Function Control register */
445 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ 455 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
446 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ 456 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
447 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ 457 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
448 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ 458 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
459 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
449 460
450 /* set Polarity Control register */ 461 /* set Polarity Control register */
451 gm_phy_write(hw, port, PHY_MARV_PHY_STAT, 462 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
@@ -459,6 +470,25 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
459 /* restore page register */ 470 /* restore page register */
460 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 471 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
461 break; 472 break;
473 case CHIP_ID_YUKON_EC_U:
474 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
475
476 /* select page 3 to access LED control register */
477 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
478
479 /* set LED Function Control register */
480 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
481 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
482 PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
483 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
484 PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
485
486 /* set Blink Rate in LED Timer Control Register */
487 gm_phy_write(hw, port, PHY_MARV_INT_MASK,
488 ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
489 /* restore page register */
490 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
491 break;
462 492
463 default: 493 default:
464 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ 494 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
@@ -467,19 +497,21 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
467 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); 497 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
468 } 498 }
469 499
470 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) { 500 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
471 /* apply fixes in PHY AFE */ 501 /* apply fixes in PHY AFE */
472 gm_phy_write(hw, port, 22, 255); 502 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
503 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
504
473 /* increase differential signal amplitude in 10BASE-T */ 505 /* increase differential signal amplitude in 10BASE-T */
474 gm_phy_write(hw, port, 24, 0xaa99); 506 gm_phy_write(hw, port, 0x18, 0xaa99);
475 gm_phy_write(hw, port, 23, 0x2011); 507 gm_phy_write(hw, port, 0x17, 0x2011);
476 508
477 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ 509 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
478 gm_phy_write(hw, port, 24, 0xa204); 510 gm_phy_write(hw, port, 0x18, 0xa204);
479 gm_phy_write(hw, port, 23, 0x2002); 511 gm_phy_write(hw, port, 0x17, 0x2002);
480 512
481 /* set page register to 0 */ 513 /* set page register to 0 */
482 gm_phy_write(hw, port, 22, 0); 514 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
483 } else { 515 } else {
484 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 516 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
485 517
@@ -553,6 +585,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
553 585
554 if (sky2->duplex == DUPLEX_FULL) 586 if (sky2->duplex == DUPLEX_FULL)
555 reg |= GM_GPCR_DUP_FULL; 587 reg |= GM_GPCR_DUP_FULL;
588
589 /* turn off pause in 10/100mbps half duplex */
590 else if (sky2->speed != SPEED_1000 &&
591 hw->chip_id != CHIP_ID_YUKON_EC_U)
592 sky2->tx_pause = sky2->rx_pause = 0;
556 } else 593 } else
557 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 594 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
558 595
@@ -719,7 +756,7 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
719{ 756{
720 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod; 757 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
721 758
722 sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE; 759 sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
723 return le; 760 return le;
724} 761}
725 762
@@ -735,7 +772,7 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
735static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) 772static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
736{ 773{
737 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; 774 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
738 sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE; 775 sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
739 return le; 776 return le;
740} 777}
741 778
@@ -925,8 +962,7 @@ static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
925 skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask); 962 skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
926 if (likely(skb)) { 963 if (likely(skb)) {
927 unsigned long p = (unsigned long) skb->data; 964 unsigned long p = (unsigned long) skb->data;
928 skb_reserve(skb, 965 skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
929 ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p);
930 } 966 }
931 967
932 return skb; 968 return skb;
@@ -943,6 +979,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
943 struct sky2_hw *hw = sky2->hw; 979 struct sky2_hw *hw = sky2->hw;
944 unsigned rxq = rxqaddr[sky2->port]; 980 unsigned rxq = rxqaddr[sky2->port];
945 int i; 981 int i;
982 unsigned thresh;
946 983
947 sky2->rx_put = sky2->rx_next = 0; 984 sky2->rx_put = sky2->rx_next = 0;
948 sky2_qset(hw, rxq); 985 sky2_qset(hw, rxq);
@@ -967,9 +1004,21 @@ static int sky2_rx_start(struct sky2_port *sky2)
967 sky2_rx_add(sky2, re->mapaddr); 1004 sky2_rx_add(sky2, re->mapaddr);
968 } 1005 }
969 1006
970 /* Truncate oversize frames */ 1007
971 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), sky2->rx_bufsize - 8); 1008 /*
972 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON); 1009 * The receiver hangs if it receives frames larger than the
1010 * packet buffer. As a workaround, truncate oversize frames, but
1011 * the register is limited to 9 bits, so if you do frames > 2052
1012 * you better get the MTU right!
1013 */
1014 thresh = (sky2->rx_bufsize - 8) / sizeof(u32);
1015 if (thresh > 0x1ff)
1016 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
1017 else {
1018 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
1019 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
1020 }
1021
973 1022
974 /* Tell chip about available buffers */ 1023 /* Tell chip about available buffers */
975 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put); 1024 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
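The truncation threshold written above is expressed in 32-bit words and capped at 9 bits, i.e. 0x1ff = 511 words or 2044 bytes of buffer after the 8-byte slack, which is where the "frames > 2052" remark comes from. For a standard 1500-byte MTU, sky2_buf_size() gives ALIGN(1500 + 14 + 4, 8) + 8 = 1528 bytes, so thresh = (1528 - 8) / 4 = 380 and truncation stays enabled. A tiny sketch of the same arithmetic; buf_size() mirrors sky2_buf_size() with ETH_HLEN and VLAN_HLEN written out as 14 and 4:

/* Sketch: when does the RX_GMF_TR_THR threshold overflow its 9 bits? */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned buf_size(int mtu)
{
	return ALIGN(mtu + 14 + 4, 8) + 8;
}

int main(void)
{
	int mtu;

	for (mtu = 1500; mtu <= 9000; mtu += 2500) {
		unsigned thresh = (buf_size(mtu) - 8) / 4;  /* u32 words */

		printf("mtu %4d: thresh %4u -> truncation %s\n", mtu, thresh,
		       thresh > 0x1ff ? "off" : "on");
	}
	return 0;
}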
@@ -986,7 +1035,25 @@ static int sky2_up(struct net_device *dev)
986 struct sky2_hw *hw = sky2->hw; 1035 struct sky2_hw *hw = sky2->hw;
987 unsigned port = sky2->port; 1036 unsigned port = sky2->port;
988 u32 ramsize, rxspace, imask; 1037 u32 ramsize, rxspace, imask;
989 int err = -ENOMEM; 1038 int cap, err = -ENOMEM;
1039 struct net_device *otherdev = hw->dev[sky2->port^1];
1040
1041 /*
1042 * On dual port PCI-X cards, there is a problem where status
1043 * can be received out of order due to split transactions
1044 */
1045 if (otherdev && netif_running(otherdev) &&
1046 (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
1047 struct sky2_port *osky2 = netdev_priv(otherdev);
1048 u16 cmd;
1049
1050 cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
1051 cmd &= ~PCI_X_CMD_MAX_SPLIT;
1052 sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
1053
1054 sky2->rx_csum = 0;
1055 osky2->rx_csum = 0;
1056 }
990 1057
991 if (netif_msg_ifup(sky2)) 1058 if (netif_msg_ifup(sky2))
992 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 1059 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
@@ -1051,7 +1118,7 @@ static int sky2_up(struct net_device *dev)
1051 1118
1052 /* Enable interrupts from phy/mac for port */ 1119 /* Enable interrupts from phy/mac for port */
1053 imask = sky2_read32(hw, B0_IMSK); 1120 imask = sky2_read32(hw, B0_IMSK);
1054 imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; 1121 imask |= portirq_msk[port];
1055 sky2_write32(hw, B0_IMSK, imask); 1122 sky2_write32(hw, B0_IMSK, imask);
1056 1123
1057 return 0; 1124 return 0;
@@ -1079,7 +1146,7 @@ err_out:
1079/* Modular subtraction in ring */ 1146/* Modular subtraction in ring */
1080static inline int tx_dist(unsigned tail, unsigned head) 1147static inline int tx_dist(unsigned tail, unsigned head)
1081{ 1148{
1082 return (head - tail) % TX_RING_SIZE; 1149 return (head - tail) & (TX_RING_SIZE - 1);
1083} 1150}
1084 1151
1085/* Number of list elements available for next tx */ 1152/* Number of list elements available for next tx */
@@ -1256,7 +1323,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1256 le->opcode = OP_BUFFER | HW_OWNER; 1323 le->opcode = OP_BUFFER | HW_OWNER;
1257 1324
1258 fre = sky2->tx_ring 1325 fre = sky2->tx_ring
1259 + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE; 1326 + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
1260 pci_unmap_addr_set(fre, mapaddr, mapping); 1327 pci_unmap_addr_set(fre, mapaddr, mapping);
1261 } 1328 }
1262 1329
@@ -1316,7 +1383,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1316 1383
1317 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1384 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1318 struct tx_ring_info *fre; 1385 struct tx_ring_info *fre;
1319 fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; 1386 fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE);
1320 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), 1387 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
1321 skb_shinfo(skb)->frags[i].size, 1388 skb_shinfo(skb)->frags[i].size,
1322 PCI_DMA_TODEVICE); 1389 PCI_DMA_TODEVICE);
@@ -1402,7 +1469,7 @@ static int sky2_down(struct net_device *dev)
1402 1469
1403 /* Disable port IRQ */ 1470 /* Disable port IRQ */
1404 imask = sky2_read32(hw, B0_IMSK); 1471 imask = sky2_read32(hw, B0_IMSK);
1405 imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; 1472 imask &= ~portirq_msk[port];
1406 sky2_write32(hw, B0_IMSK, imask); 1473 sky2_write32(hw, B0_IMSK, imask);
1407 1474
1408 /* turn off LED's */ 1475 /* turn off LED's */
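The old line replaced here, imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;, has an operator-precedence problem: the ~ applies to the comparison, so the condition is ~1 or ~0 (both non-zero) and the expression always yields Y2_IS_PORT_1, which then gets ANDed in instead of masked out. Indexing the per-port mask array and clearing it explicitly, as the new code does, avoids that class of bug. A two-line sketch of the intended clearing; the 0x1/0x2 mask values are placeholders, not the real Y2_IS_PORT_* bits:

/* Sketch: clear one port's IRQ bit from an interrupt mask. */
#include <stdio.h>

static const unsigned int portirq_msk[] = { 0x1, 0x2 };   /* placeholders */

int main(void)
{
	unsigned int imask = 0x3;        /* both ports currently enabled */
	unsigned int port = 1;

	imask &= ~portirq_msk[port];     /* disable port 1 only */
	printf("imask = 0x%x\n", imask); /* 0x1 */
	return 0;
}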
@@ -1499,17 +1566,26 @@ static void sky2_link_up(struct sky2_port *sky2)
1499 sky2_write8(hw, SK_REG(port, LNK_LED_REG), 1566 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1500 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); 1567 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1501 1568
1502 if (hw->chip_id == CHIP_ID_YUKON_XL) { 1569 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) {
1503 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 1570 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1571 u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */
1572
1573 switch(sky2->speed) {
1574 case SPEED_10:
1575 led |= PHY_M_LEDC_INIT_CTRL(7);
1576 break;
1577
1578 case SPEED_100:
1579 led |= PHY_M_LEDC_STA1_CTRL(7);
1580 break;
1581
1582 case SPEED_1000:
1583 led |= PHY_M_LEDC_STA0_CTRL(7);
1584 break;
1585 }
1504 1586
1505 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 1587 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
1506 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ 1588 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, led);
1507 PHY_M_LEDC_INIT_CTRL(sky2->speed ==
1508 SPEED_10 ? 7 : 0) |
1509 PHY_M_LEDC_STA1_CTRL(sky2->speed ==
1510 SPEED_100 ? 7 : 0) |
1511 PHY_M_LEDC_STA0_CTRL(sky2->speed ==
1512 SPEED_1000 ? 7 : 0));
1513 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 1589 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
1514 } 1590 }
1515 1591
@@ -1584,7 +1660,7 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1584 sky2->speed = sky2_phy_speed(hw, aux); 1660 sky2->speed = sky2_phy_speed(hw, aux);
1585 1661
1586 /* Pause bits are offset (9..8) */ 1662 /* Pause bits are offset (9..8) */
1587 if (hw->chip_id == CHIP_ID_YUKON_XL) 1663 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
1588 aux >>= 6; 1664 aux >>= 6;
1589 1665
1590 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0; 1666 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
@@ -1686,13 +1762,12 @@ static void sky2_tx_timeout(struct net_device *dev)
1686} 1762}
1687 1763
1688 1764
1689#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
1690/* Want receive buffer size to be multiple of 64 bits 1765/* Want receive buffer size to be multiple of 64 bits
1691 * and incl room for vlan and truncation 1766 * and incl room for vlan and truncation
1692 */ 1767 */
1693static inline unsigned sky2_buf_size(int mtu) 1768static inline unsigned sky2_buf_size(int mtu)
1694{ 1769{
1695 return roundup(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8; 1770 return ALIGN(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
1696} 1771}
1697 1772
1698static int sky2_change_mtu(struct net_device *dev, int new_mtu) 1773static int sky2_change_mtu(struct net_device *dev, int new_mtu)
@@ -1857,39 +1932,38 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
1857 } 1932 }
1858} 1933}
1859 1934
1935/* Is status ring empty or is there more to do? */
1936static inline int sky2_more_work(const struct sky2_hw *hw)
1937{
1938 return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
1939}
1940
1860/* Process status response ring */ 1941/* Process status response ring */
1861static int sky2_status_intr(struct sky2_hw *hw, int to_do) 1942static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1862{ 1943{
1863 int work_done = 0; 1944 int work_done = 0;
1945 u16 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1864 1946
1865 rmb(); 1947 rmb();
1866 1948
1867 for(;;) { 1949 while (hw->st_idx != hwidx) {
1868 struct sky2_status_le *le = hw->st_le + hw->st_idx; 1950 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1869 struct net_device *dev; 1951 struct net_device *dev;
1870 struct sky2_port *sky2; 1952 struct sky2_port *sky2;
1871 struct sk_buff *skb; 1953 struct sk_buff *skb;
1872 u32 status; 1954 u32 status;
1873 u16 length; 1955 u16 length;
1874 u8 link, opcode;
1875 1956
1876 opcode = le->opcode; 1957 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
1877 if (!opcode)
1878 break;
1879 opcode &= ~HW_OWNER;
1880 1958
1881 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; 1959 BUG_ON(le->link >= 2);
1882 le->opcode = 0; 1960 dev = hw->dev[le->link];
1883
1884 link = le->link;
1885 BUG_ON(link >= 2);
1886 dev = hw->dev[link];
1887 1961
1888 sky2 = netdev_priv(dev); 1962 sky2 = netdev_priv(dev);
1889 length = le->length; 1963 length = le->length;
1890 status = le->status; 1964 status = le->status;
1891 1965
1892 switch (opcode) { 1966 switch (le->opcode & ~HW_OWNER) {
1893 case OP_RXSTAT: 1967 case OP_RXSTAT:
1894 skb = sky2_receive(sky2, length, status); 1968 skb = sky2_receive(sky2, length, status);
1895 if (!skb) 1969 if (!skb)
@@ -1929,7 +2003,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1929 2003
1930 case OP_TXINDEXLE: 2004 case OP_TXINDEXLE:
1931 /* TX index reports status for both ports */ 2005 /* TX index reports status for both ports */
1932 sky2_tx_done(hw->dev[0], status & 0xffff); 2006 BUILD_BUG_ON(TX_RING_SIZE > 0x1000);
2007 sky2_tx_done(hw->dev[0], status & 0xfff);
1933 if (hw->dev[1]) 2008 if (hw->dev[1])
1934 sky2_tx_done(hw->dev[1], 2009 sky2_tx_done(hw->dev[1],
1935 ((status >> 24) & 0xff) 2010 ((status >> 24) & 0xff)
@@ -1939,8 +2014,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1939 default: 2014 default:
1940 if (net_ratelimit()) 2015 if (net_ratelimit())
1941 printk(KERN_WARNING PFX 2016 printk(KERN_WARNING PFX
1942 "unknown status opcode 0x%x\n", opcode); 2017 "unknown status opcode 0x%x\n", le->opcode);
1943 break; 2018 goto exit_loop;
1944 } 2019 }
1945 } 2020 }
1946 2021
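The status loop no longer polls an ownership bit in each entry's opcode; it advances a software consumer index and stops when it catches up with the chip's put index, and sky2_more_work() repeats that comparison so sky2_poll() can tell whether the hardware produced new entries while it was draining. A compact user-space sketch of the index-comparison pattern, assuming a power-of-two ring size so the wrap is a simple mask:

    #include <stdio.h>

    #define RING_SIZE 2048                      /* power of two */
    #define RING_NEXT(i) (((i) + 1) & (RING_SIZE - 1))

    struct ring {
            unsigned consumer;                  /* driver's read index */
            unsigned hw_put;                    /* written back by hardware */
    };

    /* Drain everything produced so far, up to a work budget. */
    static int drain(struct ring *r, int budget, void (*handle)(unsigned idx))
    {
            int done = 0;
            unsigned put = r->hw_put;           /* snapshot of the put index */

            while (r->consumer != put && done < budget) {
                    handle(r->consumer);
                    r->consumer = RING_NEXT(r->consumer);
                    done++;
            }
            return done;
    }

    /* Equivalent check to sky2_more_work(): did more entries arrive? */
    static int more_work(const struct ring *r)
    {
            return r->consumer != r->hw_put;
    }

    static void show(unsigned idx) { printf("entry %u\n", idx); }

    int main(void)
    {
            struct ring r = { .consumer = 2045, .hw_put = 3 };

            drain(&r, 16, show);                /* wraps across index 2047 */
            printf("more work: %d\n", more_work(&r));
            return 0;
    }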
@@ -2086,6 +2161,21 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
2086 } 2161 }
2087} 2162}
2088 2163
2164/* If idle then force a fake soft NAPI poll once a second
2165 * to work around cases where sharing an edge triggered interrupt.
2166 */
2167static void sky2_idle(unsigned long arg)
2168{
2169 struct sky2_hw *hw = (struct sky2_hw *) arg;
2170 struct net_device *dev = hw->dev[0];
2171
2172 if (__netif_rx_schedule_prep(dev))
2173 __netif_rx_schedule(dev);
2174
2175 mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
2176}
2177
2178
2089static int sky2_poll(struct net_device *dev0, int *budget) 2179static int sky2_poll(struct net_device *dev0, int *budget)
2090{ 2180{
2091 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; 2181 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
@@ -2093,49 +2183,46 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2093 int work_done = 0; 2183 int work_done = 0;
2094 u32 status = sky2_read32(hw, B0_Y2_SP_EISR); 2184 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2095 2185
2096 if (unlikely(status & ~Y2_IS_STAT_BMU)) { 2186 if (status & Y2_IS_HW_ERR)
2097 if (status & Y2_IS_HW_ERR) 2187 sky2_hw_intr(hw);
2098 sky2_hw_intr(hw);
2099
2100 if (status & Y2_IS_IRQ_PHY1)
2101 sky2_phy_intr(hw, 0);
2102 2188
2103 if (status & Y2_IS_IRQ_PHY2) 2189 if (status & Y2_IS_IRQ_PHY1)
2104 sky2_phy_intr(hw, 1); 2190 sky2_phy_intr(hw, 0);
2105 2191
2106 if (status & Y2_IS_IRQ_MAC1) 2192 if (status & Y2_IS_IRQ_PHY2)
2107 sky2_mac_intr(hw, 0); 2193 sky2_phy_intr(hw, 1);
2108 2194
2109 if (status & Y2_IS_IRQ_MAC2) 2195 if (status & Y2_IS_IRQ_MAC1)
2110 sky2_mac_intr(hw, 1); 2196 sky2_mac_intr(hw, 0);
2111 2197
2112 if (status & Y2_IS_CHK_RX1) 2198 if (status & Y2_IS_IRQ_MAC2)
2113 sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); 2199 sky2_mac_intr(hw, 1);
2114 2200
2115 if (status & Y2_IS_CHK_RX2) 2201 if (status & Y2_IS_CHK_RX1)
2116 sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); 2202 sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
2117 2203
2118 if (status & Y2_IS_CHK_TXA1) 2204 if (status & Y2_IS_CHK_RX2)
2119 sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); 2205 sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
2120 2206
2121 if (status & Y2_IS_CHK_TXA2) 2207 if (status & Y2_IS_CHK_TXA1)
2122 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); 2208 sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
2123 }
2124 2209
2125 if (status & Y2_IS_STAT_BMU) { 2210 if (status & Y2_IS_CHK_TXA2)
2126 work_done = sky2_status_intr(hw, work_limit); 2211 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
2127 *budget -= work_done;
2128 dev0->quota -= work_done;
2129 2212
2130 if (work_done >= work_limit) 2213 work_done = sky2_status_intr(hw, work_limit);
2131 return 1; 2214 *budget -= work_done;
2215 dev0->quota -= work_done;
2132 2216
2217 if (status & Y2_IS_STAT_BMU)
2133 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 2218 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2134 } 2219
2220 if (sky2_more_work(hw))
2221 return 1;
2135 2222
2136 netif_rx_complete(dev0); 2223 netif_rx_complete(dev0);
2137 2224
2138 status = sky2_read32(hw, B0_Y2_SP_LISR); 2225 sky2_read32(hw, B0_Y2_SP_LISR);
2139 return 0; 2226 return 0;
2140} 2227}
2141 2228
@@ -2153,8 +2240,6 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2153 prefetch(&hw->st_le[hw->st_idx]); 2240 prefetch(&hw->st_le[hw->st_idx]);
2154 if (likely(__netif_rx_schedule_prep(dev0))) 2241 if (likely(__netif_rx_schedule_prep(dev0)))
2155 __netif_rx_schedule(dev0); 2242 __netif_rx_schedule(dev0);
2156 else
2157 printk(KERN_DEBUG PFX "irq race detected\n");
2158 2243
2159 return IRQ_HANDLED; 2244 return IRQ_HANDLED;
2160} 2245}
@@ -2193,7 +2278,7 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
2193} 2278}
2194 2279
2195 2280
2196static int sky2_reset(struct sky2_hw *hw) 2281static int __devinit sky2_reset(struct sky2_hw *hw)
2197{ 2282{
2198 u16 status; 2283 u16 status;
2199 u8 t8, pmd_type; 2284 u8 t8, pmd_type;
@@ -2218,13 +2303,6 @@ static int sky2_reset(struct sky2_hw *hw)
2218 return -EOPNOTSUPP; 2303 return -EOPNOTSUPP;
2219 } 2304 }
2220 2305
2221 /* This chip is new and not tested yet */
2222 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
2223 pr_info(PFX "%s: is a version of Yukon 2 chipset that has not been tested yet.\n",
2224 pci_name(hw->pdev));
2225 pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n");
2226 }
2227
2228 /* disable ASF */ 2306 /* disable ASF */
2229 if (hw->chip_id <= CHIP_ID_YUKON_EC) { 2307 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2230 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 2308 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
@@ -3028,12 +3106,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3028 sky2->duplex = -1; 3106 sky2->duplex = -1;
3029 sky2->speed = -1; 3107 sky2->speed = -1;
3030 sky2->advertising = sky2_supported_modes(hw); 3108 sky2->advertising = sky2_supported_modes(hw);
3031 3109 sky2->rx_csum = 1;
3032 /* Receive checksum disabled for Yukon XL
3033 * because of observed problems with incorrect
3034 * values when multiple packets are received in one interrupt
3035 */
3036 sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
3037 3110
3038 spin_lock_init(&sky2->phy_lock); 3111 spin_lock_init(&sky2->phy_lock);
3039 sky2->tx_pending = TX_DEF_PENDING; 3112 sky2->tx_pending = TX_DEF_PENDING;
@@ -3276,6 +3349,11 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3276 3349
3277 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3350 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3278 3351
3352 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
3353 if (idle_timeout > 0)
3354 mod_timer(&hw->idle_timer,
3355 jiffies + msecs_to_jiffies(idle_timeout));
3356
3279 pci_set_drvdata(pdev, hw); 3357 pci_set_drvdata(pdev, hw);
3280 3358
3281 return 0; 3359 return 0;
@@ -3311,13 +3389,17 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3311 if (!hw) 3389 if (!hw)
3312 return; 3390 return;
3313 3391
3392 del_timer_sync(&hw->idle_timer);
3393
3394 sky2_write32(hw, B0_IMSK, 0);
3395 synchronize_irq(hw->pdev->irq);
3396
3314 dev0 = hw->dev[0]; 3397 dev0 = hw->dev[0];
3315 dev1 = hw->dev[1]; 3398 dev1 = hw->dev[1];
3316 if (dev1) 3399 if (dev1)
3317 unregister_netdev(dev1); 3400 unregister_netdev(dev1);
3318 unregister_netdev(dev0); 3401 unregister_netdev(dev0);
3319 3402
3320 sky2_write32(hw, B0_IMSK, 0);
3321 sky2_set_power_state(hw, PCI_D3hot); 3403 sky2_set_power_state(hw, PCI_D3hot);
3322 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); 3404 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
3323 sky2_write8(hw, B0_CTST, CS_RST_SET); 3405 sky2_write8(hw, B0_CTST, CS_RST_SET);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 89dd18cd12f0..8a0bc5525f0a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -214,6 +214,8 @@ enum csr_regs {
214enum { 214enum {
215 Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */ 215 Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
216 Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */ 216 Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
217 Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */
 218 Y2_HW_WOL_OFF = 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
217 Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */ 219 Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
218 Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */ 220 Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
219 Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */ 221 Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
@@ -378,6 +380,9 @@ enum {
378 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ 380 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
379 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ 381 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
380 CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ 382 CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
383
384 CHIP_REV_YU_EC_U_A0 = 0,
385 CHIP_REV_YU_EC_U_A1 = 1,
381}; 386};
382 387
383/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ 388/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
@@ -1880,6 +1885,8 @@ struct sky2_hw {
1880 struct sky2_status_le *st_le; 1885 struct sky2_status_le *st_le;
1881 u32 st_idx; 1886 u32 st_idx;
1882 dma_addr_t st_dma; 1887 dma_addr_t st_dma;
1888
1889 struct timer_list idle_timer;
1883 int msi_detected; 1890 int msi_detected;
1884 wait_queue_head_t msi_wait; 1891 wait_queue_head_t msi_wait;
1885}; 1892};
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 43f5e86fc559..394339d5e87c 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1652,6 +1652,8 @@ spider_net_enable_card(struct spider_net_card *card)
1652 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE }, 1652 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1653 1653
1654 { SPIDER_NET_GMRWOLCTRL, 0 }, 1654 { SPIDER_NET_GMRWOLCTRL, 0 },
1655 { SPIDER_NET_GTESTMD, 0x10000000 },
1656 { SPIDER_NET_GTTQMSK, 0x00400040 },
1655 { SPIDER_NET_GTESTMD, 0 }, 1657 { SPIDER_NET_GTESTMD, 0 },
1656 1658
1657 { SPIDER_NET_GMACINTEN, 0 }, 1659 { SPIDER_NET_GMACINTEN, 0 },
@@ -1792,15 +1794,7 @@ spider_net_setup_phy(struct spider_net_card *card)
1792 if (phy->def->ops->setup_forced) 1794 if (phy->def->ops->setup_forced)
1793 phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL); 1795 phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
1794 1796
1795 /* the following two writes could be moved to sungem_phy.c */ 1797 phy->def->ops->enable_fiber(phy);
1796 /* enable fiber mode */
1797 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x9020);
1798 /* LEDs active in both modes, autosense prio = fiber */
1799 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
1800
1801 /* switch off fibre autoneg */
1802 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01);
1803 spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004);
1804 1798
1805 phy->def->ops->read_link(phy); 1799 phy->def->ops->read_link(phy);
1806 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name, 1800 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 5922b529a048..3b8d951cf73c 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -120,6 +120,8 @@ extern char spider_net_driver_name[];
120#define SPIDER_NET_GMRUAFILnR 0x00000500 120#define SPIDER_NET_GMRUAFILnR 0x00000500
121#define SPIDER_NET_GMRUA0FIL15R 0x00000578 121#define SPIDER_NET_GMRUA0FIL15R 0x00000578
122 122
123#define SPIDER_NET_GTTQMSK 0x00000934
124
123/* RX DMA controller registers, all 0x00000a.. are for DMA controller A, 125/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
124 * 0x00000b.. for DMA controller B, etc. */ 126 * 0x00000b.. for DMA controller B, etc. */
125#define SPIDER_NET_GDADCHA 0x00000a00 127#define SPIDER_NET_GDADCHA 0x00000a00
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index cb0aba95d4e3..b2ddd5e79303 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -275,7 +275,7 @@ static int bcm5411_init(struct mii_phy* phy)
275 return 0; 275 return 0;
276} 276}
277 277
278static int bcm5411_suspend(struct mii_phy* phy) 278static int generic_suspend(struct mii_phy* phy)
279{ 279{
280 phy_write(phy, MII_BMCR, BMCR_PDOWN); 280 phy_write(phy, MII_BMCR, BMCR_PDOWN);
281 281
@@ -329,6 +329,30 @@ static int bcm5421_init(struct mii_phy* phy)
329 return 0; 329 return 0;
330} 330}
331 331
332static int bcm5421_enable_fiber(struct mii_phy* phy)
333{
334 /* enable fiber mode */
335 phy_write(phy, MII_NCONFIG, 0x9020);
336 /* LEDs active in both modes, autosense prio = fiber */
337 phy_write(phy, MII_NCONFIG, 0x945f);
338
339 /* switch off fibre autoneg */
340 phy_write(phy, MII_NCONFIG, 0xfc01);
341 phy_write(phy, 0x0b, 0x0004);
342
343 return 0;
344}
345
346static int bcm5461_enable_fiber(struct mii_phy* phy)
347{
348 phy_write(phy, MII_NCONFIG, 0xfc0c);
349 phy_write(phy, MII_BMCR, 0x4140);
350 phy_write(phy, MII_NCONFIG, 0xfc0b);
351 phy_write(phy, MII_BMCR, 0x0140);
352
353 return 0;
354}
355
332static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) 356static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
333{ 357{
334 u16 ctl, adv; 358 u16 ctl, adv;
@@ -738,7 +762,7 @@ static struct mii_phy_def bcm5401_phy_def = {
738/* Broadcom BCM 5411 */ 762/* Broadcom BCM 5411 */
739static struct mii_phy_ops bcm5411_phy_ops = { 763static struct mii_phy_ops bcm5411_phy_ops = {
740 .init = bcm5411_init, 764 .init = bcm5411_init,
741 .suspend = bcm5411_suspend, 765 .suspend = generic_suspend,
742 .setup_aneg = bcm54xx_setup_aneg, 766 .setup_aneg = bcm54xx_setup_aneg,
743 .setup_forced = bcm54xx_setup_forced, 767 .setup_forced = bcm54xx_setup_forced,
744 .poll_link = genmii_poll_link, 768 .poll_link = genmii_poll_link,
@@ -757,11 +781,12 @@ static struct mii_phy_def bcm5411_phy_def = {
757/* Broadcom BCM 5421 */ 781/* Broadcom BCM 5421 */
758static struct mii_phy_ops bcm5421_phy_ops = { 782static struct mii_phy_ops bcm5421_phy_ops = {
759 .init = bcm5421_init, 783 .init = bcm5421_init,
760 .suspend = bcm5411_suspend, 784 .suspend = generic_suspend,
761 .setup_aneg = bcm54xx_setup_aneg, 785 .setup_aneg = bcm54xx_setup_aneg,
762 .setup_forced = bcm54xx_setup_forced, 786 .setup_forced = bcm54xx_setup_forced,
763 .poll_link = genmii_poll_link, 787 .poll_link = genmii_poll_link,
764 .read_link = bcm54xx_read_link, 788 .read_link = bcm54xx_read_link,
789 .enable_fiber = bcm5421_enable_fiber,
765}; 790};
766 791
767static struct mii_phy_def bcm5421_phy_def = { 792static struct mii_phy_def bcm5421_phy_def = {
@@ -776,7 +801,7 @@ static struct mii_phy_def bcm5421_phy_def = {
776/* Broadcom BCM 5421 built-in K2 */ 801/* Broadcom BCM 5421 built-in K2 */
777static struct mii_phy_ops bcm5421k2_phy_ops = { 802static struct mii_phy_ops bcm5421k2_phy_ops = {
778 .init = bcm5421_init, 803 .init = bcm5421_init,
779 .suspend = bcm5411_suspend, 804 .suspend = generic_suspend,
780 .setup_aneg = bcm54xx_setup_aneg, 805 .setup_aneg = bcm54xx_setup_aneg,
781 .setup_forced = bcm54xx_setup_forced, 806 .setup_forced = bcm54xx_setup_forced,
782 .poll_link = genmii_poll_link, 807 .poll_link = genmii_poll_link,
@@ -792,10 +817,29 @@ static struct mii_phy_def bcm5421k2_phy_def = {
792 .ops = &bcm5421k2_phy_ops 817 .ops = &bcm5421k2_phy_ops
793}; 818};
794 819
820static struct mii_phy_ops bcm5461_phy_ops = {
821 .init = bcm5421_init,
822 .suspend = generic_suspend,
823 .setup_aneg = bcm54xx_setup_aneg,
824 .setup_forced = bcm54xx_setup_forced,
825 .poll_link = genmii_poll_link,
826 .read_link = bcm54xx_read_link,
827 .enable_fiber = bcm5461_enable_fiber,
828};
829
830static struct mii_phy_def bcm5461_phy_def = {
831 .phy_id = 0x002060c0,
832 .phy_id_mask = 0xfffffff0,
833 .name = "BCM5461",
834 .features = MII_GBIT_FEATURES,
835 .magic_aneg = 1,
836 .ops = &bcm5461_phy_ops
837};
838
795/* Broadcom BCM 5462 built-in Vesta */ 839/* Broadcom BCM 5462 built-in Vesta */
796static struct mii_phy_ops bcm5462V_phy_ops = { 840static struct mii_phy_ops bcm5462V_phy_ops = {
797 .init = bcm5421_init, 841 .init = bcm5421_init,
798 .suspend = bcm5411_suspend, 842 .suspend = generic_suspend,
799 .setup_aneg = bcm54xx_setup_aneg, 843 .setup_aneg = bcm54xx_setup_aneg,
800 .setup_forced = bcm54xx_setup_forced, 844 .setup_forced = bcm54xx_setup_forced,
801 .poll_link = genmii_poll_link, 845 .poll_link = genmii_poll_link,
@@ -816,6 +860,7 @@ static struct mii_phy_def bcm5462V_phy_def = {
816 * would be useful here) --BenH. 860 * would be useful here) --BenH.
817 */ 861 */
818static struct mii_phy_ops marvell_phy_ops = { 862static struct mii_phy_ops marvell_phy_ops = {
863 .suspend = generic_suspend,
819 .setup_aneg = marvell_setup_aneg, 864 .setup_aneg = marvell_setup_aneg,
820 .setup_forced = marvell_setup_forced, 865 .setup_forced = marvell_setup_forced,
821 .poll_link = genmii_poll_link, 866 .poll_link = genmii_poll_link,
@@ -856,6 +901,7 @@ static struct mii_phy_def* mii_phy_table[] = {
856 &bcm5411_phy_def, 901 &bcm5411_phy_def,
857 &bcm5421_phy_def, 902 &bcm5421_phy_def,
858 &bcm5421k2_phy_def, 903 &bcm5421k2_phy_def,
904 &bcm5461_phy_def,
859 &bcm5462V_phy_def, 905 &bcm5462V_phy_def,
860 &marvell_phy_def, 906 &marvell_phy_def,
861 &genmii_phy_def, 907 &genmii_phy_def,
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h
index 430544496c52..69e125197fcf 100644
--- a/drivers/net/sungem_phy.h
+++ b/drivers/net/sungem_phy.h
@@ -12,6 +12,7 @@ struct mii_phy_ops
12 int (*setup_forced)(struct mii_phy *phy, int speed, int fd); 12 int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
13 int (*poll_link)(struct mii_phy *phy); 13 int (*poll_link)(struct mii_phy *phy);
14 int (*read_link)(struct mii_phy *phy); 14 int (*read_link)(struct mii_phy *phy);
15 int (*enable_fiber)(struct mii_phy *phy);
15}; 16};
16 17
17/* Structure used to statically define an mii/gii based PHY */ 18/* Structure used to statically define an mii/gii based PHY */
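The fibre bring-up moves out of spider_net's board code and behind a new enable_fiber entry in struct mii_phy_ops, so the card calls the PHY's own method instead of poking MII_NCONFIG with magic values. A toy, self-contained illustration of that ops-table pattern (all names below are invented for the example, not taken from the driver):

    #include <stdio.h>

    struct phy;

    /* Per-PHY operations table; enable_fiber is an optional hook. */
    struct phy_ops {
            int (*init)(struct phy *p);
            int (*enable_fiber)(struct phy *p);
    };

    struct phy {
            const char *name;
            const struct phy_ops *ops;
    };

    static int demo_enable_fiber(struct phy *p)
    {
            printf("%s: switching to fiber mode\n", p->name);
            return 0;
    }

    static const struct phy_ops demo_ops = {
            .enable_fiber = demo_enable_fiber,
    };

    int main(void)
    {
            struct phy p = { .name = "BCM5421", .ops = &demo_ops };

            /* call the optional hook only when this PHY provides it */
            if (p.ops->enable_fiber)
                    p.ops->enable_fiber(&p);
            return 0;
    }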
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 73e271e59c6a..49ad60b72657 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.56" 72#define DRV_MODULE_VERSION "3.58"
73#define DRV_MODULE_RELDATE "Apr 1, 2006" 73#define DRV_MODULE_RELDATE "May 22, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -974,6 +974,8 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
974 return err; 974 return err;
975} 975}
976 976
977static void tg3_link_report(struct tg3 *);
978
977/* This will reset the tigon3 PHY if there is no valid 979/* This will reset the tigon3 PHY if there is no valid
978 * link unless the FORCE argument is non-zero. 980 * link unless the FORCE argument is non-zero.
979 */ 981 */
@@ -987,6 +989,11 @@ static int tg3_phy_reset(struct tg3 *tp)
987 if (err != 0) 989 if (err != 0)
988 return -EBUSY; 990 return -EBUSY;
989 991
992 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
993 netif_carrier_off(tp->dev);
994 tg3_link_report(tp);
995 }
996
990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
@@ -1023,6 +1030,12 @@ out:
1023 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); 1030 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1024 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 1031 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1025 } 1032 }
1033 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1034 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1035 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1036 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1037 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1038 }
1026 /* Set Extended packet length bit (bit 14) on all chips that */ 1039 /* Set Extended packet length bit (bit 14) on all chips that */
1027 /* support jumbo frames */ 1040 /* support jumbo frames */
1028 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { 1041 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
@@ -3531,7 +3544,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3531 return IRQ_RETVAL(0); 3544 return IRQ_RETVAL(0);
3532} 3545}
3533 3546
3534static int tg3_init_hw(struct tg3 *); 3547static int tg3_init_hw(struct tg3 *, int);
3535static int tg3_halt(struct tg3 *, int, int); 3548static int tg3_halt(struct tg3 *, int, int);
3536 3549
3537#ifdef CONFIG_NET_POLL_CONTROLLER 3550#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3567,7 +3580,7 @@ static void tg3_reset_task(void *_data)
3567 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; 3580 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3568 3581
3569 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 3582 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3570 tg3_init_hw(tp); 3583 tg3_init_hw(tp, 1);
3571 3584
3572 tg3_netif_start(tp); 3585 tg3_netif_start(tp);
3573 3586
@@ -4042,7 +4055,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4042 4055
4043 tg3_set_mtu(dev, tp, new_mtu); 4056 tg3_set_mtu(dev, tp, new_mtu);
4044 4057
4045 tg3_init_hw(tp); 4058 tg3_init_hw(tp, 0);
4046 4059
4047 tg3_netif_start(tp); 4060 tg3_netif_start(tp);
4048 4061
@@ -5719,9 +5732,23 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5719 if (!netif_running(dev)) 5732 if (!netif_running(dev))
5720 return 0; 5733 return 0;
5721 5734
5722 spin_lock_bh(&tp->lock); 5735 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5723 __tg3_set_mac_addr(tp); 5736 /* Reset chip so that ASF can re-init any MAC addresses it
5724 spin_unlock_bh(&tp->lock); 5737 * needs.
5738 */
5739 tg3_netif_stop(tp);
5740 tg3_full_lock(tp, 1);
5741
5742 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5743 tg3_init_hw(tp, 0);
5744
5745 tg3_netif_start(tp);
5746 tg3_full_unlock(tp);
5747 } else {
5748 spin_lock_bh(&tp->lock);
5749 __tg3_set_mac_addr(tp);
5750 spin_unlock_bh(&tp->lock);
5751 }
5725 5752
5726 return 0; 5753 return 0;
5727} 5754}
@@ -5771,7 +5798,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5771} 5798}
5772 5799
5773/* tp->lock is held. */ 5800/* tp->lock is held. */
5774static int tg3_reset_hw(struct tg3 *tp) 5801static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5775{ 5802{
5776 u32 val, rdmac_mode; 5803 u32 val, rdmac_mode;
5777 int i, err, limit; 5804 int i, err, limit;
@@ -5786,7 +5813,7 @@ static int tg3_reset_hw(struct tg3 *tp)
5786 tg3_abort_hw(tp, 1); 5813 tg3_abort_hw(tp, 1);
5787 } 5814 }
5788 5815
5789 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 5816 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5790 tg3_phy_reset(tp); 5817 tg3_phy_reset(tp);
5791 5818
5792 err = tg3_chip_reset(tp); 5819 err = tg3_chip_reset(tp);
@@ -6327,7 +6354,7 @@ static int tg3_reset_hw(struct tg3 *tp)
6327 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 6354 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6328 } 6355 }
6329 6356
6330 err = tg3_setup_phy(tp, 1); 6357 err = tg3_setup_phy(tp, reset_phy);
6331 if (err) 6358 if (err)
6332 return err; 6359 return err;
6333 6360
@@ -6400,7 +6427,7 @@ static int tg3_reset_hw(struct tg3 *tp)
6400/* Called at device open time to get the chip ready for 6427/* Called at device open time to get the chip ready for
6401 * packet processing. Invoked with tp->lock held. 6428 * packet processing. Invoked with tp->lock held.
6402 */ 6429 */
6403static int tg3_init_hw(struct tg3 *tp) 6430static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6404{ 6431{
6405 int err; 6432 int err;
6406 6433
@@ -6413,7 +6440,7 @@ static int tg3_init_hw(struct tg3 *tp)
6413 6440
6414 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 6441 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6415 6442
6416 err = tg3_reset_hw(tp); 6443 err = tg3_reset_hw(tp, reset_phy);
6417 6444
6418out: 6445out:
6419 return err; 6446 return err;
@@ -6461,6 +6488,10 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
6461 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); 6488 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6462 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); 6489 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6463 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 6490 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6491
6492 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6493 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6494 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
6464} 6495}
6465 6496
6466static void tg3_timer(unsigned long __opaque) 6497static void tg3_timer(unsigned long __opaque)
@@ -6683,7 +6714,7 @@ static int tg3_test_msi(struct tg3 *tp)
6683 tg3_full_lock(tp, 1); 6714 tg3_full_lock(tp, 1);
6684 6715
6685 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6716 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6686 err = tg3_init_hw(tp); 6717 err = tg3_init_hw(tp, 1);
6687 6718
6688 tg3_full_unlock(tp); 6719 tg3_full_unlock(tp);
6689 6720
@@ -6748,7 +6779,7 @@ static int tg3_open(struct net_device *dev)
6748 6779
6749 tg3_full_lock(tp, 0); 6780 tg3_full_lock(tp, 0);
6750 6781
6751 err = tg3_init_hw(tp); 6782 err = tg3_init_hw(tp, 1);
6752 if (err) { 6783 if (err) {
6753 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6784 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6754 tg3_free_rings(tp); 6785 tg3_free_rings(tp);
@@ -7626,21 +7657,23 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7626 cmd->supported |= (SUPPORTED_1000baseT_Half | 7657 cmd->supported |= (SUPPORTED_1000baseT_Half |
7627 SUPPORTED_1000baseT_Full); 7658 SUPPORTED_1000baseT_Full);
7628 7659
7629 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 7660 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7630 cmd->supported |= (SUPPORTED_100baseT_Half | 7661 cmd->supported |= (SUPPORTED_100baseT_Half |
7631 SUPPORTED_100baseT_Full | 7662 SUPPORTED_100baseT_Full |
7632 SUPPORTED_10baseT_Half | 7663 SUPPORTED_10baseT_Half |
7633 SUPPORTED_10baseT_Full | 7664 SUPPORTED_10baseT_Full |
7634 SUPPORTED_MII); 7665 SUPPORTED_MII);
7635 else 7666 cmd->port = PORT_TP;
7667 } else {
7636 cmd->supported |= SUPPORTED_FIBRE; 7668 cmd->supported |= SUPPORTED_FIBRE;
7669 cmd->port = PORT_FIBRE;
7670 }
7637 7671
7638 cmd->advertising = tp->link_config.advertising; 7672 cmd->advertising = tp->link_config.advertising;
7639 if (netif_running(dev)) { 7673 if (netif_running(dev)) {
7640 cmd->speed = tp->link_config.active_speed; 7674 cmd->speed = tp->link_config.active_speed;
7641 cmd->duplex = tp->link_config.active_duplex; 7675 cmd->duplex = tp->link_config.active_duplex;
7642 } 7676 }
7643 cmd->port = 0;
7644 cmd->phy_address = PHY_ADDR; 7677 cmd->phy_address = PHY_ADDR;
7645 cmd->transceiver = 0; 7678 cmd->transceiver = 0;
7646 cmd->autoneg = tp->link_config.autoneg; 7679 cmd->autoneg = tp->link_config.autoneg;
@@ -7839,7 +7872,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
7839 7872
7840 if (netif_running(dev)) { 7873 if (netif_running(dev)) {
7841 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7874 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7842 tg3_init_hw(tp); 7875 tg3_init_hw(tp, 1);
7843 tg3_netif_start(tp); 7876 tg3_netif_start(tp);
7844 } 7877 }
7845 7878
@@ -7884,7 +7917,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
7884 7917
7885 if (netif_running(dev)) { 7918 if (netif_running(dev)) {
7886 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7919 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7887 tg3_init_hw(tp); 7920 tg3_init_hw(tp, 1);
7888 tg3_netif_start(tp); 7921 tg3_netif_start(tp);
7889 } 7922 }
7890 7923
@@ -8427,6 +8460,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8427 8460
8428 tx_len = 1514; 8461 tx_len = 1514;
8429 skb = dev_alloc_skb(tx_len); 8462 skb = dev_alloc_skb(tx_len);
8463 if (!skb)
8464 return -ENOMEM;
8465
8430 tx_data = skb_put(skb, tx_len); 8466 tx_data = skb_put(skb, tx_len);
8431 memcpy(tx_data, tp->dev->dev_addr, 6); 8467 memcpy(tx_data, tp->dev->dev_addr, 6);
8432 memset(tx_data + 6, 0x0, 8); 8468 memset(tx_data + 6, 0x0, 8);
@@ -8522,7 +8558,7 @@ static int tg3_test_loopback(struct tg3 *tp)
8522 if (!netif_running(tp->dev)) 8558 if (!netif_running(tp->dev))
8523 return TG3_LOOPBACK_FAILED; 8559 return TG3_LOOPBACK_FAILED;
8524 8560
8525 tg3_reset_hw(tp); 8561 tg3_reset_hw(tp, 1);
8526 8562
8527 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 8563 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8528 err |= TG3_MAC_LOOPBACK_FAILED; 8564 err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8596,7 +8632,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8596 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8632 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8597 if (netif_running(dev)) { 8633 if (netif_running(dev)) {
8598 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 8634 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8599 tg3_init_hw(tp); 8635 tg3_init_hw(tp, 1);
8600 tg3_netif_start(tp); 8636 tg3_netif_start(tp);
8601 } 8637 }
8602 8638
@@ -9377,7 +9413,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9377 9413
9378 if ((page_off == 0) || (i == 0)) 9414 if ((page_off == 0) || (i == 0))
9379 nvram_cmd |= NVRAM_CMD_FIRST; 9415 nvram_cmd |= NVRAM_CMD_FIRST;
9380 else if (page_off == (tp->nvram_pagesize - 4)) 9416 if (page_off == (tp->nvram_pagesize - 4))
9381 nvram_cmd |= NVRAM_CMD_LAST; 9417 nvram_cmd |= NVRAM_CMD_LAST;
9382 9418
9383 if (i == (len - 4)) 9419 if (i == (len - 4))
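Turning the second test from an else-if into a plain if lets NVRAM_CMD_FIRST and NVRAM_CMD_LAST be set on the same word, which matters when the first word of a buffered write is also the final word of its flash page. A sketch of that flag selection with made-up command bits and page size (the real register encoding in tg3 differs):

    #include <stdio.h>
    #include <stdint.h>

    #define CMD_FIRST 0x1
    #define CMD_LAST  0x2

    /* Framing flags for the 4-byte word at buffer index i (of len bytes),
     * landing at byte offset page_off within a pagesize-byte flash page. */
    static uint32_t word_flags(uint32_t page_off, uint32_t i, uint32_t len,
                               uint32_t pagesize)
    {
            uint32_t flags = 0;

            if (page_off == 0 || i == 0)
                    flags |= CMD_FIRST;
            /* formerly an "else if": FIRST and LAST can now combine */
            if (page_off == pagesize - 4)
                    flags |= CMD_LAST;
            if (i == len - 4)
                    flags |= CMD_LAST;
            return flags;
    }

    int main(void)
    {
            /* first buffered word that is also the last word of a
             * 256-byte page: both framing bits are needed */
            printf("0x%x\n", word_flags(252, 0, 64, 256));
            return 0;
    }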
@@ -10353,10 +10389,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10353 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) 10389 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10354 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; 10390 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10355 10391
10356 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 10392 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10357 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && 10393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10358 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)) 10394 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10359 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 10395 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10396 else
10397 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10398 }
10360 10399
10361 tp->coalesce_mode = 0; 10400 tp->coalesce_mode = 0;
10362 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && 10401 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
@@ -11569,7 +11608,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11569 tg3_full_lock(tp, 0); 11608 tg3_full_lock(tp, 0);
11570 11609
11571 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11610 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11572 tg3_init_hw(tp); 11611 tg3_init_hw(tp, 1);
11573 11612
11574 tp->timer.expires = jiffies + tp->timer_offset; 11613 tp->timer.expires = jiffies + tp->timer_offset;
11575 add_timer(&tp->timer); 11614 add_timer(&tp->timer);
@@ -11603,7 +11642,7 @@ static int tg3_resume(struct pci_dev *pdev)
11603 tg3_full_lock(tp, 0); 11642 tg3_full_lock(tp, 0);
11604 11643
11605 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11644 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11606 tg3_init_hw(tp); 11645 tg3_init_hw(tp, 1);
11607 11646
11608 tp->timer.expires = jiffies + tp->timer_offset; 11647 tp->timer.expires = jiffies + tp->timer_offset;
11609 add_timer(&tp->timer); 11648 add_timer(&tp->timer);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8c8b987d1250..0e29b885d449 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2215,6 +2215,7 @@ struct tg3 {
2215#define TG3_FLG2_HW_TSO_2 0x08000000 2215#define TG3_FLG2_HW_TSO_2 0x08000000
2216#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) 2216#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
2217#define TG3_FLG2_1SHOT_MSI 0x10000000 2217#define TG3_FLG2_1SHOT_MSI 0x10000000
2218#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2218 2219
2219 u32 split_mode_max_reqs; 2220 u32 split_mode_max_reqs;
2220#define SPLIT_MODE_5704_MAX_REQ 3 2221#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index ba05dedf29d3..136a70c4d5e4 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -850,7 +850,7 @@ static void init_rxtx_rings(struct net_device *dev)
850 break; 850 break;
851 skb->dev = dev; /* Mark as being used by this device. */ 851 skb->dev = dev; /* Mark as being used by this device. */
852 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, 852 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
853 skb->len,PCI_DMA_FROMDEVICE); 853 np->rx_buf_sz,PCI_DMA_FROMDEVICE);
854 854
855 np->rx_ring[i].buffer1 = np->rx_addr[i]; 855 np->rx_ring[i].buffer1 = np->rx_addr[i];
856 np->rx_ring[i].status = DescOwn; 856 np->rx_ring[i].status = DescOwn;
@@ -1316,7 +1316,7 @@ static int netdev_rx(struct net_device *dev)
1316 skb->dev = dev; /* Mark as being used by this device. */ 1316 skb->dev = dev; /* Mark as being used by this device. */
1317 np->rx_addr[entry] = pci_map_single(np->pci_dev, 1317 np->rx_addr[entry] = pci_map_single(np->pci_dev,
1318 skb->data, 1318 skb->data,
1319 skb->len, PCI_DMA_FROMDEVICE); 1319 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1320 np->rx_ring[entry].buffer1 = np->rx_addr[entry]; 1320 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1321 } 1321 }
1322 wmb(); 1322 wmb();
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 6a23964c1317..fdc21037f6dc 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -129,6 +129,7 @@
129 - Massive clean-up 129 - Massive clean-up
130 - Rewrite PHY, media handling (remove options, full_duplex, backoff) 130 - Rewrite PHY, media handling (remove options, full_duplex, backoff)
131 - Fix Tx engine race for good 131 - Fix Tx engine race for good
132 - Craig Brind: Zero padded aligned buffers for short packets.
132 133
133*/ 134*/
134 135
@@ -490,8 +491,6 @@ struct rhine_private {
490 u8 tx_thresh, rx_thresh; 491 u8 tx_thresh, rx_thresh;
491 492
492 struct mii_if_info mii_if; 493 struct mii_if_info mii_if;
493 struct work_struct tx_timeout_task;
494 struct work_struct check_media_task;
495 void __iomem *base; 494 void __iomem *base;
496}; 495};
497 496
@@ -499,8 +498,6 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
499static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 498static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
500static int rhine_open(struct net_device *dev); 499static int rhine_open(struct net_device *dev);
501static void rhine_tx_timeout(struct net_device *dev); 500static void rhine_tx_timeout(struct net_device *dev);
502static void rhine_tx_timeout_task(struct net_device *dev);
503static void rhine_check_media_task(struct net_device *dev);
504static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); 501static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
505static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 502static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
506static void rhine_tx(struct net_device *dev); 503static void rhine_tx(struct net_device *dev);
@@ -855,12 +852,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
855 if (rp->quirks & rqRhineI) 852 if (rp->quirks & rqRhineI)
856 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 853 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
857 854
858 INIT_WORK(&rp->tx_timeout_task,
859 (void (*)(void *))rhine_tx_timeout_task, dev);
860
861 INIT_WORK(&rp->check_media_task,
862 (void (*)(void *))rhine_check_media_task, dev);
863
864 /* dev->name not defined before register_netdev()! */ 855 /* dev->name not defined before register_netdev()! */
865 rc = register_netdev(dev); 856 rc = register_netdev(dev);
866 if (rc) 857 if (rc)
@@ -1107,11 +1098,6 @@ static void rhine_set_carrier(struct mii_if_info *mii)
1107 netif_carrier_ok(mii->dev)); 1098 netif_carrier_ok(mii->dev));
1108} 1099}
1109 1100
1110static void rhine_check_media_task(struct net_device *dev)
1111{
1112 rhine_check_media(dev, 0);
1113}
1114
1115static void init_registers(struct net_device *dev) 1101static void init_registers(struct net_device *dev)
1116{ 1102{
1117 struct rhine_private *rp = netdev_priv(dev); 1103 struct rhine_private *rp = netdev_priv(dev);
@@ -1165,8 +1151,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1165 if (quirks & rqRhineI) { 1151 if (quirks & rqRhineI) {
1166 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR 1152 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1167 1153
1168 /* Do not call from ISR! */ 1154 /* Can be called from ISR. Evil. */
1169 msleep(1); 1155 mdelay(1);
1170 1156
1171 /* 0x80 must be set immediately before turning it off */ 1157 /* 0x80 must be set immediately before turning it off */
1172 iowrite8(0x80, ioaddr + MIICmd); 1158 iowrite8(0x80, ioaddr + MIICmd);
@@ -1256,16 +1242,6 @@ static int rhine_open(struct net_device *dev)
1256static void rhine_tx_timeout(struct net_device *dev) 1242static void rhine_tx_timeout(struct net_device *dev)
1257{ 1243{
1258 struct rhine_private *rp = netdev_priv(dev); 1244 struct rhine_private *rp = netdev_priv(dev);
1259
1260 /*
1261 * Move bulk of work outside of interrupt context
1262 */
1263 schedule_work(&rp->tx_timeout_task);
1264}
1265
1266static void rhine_tx_timeout_task(struct net_device *dev)
1267{
1268 struct rhine_private *rp = netdev_priv(dev);
1269 void __iomem *ioaddr = rp->base; 1245 void __iomem *ioaddr = rp->base;
1270 1246
1271 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " 1247 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
@@ -1326,7 +1302,12 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1326 rp->stats.tx_dropped++; 1302 rp->stats.tx_dropped++;
1327 return 0; 1303 return 0;
1328 } 1304 }
1305
1306 /* Padding is not copied and so must be redone. */
1329 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); 1307 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1308 if (skb->len < ETH_ZLEN)
1309 memset(rp->tx_buf[entry] + skb->len, 0,
1310 ETH_ZLEN - skb->len);
1330 rp->tx_skbuff_dma[entry] = 0; 1311 rp->tx_skbuff_dma[entry] = 0;
1331 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + 1312 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1332 (rp->tx_buf[entry] - 1313 (rp->tx_buf[entry] -
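On this path the packet has been copied into an aligned bounce buffer, and skb_copy_and_csum_dev() copies only skb->len bytes, so for frames shorter than the Ethernet minimum the driver now clears the tail itself rather than transmitting whatever the buffer held before. A minimal user-space sketch of that padding step, taking ETH_ZLEN as the customary 60-byte minimum frame length without FCS:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ZLEN 60     /* minimum Ethernet frame length, excluding FCS */

    /* Copy a frame into a fixed transmit buffer and zero-pad short ones. */
    static size_t copy_and_pad(unsigned char *txbuf,
                               const unsigned char *frame, size_t len)
    {
            memcpy(txbuf, frame, len);
            if (len < ETH_ZLEN) {
                    memset(txbuf + len, 0, ETH_ZLEN - len);
                    len = ETH_ZLEN;
            }
            return len;                 /* length handed to the NIC */
    }

    int main(void)
    {
            unsigned char buf[1536];
            unsigned char frame[42] = { 0xff, 0xff };   /* e.g. a short ARP */

            printf("%zu bytes queued\n",
                   copy_and_pad(buf, frame, sizeof(frame)));
            return 0;
    }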
@@ -1671,7 +1652,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
1671 spin_lock(&rp->lock); 1652 spin_lock(&rp->lock);
1672 1653
1673 if (intr_status & IntrLinkChange) 1654 if (intr_status & IntrLinkChange)
1674 schedule_work(&rp->check_media_task); 1655 rhine_check_media(dev, 0);
1675 if (intr_status & IntrStatsMax) { 1656 if (intr_status & IntrStatsMax) {
1676 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 1657 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1677 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); 1658 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1921,9 +1902,6 @@ static int rhine_close(struct net_device *dev)
1921 spin_unlock_irq(&rp->lock); 1902 spin_unlock_irq(&rp->lock);
1922 1903
1923 free_irq(rp->pdev->irq, dev); 1904 free_irq(rp->pdev->irq, dev);
1924
1925 flush_scheduled_work();
1926
1927 free_rbufs(dev); 1905 free_rbufs(dev);
1928 free_tbufs(dev); 1906 free_tbufs(dev);
1929 free_ring(dev); 1907 free_ring(dev);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index bad09ebdb50b..e0874cbfefea 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -6,7 +6,7 @@ menu "Wireless LAN (non-hamradio)"
6 depends on NETDEVICES 6 depends on NETDEVICES
7 7
8config NET_RADIO 8config NET_RADIO
9 bool "Wireless LAN drivers (non-hamradio)" 9 bool "Wireless LAN drivers (non-hamradio) & Wireless Extensions"
10 select WIRELESS_EXT 10 select WIRELESS_EXT
11 ---help--- 11 ---help---
12 Support for wireless LANs and everything having to do with radio, 12 Support for wireless LANs and everything having to do with radio,
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 108d9fed8f07..00764ddd74d8 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -3139,6 +3139,7 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3139 } 3139 }
3140 if ( status & EV_LINK ) { 3140 if ( status & EV_LINK ) {
3141 union iwreq_data wrqu; 3141 union iwreq_data wrqu;
3142 int scan_forceloss = 0;
3142 /* The link status has changed, if you want to put a 3143 /* The link status has changed, if you want to put a
3143 monitor hook in, do it here. (Remember that 3144 monitor hook in, do it here. (Remember that
3144 interrupts are still disabled!) 3145 interrupts are still disabled!)
@@ -3157,7 +3158,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3157 code) */ 3158 code) */
3158#define AUTHFAIL 0x0300 /* Authentication failure (low byte is reason 3159#define AUTHFAIL 0x0300 /* Authentication failure (low byte is reason
3159 code) */ 3160 code) */
3160#define ASSOCIATED 0x0400 /* Assocatied */ 3161#define ASSOCIATED 0x0400 /* Associated */
3162#define REASSOCIATED 0x0600 /* Reassociated? Only on firmware >= 5.30.17 */
3161#define RC_RESERVED 0 /* Reserved return code */ 3163#define RC_RESERVED 0 /* Reserved return code */
3162#define RC_NOREASON 1 /* Unspecified reason */ 3164#define RC_NOREASON 1 /* Unspecified reason */
3163#define RC_AUTHINV 2 /* Previous authentication invalid */ 3165#define RC_AUTHINV 2 /* Previous authentication invalid */
@@ -3174,44 +3176,30 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3174 leaving BSS */ 3176 leaving BSS */
3175#define RC_NOAUTH 9 /* Station requesting (Re)Association is not 3177#define RC_NOAUTH 9 /* Station requesting (Re)Association is not
3176 Authenticated with the responding station */ 3178 Authenticated with the responding station */
3177 if (newStatus != ASSOCIATED) { 3179 if (newStatus == FORCELOSS && apriv->scan_timeout > 0)
3178 if (auto_wep && !apriv->expires) { 3180 scan_forceloss = 1;
3179 apriv->expires = RUN_AT(3*HZ); 3181 if(newStatus == ASSOCIATED || newStatus == REASSOCIATED) {
3180 wake_up_interruptible(&apriv->thr_wait);
3181 }
3182 } else {
3183 struct task_struct *task = apriv->task;
3184 if (auto_wep) 3182 if (auto_wep)
3185 apriv->expires = 0; 3183 apriv->expires = 0;
3186 if (task) 3184 if (apriv->task)
3187 wake_up_process (task); 3185 wake_up_process (apriv->task);
3188 set_bit(FLAG_UPDATE_UNI, &apriv->flags); 3186 set_bit(FLAG_UPDATE_UNI, &apriv->flags);
3189 set_bit(FLAG_UPDATE_MULTI, &apriv->flags); 3187 set_bit(FLAG_UPDATE_MULTI, &apriv->flags);
3190 } 3188
3191 /* Question : is ASSOCIATED the only status
3192 * that is valid ? We want to catch handover
3193 * and reassociations as valid status
3194 * Jean II */
3195 if(newStatus == ASSOCIATED) {
3196#if 0
3197 /* FIXME: Grabbing scan results here
3198 * seems to be too early??? Just wait for
3199 * timeout instead. */
3200 if (apriv->scan_timeout > 0) {
3201 set_bit(JOB_SCAN_RESULTS, &apriv->flags);
3202 wake_up_interruptible(&apriv->thr_wait);
3203 }
3204#endif
3205 if (down_trylock(&apriv->sem) != 0) { 3189 if (down_trylock(&apriv->sem) != 0) {
3206 set_bit(JOB_EVENT, &apriv->flags); 3190 set_bit(JOB_EVENT, &apriv->flags);
3207 wake_up_interruptible(&apriv->thr_wait); 3191 wake_up_interruptible(&apriv->thr_wait);
3208 } else 3192 } else
3209 airo_send_event(dev); 3193 airo_send_event(dev);
3210 } else { 3194 } else if (!scan_forceloss) {
3211 memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN); 3195 if (auto_wep && !apriv->expires) {
3212 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 3196 apriv->expires = RUN_AT(3*HZ);
3197 wake_up_interruptible(&apriv->thr_wait);
3198 }
3213 3199
3214 /* Send event to user space */ 3200 /* Send event to user space */
3201 memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
3202 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
3215 wireless_send_event(dev, SIOCGIWAP, &wrqu,NULL); 3203 wireless_send_event(dev, SIOCGIWAP, &wrqu,NULL);
3216 } 3204 }
3217 } 3205 }
@@ -7136,10 +7124,10 @@ static int airo_set_scan(struct net_device *dev,
7136 goto out; 7124 goto out;
7137 7125
7138 /* Initiate a scan command */ 7126 /* Initiate a scan command */
7127 ai->scan_timeout = RUN_AT(3*HZ);
7139 memset(&cmd, 0, sizeof(cmd)); 7128 memset(&cmd, 0, sizeof(cmd));
7140 cmd.cmd=CMD_LISTBSS; 7129 cmd.cmd=CMD_LISTBSS;
7141 issuecommand(ai, &cmd, &rsp); 7130 issuecommand(ai, &cmd, &rsp);
7142 ai->scan_timeout = RUN_AT(3*HZ);
7143 wake = 1; 7131 wake = 1;
7144 7132
7145out: 7133out:
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index 0e1ac338cac1..bed6823d9809 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -1838,7 +1838,7 @@ struct net_device * __init arlan_probe(int unit)
1838} 1838}
1839 1839
1840#ifdef MODULE 1840#ifdef MODULE
1841int init_module(void) 1841int __init init_module(void)
1842{ 1842{
1843 int i = 0; 1843 int i = 0;
1844 1844
@@ -1860,7 +1860,7 @@ int init_module(void)
1860} 1860}
1861 1861
1862 1862
1863void cleanup_module(void) 1863void __exit cleanup_module(void)
1864{ 1864{
1865 int i = 0; 1865 int i = 0;
1866 struct net_device *dev; 1866 struct net_device *dev;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 87afa6878f26..8606c88886fc 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -3463,6 +3463,7 @@ static void atmel_command_irq(struct atmel_private *priv)
3463 u8 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET)); 3463 u8 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
3464 u8 command = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET)); 3464 u8 command = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET));
3465 int fast_scan; 3465 int fast_scan;
3466 union iwreq_data wrqu;
3466 3467
3467 if (status == CMD_STATUS_IDLE || 3468 if (status == CMD_STATUS_IDLE ||
3468 status == CMD_STATUS_IN_PROGRESS) 3469 status == CMD_STATUS_IN_PROGRESS)
@@ -3487,6 +3488,7 @@ static void atmel_command_irq(struct atmel_private *priv)
3487 atmel_scan(priv, 1); 3488 atmel_scan(priv, 1);
3488 } else { 3489 } else {
3489 int bss_index = retrieve_bss(priv); 3490 int bss_index = retrieve_bss(priv);
3491 int notify_scan_complete = 1;
3490 if (bss_index != -1) { 3492 if (bss_index != -1) {
3491 atmel_join_bss(priv, bss_index); 3493 atmel_join_bss(priv, bss_index);
3492 } else if (priv->operating_mode == IW_MODE_ADHOC && 3494 } else if (priv->operating_mode == IW_MODE_ADHOC &&
@@ -3495,8 +3497,14 @@ static void atmel_command_irq(struct atmel_private *priv)
3495 } else { 3497 } else {
3496 priv->fast_scan = !fast_scan; 3498 priv->fast_scan = !fast_scan;
3497 atmel_scan(priv, 1); 3499 atmel_scan(priv, 1);
3500 notify_scan_complete = 0;
3498 } 3501 }
3499 priv->site_survey_state = SITE_SURVEY_COMPLETED; 3502 priv->site_survey_state = SITE_SURVEY_COMPLETED;
3503 if (notify_scan_complete) {
3504 wrqu.data.length = 0;
3505 wrqu.data.flags = 0;
3506 wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);
3507 }
3500 } 3508 }
3501 break; 3509 break;
3502 3510
@@ -3509,6 +3517,9 @@ static void atmel_command_irq(struct atmel_private *priv)
3509 priv->site_survey_state = SITE_SURVEY_COMPLETED; 3517 priv->site_survey_state = SITE_SURVEY_COMPLETED;
3510 if (priv->station_is_associated) { 3518 if (priv->station_is_associated) {
3511 atmel_enter_state(priv, STATION_STATE_READY); 3519 atmel_enter_state(priv, STATION_STATE_READY);
3520 wrqu.data.length = 0;
3521 wrqu.data.flags = 0;
3522 wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);
3512 } else { 3523 } else {
3513 atmel_scan(priv, 1); 3524 atmel_scan(priv, 1);
3514 } 3525 }
diff --git a/drivers/net/wireless/bcm43xx/Kconfig b/drivers/net/wireless/bcm43xx/Kconfig
index 418465600a77..25ea4748f0b9 100644
--- a/drivers/net/wireless/bcm43xx/Kconfig
+++ b/drivers/net/wireless/bcm43xx/Kconfig
@@ -17,8 +17,11 @@ config BCM43XX_DEBUG
17 17
18config BCM43XX_DMA 18config BCM43XX_DMA
19 bool 19 bool
20 depends on BCM43XX
21
20config BCM43XX_PIO 22config BCM43XX_PIO
21 bool 23 bool
24 depends on BCM43XX
22 25
23choice 26choice
24 prompt "BCM43xx data transfer mode" 27 prompt "BCM43xx data transfer mode"
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index dcadd295de4f..2e83083935e1 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -15,7 +15,6 @@
15 15
16#include "bcm43xx_debugfs.h" 16#include "bcm43xx_debugfs.h"
17#include "bcm43xx_leds.h" 17#include "bcm43xx_leds.h"
18#include "bcm43xx_sysfs.h"
19 18
20 19
21#define PFX KBUILD_MODNAME ": " 20#define PFX KBUILD_MODNAME ": "
@@ -638,8 +637,6 @@ struct bcm43xx_key {
638}; 637};
639 638
640struct bcm43xx_private { 639struct bcm43xx_private {
641 struct bcm43xx_sysfs sysfs;
642
643 struct ieee80211_device *ieee; 640 struct ieee80211_device *ieee;
644 struct ieee80211softmac_device *softmac; 641 struct ieee80211softmac_device *softmac;
645 642
@@ -772,6 +769,20 @@ struct bcm43xx_private * bcm43xx_priv(struct net_device *dev)
772 return ieee80211softmac_priv(dev); 769 return ieee80211softmac_priv(dev);
773} 770}
774 771
772struct device;
773
774static inline
775struct bcm43xx_private * dev_to_bcm(struct device *dev)
776{
777 struct net_device *net_dev;
778 struct bcm43xx_private *bcm;
779
780 net_dev = dev_get_drvdata(dev);
781 bcm = bcm43xx_priv(net_dev);
782
783 return bcm;
784}
785
775 786
776/* Helper function, which returns a boolean. 787/* Helper function, which returns a boolean.
777 * TRUE, if PIO is used; FALSE, if DMA is used. 788 * TRUE, if PIO is used; FALSE, if DMA is used.
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
index d2c3401e9b70..35a4fcb6d923 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
@@ -452,12 +452,12 @@ void bcm43xx_printk_dump(const char *data,
452 size_t i; 452 size_t i;
453 char c; 453 char c;
454 454
455 printk(KERN_INFO PFX "Data dump (%s, %u bytes):", 455 printk(KERN_INFO PFX "Data dump (%s, %zd bytes):",
456 description, size); 456 description, size);
457 for (i = 0; i < size; i++) { 457 for (i = 0; i < size; i++) {
458 c = data[i]; 458 c = data[i];
459 if (i % 8 == 0) 459 if (i % 8 == 0)
460 printk("\n" KERN_INFO PFX "0x%08x: 0x%02x, ", i, c & 0xff); 460 printk("\n" KERN_INFO PFX "0x%08zx: 0x%02x, ", i, c & 0xff);
461 else 461 else
462 printk("0x%02x, ", c & 0xff); 462 printk("0x%02x, ", c & 0xff);
463 } 463 }
@@ -472,12 +472,12 @@ void bcm43xx_printk_bitdump(const unsigned char *data,
472 int j; 472 int j;
473 const unsigned char *d; 473 const unsigned char *d;
474 474
475 printk(KERN_INFO PFX "*** Bitdump (%s, %u bytes, %s) ***", 475 printk(KERN_INFO PFX "*** Bitdump (%s, %zd bytes, %s) ***",
476 description, bytes, msb_to_lsb ? "MSB to LSB" : "LSB to MSB"); 476 description, bytes, msb_to_lsb ? "MSB to LSB" : "LSB to MSB");
477 for (i = 0; i < bytes; i++) { 477 for (i = 0; i < bytes; i++) {
478 d = data + i; 478 d = data + i;
479 if (i % 8 == 0) 479 if (i % 8 == 0)
480 printk("\n" KERN_INFO PFX "0x%08x: ", i); 480 printk("\n" KERN_INFO PFX "0x%08zx: ", i);
481 if (msb_to_lsb) { 481 if (msb_to_lsb) {
482 for (j = 7; j >= 0; j--) { 482 for (j = 7; j >= 0; j--) {
483 if (*d & (1 << j)) 483 if (*d & (1 << j))
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index c3681b8f09b4..d0318e525ba7 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -196,8 +196,9 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
196 } 196 }
197 if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) { 197 if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
198 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G " 198 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
199 "(0x%08x, len: %lu)\n", 199 "(0x%llx, len: %lu)\n",
200 ring->dmabase, BCM43xx_DMA_RINGMEMSIZE); 200 (unsigned long long)ring->dmabase,
201 BCM43xx_DMA_RINGMEMSIZE);
201 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE, 202 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
202 ring->vbase, ring->dmabase); 203 ring->vbase, ring->dmabase);
203 return -ENOMEM; 204 return -ENOMEM;
@@ -307,8 +308,8 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
307 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); 308 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
308 dev_kfree_skb_any(skb); 309 dev_kfree_skb_any(skb);
309 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G " 310 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
310 "(0x%08x, len: %u)\n", 311 "(0x%llx, len: %u)\n",
311 dmaaddr, ring->rx_buffersize); 312 (unsigned long long)dmaaddr, ring->rx_buffersize);
312 return -ENOMEM; 313 return -ENOMEM;
313 } 314 }
314 meta->skb = skb; 315 meta->skb = skb;
@@ -623,25 +624,28 @@ err_destroy_tx0:
623static u16 generate_cookie(struct bcm43xx_dmaring *ring, 624static u16 generate_cookie(struct bcm43xx_dmaring *ring,
624 int slot) 625 int slot)
625{ 626{
626 u16 cookie = 0x0000; 627 u16 cookie = 0xF000;
627 628
628 /* Use the upper 4 bits of the cookie as 629 /* Use the upper 4 bits of the cookie as
629 * DMA controller ID and store the slot number 630 * DMA controller ID and store the slot number
630 * in the lower 12 bits 631 * in the lower 12 bits.
632 * Note that the cookie must never be 0, as this
633 * is a special value used in RX path.
631 */ 634 */
632 switch (ring->mmio_base) { 635 switch (ring->mmio_base) {
633 default: 636 default:
634 assert(0); 637 assert(0);
635 case BCM43xx_MMIO_DMA1_BASE: 638 case BCM43xx_MMIO_DMA1_BASE:
639 cookie = 0xA000;
636 break; 640 break;
637 case BCM43xx_MMIO_DMA2_BASE: 641 case BCM43xx_MMIO_DMA2_BASE:
638 cookie = 0x1000; 642 cookie = 0xB000;
639 break; 643 break;
640 case BCM43xx_MMIO_DMA3_BASE: 644 case BCM43xx_MMIO_DMA3_BASE:
641 cookie = 0x2000; 645 cookie = 0xC000;
642 break; 646 break;
643 case BCM43xx_MMIO_DMA4_BASE: 647 case BCM43xx_MMIO_DMA4_BASE:
644 cookie = 0x3000; 648 cookie = 0xD000;
645 break; 649 break;
646 } 650 }
647 assert(((u16)slot & 0xF000) == 0x0000); 651 assert(((u16)slot & 0xF000) == 0x0000);
@@ -659,16 +663,16 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
659 struct bcm43xx_dmaring *ring = NULL; 663 struct bcm43xx_dmaring *ring = NULL;
660 664
661 switch (cookie & 0xF000) { 665 switch (cookie & 0xF000) {
662 case 0x0000: 666 case 0xA000:
663 ring = dma->tx_ring0; 667 ring = dma->tx_ring0;
664 break; 668 break;
665 case 0x1000: 669 case 0xB000:
666 ring = dma->tx_ring1; 670 ring = dma->tx_ring1;
667 break; 671 break;
668 case 0x2000: 672 case 0xC000:
669 ring = dma->tx_ring2; 673 ring = dma->tx_ring2;
670 break; 674 break;
671 case 0x3000: 675 case 0xD000:
672 ring = dma->tx_ring3; 676 ring = dma->tx_ring3;
673 break; 677 break;
674 default: 678 default:
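The TX cookie keeps the DMA ring identifier in the top four bits and the descriptor slot in the low twelve; moving the ring codes up to 0xA000-0xD000 guarantees a cookie can never be zero, the value the RX status path treats as not-yet-written. A small stand-alone sketch of the encode/decode arithmetic, using the ring codes from the patch and assuming a 12-bit slot number:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* ring identifiers occupy the top nibble; 0x0000 stays unused */
    enum { RING0 = 0xA000, RING1 = 0xB000, RING2 = 0xC000, RING3 = 0xD000 };

    static uint16_t make_cookie(uint16_t ring_code, uint16_t slot)
    {
            assert((slot & 0xF000) == 0);       /* slot fits in 12 bits */
            return ring_code | slot;
    }

    static void split_cookie(uint16_t cookie,
                             uint16_t *ring_code, uint16_t *slot)
    {
            *ring_code = cookie & 0xF000;
            *slot = cookie & 0x0FFF;
    }

    int main(void)
    {
            uint16_t ring, slot;

            split_cookie(make_cookie(RING2, 37), &ring, &slot);
            printf("ring 0x%04x slot %u\n", ring, slot);    /* 0xC000, 37 */
            return 0;
    }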
@@ -729,8 +733,8 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
729 if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) { 733 if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
730 return_slot(ring, slot); 734 return_slot(ring, slot);
731 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G " 735 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G "
732 "(0x%08x, len: %u)\n", 736 "(0x%llx, len: %u)\n",
733 meta->dmaaddr, skb->len); 737 (unsigned long long)meta->dmaaddr, skb->len);
734 return -ENOMEM; 738 return -ENOMEM;
735 } 739 }
736 740
@@ -838,8 +842,18 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
838 /* We received an xmit status. */ 842 /* We received an xmit status. */
839 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data; 843 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
840 struct bcm43xx_xmitstatus stat; 844 struct bcm43xx_xmitstatus stat;
845 int i = 0;
841 846
842 stat.cookie = le16_to_cpu(hw->cookie); 847 stat.cookie = le16_to_cpu(hw->cookie);
848 while (stat.cookie == 0) {
849 if (unlikely(++i >= 10000)) {
850 assert(0);
851 break;
852 }
853 udelay(2);
854 barrier();
855 stat.cookie = le16_to_cpu(hw->cookie);
856 }
843 stat.flags = hw->flags; 857 stat.flags = hw->flags;
844 stat.cnt1 = hw->cnt1; 858 stat.cnt1 = hw->cnt1;
845 stat.cnt2 = hw->cnt2; 859 stat.cnt2 = hw->cnt2;
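
The generate_cookie()/parse_cookie() changes above move the TX ring identifiers from 0x0000-0x3000 up to 0xA000-0xD000 so that a cookie value of 0 can never be produced; per the new comment, 0 is a special "status not written yet" value in the RX path, which the added busy-wait loop polls against. The following is a minimal userspace sketch of that encoding (upper 4 bits = ring ID, lower 12 bits = slot). The RING* constants, ring_id parameter and the main() driver are illustrative stand-ins, not the driver's symbols; the real functions derive the ID from the DMA controller's MMIO base.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative ring identifiers; 0x0 is intentionally never used, so a
 * cookie of 0 can serve as "no status available yet". */
enum { RING1 = 0xA, RING2 = 0xB, RING3 = 0xC, RING4 = 0xD };

/* Pack the ring ID into the upper 4 bits, the slot index into the lower 12. */
static uint16_t generate_cookie(unsigned ring_id, unsigned slot)
{
	assert(slot <= 0x0FFF);
	return (uint16_t)((ring_id << 12) | slot);
}

/* Unpack a cookie back into ring ID and slot. */
static void parse_cookie(uint16_t cookie, unsigned *ring_id, unsigned *slot)
{
	*ring_id = cookie >> 12;
	*slot = cookie & 0x0FFF;
}

int main(void)
{
	unsigned id, slot;

	parse_cookie(generate_cookie(RING2, 42), &id, &slot);
	printf("ring 0x%X, slot %u\n", id, slot);   /* ring 0xB, slot 42 */
	return 0;
}
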
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
index 2d520e4b0276..b7d77638ba8c 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
@@ -213,6 +213,14 @@ static inline
213void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring) 213void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
214{ 214{
215} 215}
216static inline
217void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
218{
219}
220static inline
221void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
222{
223}
216 224
217#endif /* CONFIG_BCM43XX_DMA */ 225#endif /* CONFIG_BCM43XX_DMA */
218#endif /* BCM43xx_DMA_H_ */ 226#endif /* BCM43xx_DMA_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index c37371fc9e01..7ed18cad29f7 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -52,6 +52,7 @@
52#include "bcm43xx_wx.h" 52#include "bcm43xx_wx.h"
53#include "bcm43xx_ethtool.h" 53#include "bcm43xx_ethtool.h"
54#include "bcm43xx_xmit.h" 54#include "bcm43xx_xmit.h"
55#include "bcm43xx_sysfs.h"
55 56
56 57
57MODULE_DESCRIPTION("Broadcom BCM43xx wireless driver"); 58MODULE_DESCRIPTION("Broadcom BCM43xx wireless driver");
@@ -938,9 +939,9 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
938 return 0; 939 return 0;
939} 940}
940 941
941static void bcm43xx_geo_init(struct bcm43xx_private *bcm) 942static int bcm43xx_geo_init(struct bcm43xx_private *bcm)
942{ 943{
943 struct ieee80211_geo geo; 944 struct ieee80211_geo *geo;
944 struct ieee80211_channel *chan; 945 struct ieee80211_channel *chan;
945 int have_a = 0, have_bg = 0; 946 int have_a = 0, have_bg = 0;
946 int i; 947 int i;
@@ -948,7 +949,10 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
948 struct bcm43xx_phyinfo *phy; 949 struct bcm43xx_phyinfo *phy;
949 const char *iso_country; 950 const char *iso_country;
950 951
951 memset(&geo, 0, sizeof(geo)); 952 geo = kzalloc(sizeof(*geo), GFP_KERNEL);
953 if (!geo)
954 return -ENOMEM;
955
952 for (i = 0; i < bcm->nr_80211_available; i++) { 956 for (i = 0; i < bcm->nr_80211_available; i++) {
953 phy = &(bcm->core_80211_ext[i].phy); 957 phy = &(bcm->core_80211_ext[i].phy);
954 switch (phy->type) { 958 switch (phy->type) {
@@ -966,31 +970,36 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
966 iso_country = bcm43xx_locale_iso(bcm->sprom.locale); 970 iso_country = bcm43xx_locale_iso(bcm->sprom.locale);
967 971
968 if (have_a) { 972 if (have_a) {
969 for (i = 0, channel = 0; channel < 201; channel++) { 973 for (i = 0, channel = IEEE80211_52GHZ_MIN_CHANNEL;
970 chan = &geo.a[i++]; 974 channel <= IEEE80211_52GHZ_MAX_CHANNEL; channel++) {
975 chan = &geo->a[i++];
971 chan->freq = bcm43xx_channel_to_freq_a(channel); 976 chan->freq = bcm43xx_channel_to_freq_a(channel);
972 chan->channel = channel; 977 chan->channel = channel;
973 } 978 }
974 geo.a_channels = i; 979 geo->a_channels = i;
975 } 980 }
976 if (have_bg) { 981 if (have_bg) {
977 for (i = 0, channel = 1; channel < 15; channel++) { 982 for (i = 0, channel = IEEE80211_24GHZ_MIN_CHANNEL;
978 chan = &geo.bg[i++]; 983 channel <= IEEE80211_24GHZ_MAX_CHANNEL; channel++) {
984 chan = &geo->bg[i++];
979 chan->freq = bcm43xx_channel_to_freq_bg(channel); 985 chan->freq = bcm43xx_channel_to_freq_bg(channel);
980 chan->channel = channel; 986 chan->channel = channel;
981 } 987 }
982 geo.bg_channels = i; 988 geo->bg_channels = i;
983 } 989 }
984 memcpy(geo.name, iso_country, 2); 990 memcpy(geo->name, iso_country, 2);
985 if (0 /*TODO: Outdoor use only */) 991 if (0 /*TODO: Outdoor use only */)
986 geo.name[2] = 'O'; 992 geo->name[2] = 'O';
987 else if (0 /*TODO: Indoor use only */) 993 else if (0 /*TODO: Indoor use only */)
988 geo.name[2] = 'I'; 994 geo->name[2] = 'I';
989 else 995 else
990 geo.name[2] = ' '; 996 geo->name[2] = ' ';
991 geo.name[3] = '\0'; 997 geo->name[3] = '\0';
998
999 ieee80211_set_geo(bcm->ieee, geo);
1000 kfree(geo);
992 1001
993 ieee80211_set_geo(bcm->ieee, &geo); 1002 return 0;
994} 1003}
995 1004
996/* DummyTransmission function, as documented on 1005/* DummyTransmission function, as documented on
@@ -3262,6 +3271,9 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
3262 bcm43xx_sysfs_register(bcm); 3271 bcm43xx_sysfs_register(bcm);
3263 //FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though... 3272 //FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though...
3264 3273
3274 /*FIXME: This should be handled by softmac instead. */
3275 schedule_work(&bcm->softmac->associnfo.work);
3276
3265 assert(err == 0); 3277 assert(err == 0);
3266out: 3278out:
3267 return err; 3279 return err;
@@ -3478,16 +3490,17 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
3478 goto err_80211_unwind; 3490 goto err_80211_unwind;
3479 bcm43xx_wireless_core_disable(bcm); 3491 bcm43xx_wireless_core_disable(bcm);
3480 } 3492 }
3493 err = bcm43xx_geo_init(bcm);
3494 if (err)
3495 goto err_80211_unwind;
3481 bcm43xx_pctl_set_crystal(bcm, 0); 3496 bcm43xx_pctl_set_crystal(bcm, 0);
3482 3497
3483 /* Set the MAC address in the networking subsystem */ 3498 /* Set the MAC address in the networking subsystem */
3484 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A) 3499 if (is_valid_ether_addr(bcm->sprom.et1macaddr))
3485 memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6); 3500 memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6);
3486 else 3501 else
3487 memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6); 3502 memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6);
3488 3503
3489 bcm43xx_geo_init(bcm);
3490
3491 snprintf(bcm->nick, IW_ESSID_MAX_SIZE, 3504 snprintf(bcm->nick, IW_ESSID_MAX_SIZE,
3492 "Broadcom %04X", bcm->chip_id); 3505 "Broadcom %04X", bcm->chip_id);
3493 3506
@@ -3522,6 +3535,7 @@ static inline int bcm43xx_tx(struct bcm43xx_private *bcm,
3522 err = bcm43xx_pio_tx(bcm, txb); 3535 err = bcm43xx_pio_tx(bcm, txb);
3523 else 3536 else
3524 err = bcm43xx_dma_tx(bcm, txb); 3537 err = bcm43xx_dma_tx(bcm, txb);
3538 bcm->net_dev->trans_start = jiffies;
3525 3539
3526 return err; 3540 return err;
3527} 3541}
@@ -3935,9 +3949,6 @@ static int bcm43xx_resume(struct pci_dev *pdev)
3935 3949
3936 netif_device_attach(net_dev); 3950 netif_device_attach(net_dev);
3937 3951
3938 /*FIXME: This should be handled by softmac instead. */
3939 schedule_work(&bcm->softmac->associnfo.work);
3940
3941 dprintk(KERN_INFO PFX "Device resumed.\n"); 3952 dprintk(KERN_INFO PFX "Device resumed.\n");
3942 3953
3943 return 0; 3954 return 0;
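
The bcm43xx_geo_init() rework above replaces an on-stack struct ieee80211_geo with a kzalloc()'d one, presumably to keep a large structure off the kernel stack, and builds the channel tables from the IEEE80211_*_MIN/MAX_CHANNEL bounds before handing the result to ieee80211_set_geo() and freeing it. A rough userspace sketch of the same allocate-fill-consume-free pattern follows; struct geo, bg_channel_to_freq(), the "DE " country code and the 2.4 GHz frequency formula are illustrative assumptions, not the driver's own helpers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BG_MIN_CHANNEL 1
#define BG_MAX_CHANNEL 14

struct channel { int channel; int freq; };

/* Stand-in for a bulky geography descriptor that should not live on the
 * stack of a kernel function. */
struct geo {
	char name[4];
	int bg_channels;
	struct channel bg[BG_MAX_CHANNEL];
};

/* Usual 2.4 GHz mapping: ch 1-13 at 2407 + 5*ch MHz, ch 14 at 2484 MHz. */
static int bg_channel_to_freq(int channel)
{
	return channel == 14 ? 2484 : 2407 + 5 * channel;
}

int main(void)
{
	struct geo *geo = calloc(1, sizeof(*geo));   /* kzalloc() analogue */
	int i = 0, channel;

	if (!geo)
		return 1;
	for (channel = BG_MIN_CHANNEL; channel <= BG_MAX_CHANNEL; channel++) {
		geo->bg[i].channel = channel;
		geo->bg[i].freq = bg_channel_to_freq(channel);
		i++;
	}
	geo->bg_channels = i;
	memcpy(geo->name, "DE ", 4);
	printf("%d 2.4GHz channels, ch13 = %d MHz\n",
	       geo->bg_channels, geo->bg[12].freq);
	free(geo);   /* the consumer copied what it needed */
	return 0;
}
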
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index eca79a38594a..30a202b258b5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -118,12 +118,14 @@ int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm,
118static inline 118static inline
119int bcm43xx_is_valid_channel_a(u8 channel) 119int bcm43xx_is_valid_channel_a(u8 channel)
120{ 120{
121 return (channel <= 200); 121 return (channel >= IEEE80211_52GHZ_MIN_CHANNEL
122 && channel <= IEEE80211_52GHZ_MAX_CHANNEL);
122} 123}
123static inline 124static inline
124int bcm43xx_is_valid_channel_bg(u8 channel) 125int bcm43xx_is_valid_channel_bg(u8 channel)
125{ 126{
126 return (channel >= 1 && channel <= 14); 127 return (channel >= IEEE80211_24GHZ_MIN_CHANNEL
128 && channel <= IEEE80211_24GHZ_MAX_CHANNEL);
127} 129}
128static inline 130static inline
129int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm, 131int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index 0a66f43ca0c0..b0abac515530 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -1287,7 +1287,7 @@ static void bcm43xx_phy_initg(struct bcm43xx_private *bcm)
1287 if (radio->revision == 8) 1287 if (radio->revision == 8)
1288 bcm43xx_phy_write(bcm, 0x0805, 0x3230); 1288 bcm43xx_phy_write(bcm, 0x0805, 0x3230);
1289 bcm43xx_phy_init_pctl(bcm); 1289 bcm43xx_phy_init_pctl(bcm);
1290 if (bcm->chip_id == 0x4306 && bcm->chip_package != 2) { 1290 if (bcm->chip_id == 0x4306 && bcm->chip_package == 2) {
1291 bcm43xx_phy_write(bcm, 0x0429, 1291 bcm43xx_phy_write(bcm, 0x0429,
1292 bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF); 1292 bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF);
1293 bcm43xx_phy_write(bcm, 0x04C3, 1293 bcm43xx_phy_write(bcm, 0x04C3,
@@ -2151,6 +2151,7 @@ int bcm43xx_phy_init_tssi2dbm_table(struct bcm43xx_private *bcm)
2151 phy->tssi2dbm = NULL; 2151 phy->tssi2dbm = NULL;
2152 printk(KERN_ERR PFX "Could not generate " 2152 printk(KERN_ERR PFX "Could not generate "
2153 "tssi2dBm table\n"); 2153 "tssi2dBm table\n");
2154 kfree(dyn_tssi2dbm);
2154 return -ENODEV; 2155 return -ENODEV;
2155 } 2156 }
2156 phy->tssi2dbm = dyn_tssi2dbm; 2157 phy->tssi2dbm = dyn_tssi2dbm;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
index c59ddd40680d..0aa1bd269a25 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
@@ -27,6 +27,7 @@
27#include "bcm43xx_pio.h" 27#include "bcm43xx_pio.h"
28#include "bcm43xx_main.h" 28#include "bcm43xx_main.h"
29#include "bcm43xx_xmit.h" 29#include "bcm43xx_xmit.h"
30#include "bcm43xx_power.h"
30 31
31#include <linux/delay.h> 32#include <linux/delay.h>
32 33
@@ -44,10 +45,10 @@ static void tx_octet(struct bcm43xx_pioqueue *queue,
44 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, 45 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA,
45 octet); 46 octet);
46 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, 47 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
47 BCM43xx_PIO_TXCTL_WRITEHI); 48 BCM43xx_PIO_TXCTL_WRITELO);
48 } else { 49 } else {
49 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, 50 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
50 BCM43xx_PIO_TXCTL_WRITEHI); 51 BCM43xx_PIO_TXCTL_WRITELO);
51 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, 52 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA,
52 octet); 53 octet);
53 } 54 }
@@ -103,7 +104,7 @@ static void tx_complete(struct bcm43xx_pioqueue *queue,
103 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, 104 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA,
104 skb->data[skb->len - 1]); 105 skb->data[skb->len - 1]);
105 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, 106 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
106 BCM43xx_PIO_TXCTL_WRITEHI | 107 BCM43xx_PIO_TXCTL_WRITELO |
107 BCM43xx_PIO_TXCTL_COMPLETE); 108 BCM43xx_PIO_TXCTL_COMPLETE);
108 } else { 109 } else {
109 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, 110 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
@@ -112,9 +113,10 @@ static void tx_complete(struct bcm43xx_pioqueue *queue,
112} 113}
113 114
114static u16 generate_cookie(struct bcm43xx_pioqueue *queue, 115static u16 generate_cookie(struct bcm43xx_pioqueue *queue,
115 int packetindex) 116 struct bcm43xx_pio_txpacket *packet)
116{ 117{
117 u16 cookie = 0x0000; 118 u16 cookie = 0x0000;
119 int packetindex;
118 120
119 /* We use the upper 4 bits for the PIO 121 /* We use the upper 4 bits for the PIO
120 * controller ID and the lower 12 bits 122 * controller ID and the lower 12 bits
@@ -135,6 +137,7 @@ static u16 generate_cookie(struct bcm43xx_pioqueue *queue,
135 default: 137 default:
136 assert(0); 138 assert(0);
137 } 139 }
140 packetindex = pio_txpacket_getindex(packet);
138 assert(((u16)packetindex & 0xF000) == 0x0000); 141 assert(((u16)packetindex & 0xF000) == 0x0000);
139 cookie |= (u16)packetindex; 142 cookie |= (u16)packetindex;
140 143
@@ -184,7 +187,7 @@ static void pio_tx_write_fragment(struct bcm43xx_pioqueue *queue,
184 bcm43xx_generate_txhdr(queue->bcm, 187 bcm43xx_generate_txhdr(queue->bcm,
185 &txhdr, skb->data, skb->len, 188 &txhdr, skb->data, skb->len,
186 (packet->xmitted_frags == 0), 189 (packet->xmitted_frags == 0),
187 generate_cookie(queue, pio_txpacket_getindex(packet))); 190 generate_cookie(queue, packet));
188 191
189 tx_start(queue); 192 tx_start(queue);
190 octets = skb->len + sizeof(txhdr); 193 octets = skb->len + sizeof(txhdr);
@@ -241,7 +244,7 @@ static int pio_tx_packet(struct bcm43xx_pio_txpacket *packet)
241 queue->tx_devq_packets++; 244 queue->tx_devq_packets++;
242 queue->tx_devq_used += octets; 245 queue->tx_devq_used += octets;
243 246
244 assert(packet->xmitted_frags <= packet->txb->nr_frags); 247 assert(packet->xmitted_frags < packet->txb->nr_frags);
245 packet->xmitted_frags++; 248 packet->xmitted_frags++;
246 packet->xmitted_octets += octets; 249 packet->xmitted_octets += octets;
247 } 250 }
@@ -257,8 +260,14 @@ static void tx_tasklet(unsigned long d)
257 unsigned long flags; 260 unsigned long flags;
258 struct bcm43xx_pio_txpacket *packet, *tmp_packet; 261 struct bcm43xx_pio_txpacket *packet, *tmp_packet;
259 int err; 262 int err;
263 u16 txctl;
260 264
261 bcm43xx_lock_mmio(bcm, flags); 265 bcm43xx_lock_mmio(bcm, flags);
266
267 txctl = bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL);
268 if (txctl & BCM43xx_PIO_TXCTL_SUSPEND)
269 goto out_unlock;
270
262 list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) { 271 list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
263 assert(packet->xmitted_frags < packet->txb->nr_frags); 272 assert(packet->xmitted_frags < packet->txb->nr_frags);
264 if (packet->xmitted_frags == 0) { 273 if (packet->xmitted_frags == 0) {
@@ -288,6 +297,7 @@ static void tx_tasklet(unsigned long d)
288 next_packet: 297 next_packet:
289 continue; 298 continue;
290 } 299 }
300out_unlock:
291 bcm43xx_unlock_mmio(bcm, flags); 301 bcm43xx_unlock_mmio(bcm, flags);
292} 302}
293 303
@@ -330,12 +340,19 @@ struct bcm43xx_pioqueue * bcm43xx_setup_pioqueue(struct bcm43xx_private *bcm,
330 (unsigned long)queue); 340 (unsigned long)queue);
331 341
332 value = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); 342 value = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
333 value |= BCM43xx_SBF_XFER_REG_BYTESWAP; 343 value &= ~BCM43xx_SBF_XFER_REG_BYTESWAP;
334 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, value); 344 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, value);
335 345
336 qsize = bcm43xx_read16(bcm, queue->mmio_base + BCM43xx_PIO_TXQBUFSIZE); 346 qsize = bcm43xx_read16(bcm, queue->mmio_base + BCM43xx_PIO_TXQBUFSIZE);
347 if (qsize == 0) {
348 printk(KERN_ERR PFX "ERROR: This card does not support PIO "
349 "operation mode. Please use DMA mode "
350 "(module parameter pio=0).\n");
351 goto err_freequeue;
352 }
337 if (qsize <= BCM43xx_PIO_TXQADJUST) { 353 if (qsize <= BCM43xx_PIO_TXQADJUST) {
338 printk(KERN_ERR PFX "PIO tx device-queue too small (%u)\n", qsize); 354 printk(KERN_ERR PFX "PIO tx device-queue too small (%u)\n",
355 qsize);
339 goto err_freequeue; 356 goto err_freequeue;
340 } 357 }
341 qsize -= BCM43xx_PIO_TXQADJUST; 358 qsize -= BCM43xx_PIO_TXQADJUST;
@@ -444,15 +461,10 @@ int bcm43xx_pio_tx(struct bcm43xx_private *bcm,
444{ 461{
445 struct bcm43xx_pioqueue *queue = bcm43xx_current_pio(bcm)->queue1; 462 struct bcm43xx_pioqueue *queue = bcm43xx_current_pio(bcm)->queue1;
446 struct bcm43xx_pio_txpacket *packet; 463 struct bcm43xx_pio_txpacket *packet;
447 u16 tmp;
448 464
449 assert(!queue->tx_suspended); 465 assert(!queue->tx_suspended);
450 assert(!list_empty(&queue->txfree)); 466 assert(!list_empty(&queue->txfree));
451 467
452 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL);
453 if (tmp & BCM43xx_PIO_TXCTL_SUSPEND)
454 return -EBUSY;
455
456 packet = list_entry(queue->txfree.next, struct bcm43xx_pio_txpacket, list); 468 packet = list_entry(queue->txfree.next, struct bcm43xx_pio_txpacket, list);
457 packet->txb = txb; 469 packet->txb = txb;
458 packet->xmitted_frags = 0; 470 packet->xmitted_frags = 0;
@@ -462,7 +474,7 @@ int bcm43xx_pio_tx(struct bcm43xx_private *bcm,
462 assert(queue->nr_txfree < BCM43xx_PIO_MAXTXPACKETS); 474 assert(queue->nr_txfree < BCM43xx_PIO_MAXTXPACKETS);
463 475
464 /* Suspend TX, if we are out of packets in the "free" queue. */ 476 /* Suspend TX, if we are out of packets in the "free" queue. */
465 if (unlikely(list_empty(&queue->txfree))) { 477 if (list_empty(&queue->txfree)) {
466 netif_stop_queue(queue->bcm->net_dev); 478 netif_stop_queue(queue->bcm->net_dev);
467 queue->tx_suspended = 1; 479 queue->tx_suspended = 1;
468 } 480 }
@@ -480,15 +492,15 @@ void bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private *bcm,
480 492
481 queue = parse_cookie(bcm, status->cookie, &packet); 493 queue = parse_cookie(bcm, status->cookie, &packet);
482 assert(queue); 494 assert(queue);
483//TODO 495
484if (!queue)
485return;
486 free_txpacket(packet, 1); 496 free_txpacket(packet, 1);
487 if (unlikely(queue->tx_suspended)) { 497 if (queue->tx_suspended) {
488 queue->tx_suspended = 0; 498 queue->tx_suspended = 0;
489 netif_wake_queue(queue->bcm->net_dev); 499 netif_wake_queue(queue->bcm->net_dev);
490 } 500 }
491 /* If there are packets on the txqueue, poke the tasklet. */ 501 /* If there are packets on the txqueue, poke the tasklet
502 * to transmit them.
503 */
492 if (!list_empty(&queue->txqueue)) 504 if (!list_empty(&queue->txqueue))
493 tasklet_schedule(&queue->txtask); 505 tasklet_schedule(&queue->txtask);
494} 506}
@@ -519,12 +531,9 @@ void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue)
519 int i, preamble_readwords; 531 int i, preamble_readwords;
520 struct sk_buff *skb; 532 struct sk_buff *skb;
521 533
522return;
523 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXCTL); 534 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXCTL);
524 if (!(tmp & BCM43xx_PIO_RXCTL_DATAAVAILABLE)) { 535 if (!(tmp & BCM43xx_PIO_RXCTL_DATAAVAILABLE))
525 dprintkl(KERN_ERR PFX "PIO RX: No data available\n");//TODO: remove this printk.
526 return; 536 return;
527 }
528 bcm43xx_pio_write(queue, BCM43xx_PIO_RXCTL, 537 bcm43xx_pio_write(queue, BCM43xx_PIO_RXCTL,
529 BCM43xx_PIO_RXCTL_DATAAVAILABLE); 538 BCM43xx_PIO_RXCTL_DATAAVAILABLE);
530 539
@@ -538,8 +547,7 @@ return;
538 return; 547 return;
539data_ready: 548data_ready:
540 549
541//FIXME: endianess in this function. 550 len = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
542 len = le16_to_cpu(bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA));
543 if (unlikely(len > 0x700)) { 551 if (unlikely(len > 0x700)) {
544 pio_rx_error(queue, 0, "len > 0x700"); 552 pio_rx_error(queue, 0, "len > 0x700");
545 return; 553 return;
@@ -555,7 +563,7 @@ data_ready:
555 preamble_readwords = 18 / sizeof(u16); 563 preamble_readwords = 18 / sizeof(u16);
556 for (i = 0; i < preamble_readwords; i++) { 564 for (i = 0; i < preamble_readwords; i++) {
557 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA); 565 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
558 preamble[i + 1] = cpu_to_be16(tmp);//FIXME? 566 preamble[i + 1] = cpu_to_le16(tmp);
559 } 567 }
560 rxhdr = (struct bcm43xx_rxhdr *)preamble; 568 rxhdr = (struct bcm43xx_rxhdr *)preamble;
561 rxflags2 = le16_to_cpu(rxhdr->flags2); 569 rxflags2 = le16_to_cpu(rxhdr->flags2);
@@ -591,16 +599,40 @@ data_ready:
591 } 599 }
592 skb_put(skb, len); 600 skb_put(skb, len);
593 for (i = 0; i < len - 1; i += 2) { 601 for (i = 0; i < len - 1; i += 2) {
594 tmp = cpu_to_be16(bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA)); 602 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
595 *((u16 *)(skb->data + i)) = tmp; 603 *((u16 *)(skb->data + i)) = cpu_to_le16(tmp);
596 } 604 }
597 if (len % 2) { 605 if (len % 2) {
598 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA); 606 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
599 skb->data[len - 1] = (tmp & 0x00FF); 607 skb->data[len - 1] = (tmp & 0x00FF);
608/* The specs say the following is required, but
609 * it is wrong and corrupts the PLCP. If we don't do
610 * this, the PLCP seems to be correct. So ifdef it out for now.
611 */
612#if 0
600 if (rxflags2 & BCM43xx_RXHDR_FLAGS2_TYPE2FRAME) 613 if (rxflags2 & BCM43xx_RXHDR_FLAGS2_TYPE2FRAME)
601 skb->data[0x20] = (tmp & 0xFF00) >> 8; 614 skb->data[2] = (tmp & 0xFF00) >> 8;
602 else 615 else
603 skb->data[0x1E] = (tmp & 0xFF00) >> 8; 616 skb->data[0] = (tmp & 0xFF00) >> 8;
617#endif
604 } 618 }
619 skb_trim(skb, len - IEEE80211_FCS_LEN);
605 bcm43xx_rx(queue->bcm, skb, rxhdr); 620 bcm43xx_rx(queue->bcm, skb, rxhdr);
606} 621}
622
623void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue)
624{
625 bcm43xx_power_saving_ctl_bits(queue->bcm, -1, 1);
626 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
627 bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL)
628 | BCM43xx_PIO_TXCTL_SUSPEND);
629}
630
631void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue)
632{
633 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
634 bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL)
635 & ~BCM43xx_PIO_TXCTL_SUSPEND);
636 bcm43xx_power_saving_ctl_bits(queue->bcm, -1, -1);
637 tasklet_schedule(&queue->txtask);
638}
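
The PIO RX hunks above drop the be16 conversions in favour of little-endian handling, remove the stray early return and debug printk, and trim IEEE80211_FCS_LEN bytes from the end of the received frame. A standalone sketch of the word-by-word copy, the odd trailing byte, and the final trim follows; pio_read_data(), the sample frame and FCS_LEN are invented for illustration and only mimic the structure of the driver's RX loop.

#include <stdint.h>
#include <stdio.h>

#define FCS_LEN 4

/* Fake 16-bit PIO data port: returns successive words of a test frame. */
static const uint8_t frame[] = { 0x08, 0x01, 0x3a, 0x01, 0xde, 0xad, 0xbe };
static size_t port_pos;

static uint16_t pio_read_data(void)
{
	uint16_t lo = frame[port_pos++];
	uint16_t hi = port_pos < sizeof(frame) ? frame[port_pos++] : 0;
	return (uint16_t)(lo | (hi << 8));   /* device presents LE words */
}

int main(void)
{
	uint8_t skb[64];
	size_t len = sizeof(frame), i;

	/* Copy whole 16-bit words, storing them little-endian, as the PIO
	 * RX path now does via cpu_to_le16(). */
	for (i = 0; i + 1 < len; i += 2) {
		uint16_t w = pio_read_data();
		skb[i] = w & 0xFF;
		skb[i + 1] = w >> 8;
	}
	/* An odd-length frame leaves one trailing byte in the next word. */
	if (len % 2)
		skb[len - 1] = pio_read_data() & 0xFF;

	/* Mirror of skb_trim(skb, len - IEEE80211_FCS_LEN) in the driver. */
	if (len > FCS_LEN)
		len -= FCS_LEN;
	printf("payload %zu bytes, first byte 0x%02x\n", len, (unsigned)skb[0]);
	return 0;
}
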
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.h b/drivers/net/wireless/bcm43xx/bcm43xx_pio.h
index 970627bc1769..dfc78209e3a3 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_pio.h
@@ -14,8 +14,8 @@
14#define BCM43xx_PIO_RXCTL 0x08 14#define BCM43xx_PIO_RXCTL 0x08
15#define BCM43xx_PIO_RXDATA 0x0A 15#define BCM43xx_PIO_RXDATA 0x0A
16 16
17#define BCM43xx_PIO_TXCTL_WRITEHI (1 << 0) 17#define BCM43xx_PIO_TXCTL_WRITELO (1 << 0)
18#define BCM43xx_PIO_TXCTL_WRITELO (1 << 1) 18#define BCM43xx_PIO_TXCTL_WRITEHI (1 << 1)
19#define BCM43xx_PIO_TXCTL_COMPLETE (1 << 2) 19#define BCM43xx_PIO_TXCTL_COMPLETE (1 << 2)
20#define BCM43xx_PIO_TXCTL_INIT (1 << 3) 20#define BCM43xx_PIO_TXCTL_INIT (1 << 3)
21#define BCM43xx_PIO_TXCTL_SUSPEND (1 << 7) 21#define BCM43xx_PIO_TXCTL_SUSPEND (1 << 7)
@@ -95,6 +95,7 @@ void bcm43xx_pio_write(struct bcm43xx_pioqueue *queue,
95 u16 offset, u16 value) 95 u16 offset, u16 value)
96{ 96{
97 bcm43xx_write16(queue->bcm, queue->mmio_base + offset, value); 97 bcm43xx_write16(queue->bcm, queue->mmio_base + offset, value);
98 mmiowb();
98} 99}
99 100
100 101
@@ -107,6 +108,9 @@ void bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private *bcm,
107 struct bcm43xx_xmitstatus *status); 108 struct bcm43xx_xmitstatus *status);
108void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue); 109void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue);
109 110
111void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue);
112void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue);
113
110#else /* CONFIG_BCM43XX_PIO */ 114#else /* CONFIG_BCM43XX_PIO */
111 115
112static inline 116static inline
@@ -133,6 +137,14 @@ static inline
133void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue) 137void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue)
134{ 138{
135} 139}
140static inline
141void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue)
142{
143}
144static inline
145void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue)
146{
147}
136 148
137#endif /* CONFIG_BCM43XX_PIO */ 149#endif /* CONFIG_BCM43XX_PIO */
138#endif /* BCM43xx_PIO_H_ */ 150#endif /* BCM43xx_PIO_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_power.c b/drivers/net/wireless/bcm43xx/bcm43xx_power.c
index 3c92b62807c5..6569da3a7a39 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_power.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_power.c
@@ -35,77 +35,101 @@
35#include "bcm43xx_main.h" 35#include "bcm43xx_main.h"
36 36
37 37
38/* Get the Slow Clock Source */
39static int bcm43xx_pctl_get_slowclksrc(struct bcm43xx_private *bcm)
40{
41 u32 tmp;
42 int err;
43
44 assert(bcm->current_core == &bcm->core_chipcommon);
45 if (bcm->current_core->rev < 6) {
46 if (bcm->bustype == BCM43xx_BUSTYPE_PCMCIA ||
47 bcm->bustype == BCM43xx_BUSTYPE_SB)
48 return BCM43xx_PCTL_CLKSRC_XTALOS;
49 if (bcm->bustype == BCM43xx_BUSTYPE_PCI) {
50 err = bcm43xx_pci_read_config32(bcm, BCM43xx_PCTL_OUT, &tmp);
51 assert(!err);
52 if (tmp & 0x10)
53 return BCM43xx_PCTL_CLKSRC_PCI;
54 return BCM43xx_PCTL_CLKSRC_XTALOS;
55 }
56 }
57 if (bcm->current_core->rev < 10) {
58 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL);
59 tmp &= 0x7;
60 if (tmp == 0)
61 return BCM43xx_PCTL_CLKSRC_LOPWROS;
62 if (tmp == 1)
63 return BCM43xx_PCTL_CLKSRC_XTALOS;
64 if (tmp == 2)
65 return BCM43xx_PCTL_CLKSRC_PCI;
66 }
67
68 return BCM43xx_PCTL_CLKSRC_XTALOS;
69}
70
38/* Get max/min slowclock frequency 71/* Get max/min slowclock frequency
39 * as described in http://bcm-specs.sipsolutions.net/PowerControl 72 * as described in http://bcm-specs.sipsolutions.net/PowerControl
40 */ 73 */
41static int bcm43xx_pctl_clockfreqlimit(struct bcm43xx_private *bcm, 74static int bcm43xx_pctl_clockfreqlimit(struct bcm43xx_private *bcm,
42 int get_max) 75 int get_max)
43{ 76{
44 int limit = 0; 77 int limit;
78 int clocksrc;
45 int divisor; 79 int divisor;
46 int selection;
47 int err;
48 u32 tmp; 80 u32 tmp;
49 struct bcm43xx_coreinfo *old_core;
50 81
51 if (!(bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL)) 82 assert(bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL);
52 goto out; 83 assert(bcm->current_core == &bcm->core_chipcommon);
53 old_core = bcm->current_core;
54 err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
55 if (err)
56 goto out;
57 84
85 clocksrc = bcm43xx_pctl_get_slowclksrc(bcm);
58 if (bcm->current_core->rev < 6) { 86 if (bcm->current_core->rev < 6) {
59 if ((bcm->bustype == BCM43xx_BUSTYPE_PCMCIA) || 87 switch (clocksrc) {
60 (bcm->bustype == BCM43xx_BUSTYPE_SB)) { 88 case BCM43xx_PCTL_CLKSRC_PCI:
61 selection = 1; 89 divisor = 64;
90 break;
91 case BCM43xx_PCTL_CLKSRC_XTALOS:
62 divisor = 32; 92 divisor = 32;
63 } else { 93 break;
64 err = bcm43xx_pci_read_config32(bcm, BCM43xx_PCTL_OUT, &tmp); 94 default:
65 if (err) { 95 assert(0);
66 printk(KERN_ERR PFX "clockfreqlimit pcicfg read failure\n"); 96 divisor = 1;
67 goto out_switchback;
68 }
69 if (tmp & 0x10) {
70 /* PCI */
71 selection = 2;
72 divisor = 64;
73 } else {
74 /* XTAL */
75 selection = 1;
76 divisor = 32;
77 }
78 } 97 }
79 } else if (bcm->current_core->rev < 10) { 98 } else if (bcm->current_core->rev < 10) {
80 selection = (tmp & 0x07); 99 switch (clocksrc) {
81 if (selection) { 100 case BCM43xx_PCTL_CLKSRC_LOPWROS:
101 divisor = 1;
102 break;
103 case BCM43xx_PCTL_CLKSRC_XTALOS:
104 case BCM43xx_PCTL_CLKSRC_PCI:
82 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL); 105 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL);
83 divisor = 4 * (1 + ((tmp & 0xFFFF0000) >> 16)); 106 divisor = ((tmp & 0xFFFF0000) >> 16) + 1;
84 } else 107 divisor *= 4;
108 break;
109 default:
110 assert(0);
85 divisor = 1; 111 divisor = 1;
112 }
86 } else { 113 } else {
87 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL); 114 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL);
88 divisor = 4 * (1 + ((tmp & 0xFFFF0000) >> 16)); 115 divisor = ((tmp & 0xFFFF0000) >> 16) + 1;
89 selection = 1; 116 divisor *= 4;
90 } 117 }
91 118
92 switch (selection) { 119 switch (clocksrc) {
93 case 0: 120 case BCM43xx_PCTL_CLKSRC_LOPWROS:
94 /* LPO */
95 if (get_max) 121 if (get_max)
96 limit = 43000; 122 limit = 43000;
97 else 123 else
98 limit = 25000; 124 limit = 25000;
99 break; 125 break;
100 case 1: 126 case BCM43xx_PCTL_CLKSRC_XTALOS:
101 /* XTAL */
102 if (get_max) 127 if (get_max)
103 limit = 20200000; 128 limit = 20200000;
104 else 129 else
105 limit = 19800000; 130 limit = 19800000;
106 break; 131 break;
107 case 2: 132 case BCM43xx_PCTL_CLKSRC_PCI:
108 /* PCI */
109 if (get_max) 133 if (get_max)
110 limit = 34000000; 134 limit = 34000000;
111 else 135 else
@@ -113,17 +137,14 @@ static int bcm43xx_pctl_clockfreqlimit(struct bcm43xx_private *bcm,
113 break; 137 break;
114 default: 138 default:
115 assert(0); 139 assert(0);
140 limit = 0;
116 } 141 }
117 limit /= divisor; 142 limit /= divisor;
118 143
119out_switchback:
120 err = bcm43xx_switch_core(bcm, old_core);
121 assert(err == 0);
122
123out:
124 return limit; 144 return limit;
125} 145}
126 146
147
127/* init power control 148/* init power control
128 * as described in http://bcm-specs.sipsolutions.net/PowerControl 149 * as described in http://bcm-specs.sipsolutions.net/PowerControl
129 */ 150 */
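
bcm43xx_pctl_clockfreqlimit() is rewritten above around an explicit slow-clock source query (bcm43xx_pctl_get_slowclksrc()) with per-source frequency limits and divisors. A small sketch of the limit arithmetic for the "max" case, using only the constants visible in the hunk (43 kHz / 20.2 MHz / 34 MHz maxima, divisor = 4 * (n + 1) from the upper 16 bits of SLOWCLKCTL on chipcommon rev 6..9, no division for the low-power oscillator); the register value and helper names here are illustrative.

#include <stdio.h>

enum clksrc { CLKSRC_PCI, CLKSRC_XTALOS, CLKSRC_LOPWROS };

/* Maximum slow-clock rate per source, as listed in the hunk above. */
static long max_rate(enum clksrc src)
{
	switch (src) {
	case CLKSRC_LOPWROS: return 43000;
	case CLKSRC_XTALOS:  return 20200000;
	case CLKSRC_PCI:     return 34000000;
	}
	return 0;
}

/* Divisor rule for chipcommon rev 6..9: the upper 16 bits of SLOWCLKCTL
 * hold n and the divisor is 4 * (n + 1); the low-power oscillator is
 * never divided. */
static long divisor(enum clksrc src, unsigned long slowclkctl)
{
	if (src == CLKSRC_LOPWROS)
		return 1;
	return (long)((((slowclkctl & 0xFFFF0000UL) >> 16) + 1) * 4);
}

int main(void)
{
	unsigned long slowclkctl = 0x00010000;   /* n = 1 -> divisor 8 */
	enum clksrc src = CLKSRC_XTALOS;

	printf("max slow clock: %ld Hz\n",
	       max_rate(src) / divisor(src, slowclkctl));
	return 0;
}
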
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_power.h b/drivers/net/wireless/bcm43xx/bcm43xx_power.h
index 5f63640810bd..c966ab3a5a8c 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_power.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_power.h
@@ -33,6 +33,15 @@
33 33
34#include <linux/types.h> 34#include <linux/types.h>
35 35
36/* Clock sources */
37enum {
38 /* PCI clock */
39 BCM43xx_PCTL_CLKSRC_PCI,
40 /* Crystal slow clock oscillator */
41 BCM43xx_PCTL_CLKSRC_XTALOS,
42 /* Low power oscillator */
43 BCM43xx_PCTL_CLKSRC_LOPWROS,
44};
36 45
37struct bcm43xx_private; 46struct bcm43xx_private;
38 47
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
index c44d890b949b..b438f48e891d 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
@@ -71,14 +71,46 @@ static int get_boolean(const char *buf, size_t count)
71 return -EINVAL; 71 return -EINVAL;
72} 72}
73 73
74static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len)
75{
76 int i, pos = 0;
77
78 for (i = 0; i < BCM43xx_SPROM_SIZE; i++) {
79 pos += snprintf(buf + pos, buf_len - pos - 1,
80 "%04X", swab16(sprom[i]) & 0xFFFF);
81 }
82 pos += snprintf(buf + pos, buf_len - pos - 1, "\n");
83
84 return pos + 1;
85}
86
87static int hex2sprom(u16 *sprom, const char *dump, size_t len)
88{
89 char tmp[5] = { 0 };
90 int cnt = 0;
91 unsigned long parsed;
92
93 if (len < BCM43xx_SPROM_SIZE * sizeof(u16) * 2)
94 return -EINVAL;
95
96 while (cnt < BCM43xx_SPROM_SIZE) {
97 memcpy(tmp, dump, 4);
98 dump += 4;
99 parsed = simple_strtoul(tmp, NULL, 16);
100 sprom[cnt++] = swab16((u16)parsed);
101 }
102
103 return 0;
104}
105
74static ssize_t bcm43xx_attr_sprom_show(struct device *dev, 106static ssize_t bcm43xx_attr_sprom_show(struct device *dev,
75 struct device_attribute *attr, 107 struct device_attribute *attr,
76 char *buf) 108 char *buf)
77{ 109{
78 struct bcm43xx_private *bcm = devattr_to_bcm(attr, attr_sprom); 110 struct bcm43xx_private *bcm = dev_to_bcm(dev);
79 u16 *sprom; 111 u16 *sprom;
80 unsigned long flags; 112 unsigned long flags;
81 int i, err; 113 int err;
82 114
83 if (!capable(CAP_NET_ADMIN)) 115 if (!capable(CAP_NET_ADMIN))
84 return -EPERM; 116 return -EPERM;
@@ -91,55 +123,53 @@ static ssize_t bcm43xx_attr_sprom_show(struct device *dev,
91 bcm43xx_lock_mmio(bcm, flags); 123 bcm43xx_lock_mmio(bcm, flags);
92 assert(bcm->initialized); 124 assert(bcm->initialized);
93 err = bcm43xx_sprom_read(bcm, sprom); 125 err = bcm43xx_sprom_read(bcm, sprom);
94 if (!err) { 126 if (!err)
95 for (i = 0; i < BCM43xx_SPROM_SIZE; i++) { 127 err = sprom2hex(sprom, buf, PAGE_SIZE);
96 buf[i * 2] = sprom[i] & 0x00FF;
97 buf[i * 2 + 1] = (sprom[i] & 0xFF00) >> 8;
98 }
99 }
100 bcm43xx_unlock_mmio(bcm, flags); 128 bcm43xx_unlock_mmio(bcm, flags);
101 kfree(sprom); 129 kfree(sprom);
102 130
103 return err ? err : BCM43xx_SPROM_SIZE * sizeof(u16); 131 return err;
104} 132}
105 133
106static ssize_t bcm43xx_attr_sprom_store(struct device *dev, 134static ssize_t bcm43xx_attr_sprom_store(struct device *dev,
107 struct device_attribute *attr, 135 struct device_attribute *attr,
108 const char *buf, size_t count) 136 const char *buf, size_t count)
109{ 137{
110 struct bcm43xx_private *bcm = devattr_to_bcm(attr, attr_sprom); 138 struct bcm43xx_private *bcm = dev_to_bcm(dev);
111 u16 *sprom; 139 u16 *sprom;
112 unsigned long flags; 140 unsigned long flags;
113 int i, err; 141 int err;
114 142
115 if (!capable(CAP_NET_ADMIN)) 143 if (!capable(CAP_NET_ADMIN))
116 return -EPERM; 144 return -EPERM;
117 145
118 if (count != BCM43xx_SPROM_SIZE * sizeof(u16))
119 return -EINVAL;
120 sprom = kmalloc(BCM43xx_SPROM_SIZE * sizeof(*sprom), 146 sprom = kmalloc(BCM43xx_SPROM_SIZE * sizeof(*sprom),
121 GFP_KERNEL); 147 GFP_KERNEL);
122 if (!sprom) 148 if (!sprom)
123 return -ENOMEM; 149 return -ENOMEM;
124 for (i = 0; i < BCM43xx_SPROM_SIZE; i++) { 150 err = hex2sprom(sprom, buf, count);
125 sprom[i] = buf[i * 2] & 0xFF; 151 if (err)
126 sprom[i] |= ((u16)(buf[i * 2 + 1] & 0xFF)) << 8; 152 goto out_kfree;
127 }
128 bcm43xx_lock_mmio(bcm, flags); 153 bcm43xx_lock_mmio(bcm, flags);
129 assert(bcm->initialized); 154 assert(bcm->initialized);
130 err = bcm43xx_sprom_write(bcm, sprom); 155 err = bcm43xx_sprom_write(bcm, sprom);
131 bcm43xx_unlock_mmio(bcm, flags); 156 bcm43xx_unlock_mmio(bcm, flags);
157out_kfree:
132 kfree(sprom); 158 kfree(sprom);
133 159
134 return err ? err : count; 160 return err ? err : count;
135 161
136} 162}
137 163
164static DEVICE_ATTR(sprom, 0600,
165 bcm43xx_attr_sprom_show,
166 bcm43xx_attr_sprom_store);
167
138static ssize_t bcm43xx_attr_interfmode_show(struct device *dev, 168static ssize_t bcm43xx_attr_interfmode_show(struct device *dev,
139 struct device_attribute *attr, 169 struct device_attribute *attr,
140 char *buf) 170 char *buf)
141{ 171{
142 struct bcm43xx_private *bcm = devattr_to_bcm(attr, attr_interfmode); 172 struct bcm43xx_private *bcm = dev_to_bcm(dev);
143 unsigned long flags; 173 unsigned long flags;
144 int err; 174 int err;
145 ssize_t count = 0; 175 ssize_t count = 0;
@@ -175,7 +205,7 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev,
175 struct device_attribute *attr, 205 struct device_attribute *attr,
176 const char *buf, size_t count) 206 const char *buf, size_t count)
177{ 207{
178 struct bcm43xx_private *bcm = devattr_to_bcm(attr, attr_interfmode); 208 struct bcm43xx_private *bcm = dev_to_bcm(dev);
179 unsigned long flags; 209 unsigned long flags;
180 int err; 210 int err;
181 int mode; 211 int mode;
@@ -215,11 +245,15 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev,
215 return err ? err : count; 245 return err ? err : count;
216} 246}
217 247
248static DEVICE_ATTR(interference, 0644,
249 bcm43xx_attr_interfmode_show,
250 bcm43xx_attr_interfmode_store);
251
218static ssize_t bcm43xx_attr_preamble_show(struct device *dev, 252static ssize_t bcm43xx_attr_preamble_show(struct device *dev,
219 struct device_attribute *attr, 253 struct device_attribute *attr,
220 char *buf) 254 char *buf)
221{ 255{
222 struct bcm43xx_private *bcm = devattr_to_bcm(attr, attr_preamble); 256 struct bcm43xx_private *bcm = dev_to_bcm(dev);
223 unsigned long flags; 257 unsigned long flags;
224 int err; 258 int err;
225 ssize_t count; 259 ssize_t count;
@@ -245,7 +279,7 @@ static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
245 struct device_attribute *attr, 279 struct device_attribute *attr,
246 const char *buf, size_t count) 280 const char *buf, size_t count)
247{ 281{
248 struct bcm43xx_private *bcm = devattr_to_bcm(attr, attr_preamble); 282 struct bcm43xx_private *bcm = dev_to_bcm(dev);
249 unsigned long flags; 283 unsigned long flags;
250 int err; 284 int err;
251 int value; 285 int value;
@@ -267,56 +301,41 @@ static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
267 return err ? err : count; 301 return err ? err : count;
268} 302}
269 303
304static DEVICE_ATTR(shortpreamble, 0644,
305 bcm43xx_attr_preamble_show,
306 bcm43xx_attr_preamble_store);
307
270int bcm43xx_sysfs_register(struct bcm43xx_private *bcm) 308int bcm43xx_sysfs_register(struct bcm43xx_private *bcm)
271{ 309{
272 struct device *dev = &bcm->pci_dev->dev; 310 struct device *dev = &bcm->pci_dev->dev;
273 struct bcm43xx_sysfs *sysfs = &bcm->sysfs;
274 int err; 311 int err;
275 312
276 assert(bcm->initialized); 313 assert(bcm->initialized);
277 314
278 sysfs->attr_sprom.attr.name = "sprom"; 315 err = device_create_file(dev, &dev_attr_sprom);
279 sysfs->attr_sprom.attr.owner = THIS_MODULE;
280 sysfs->attr_sprom.attr.mode = 0600;
281 sysfs->attr_sprom.show = bcm43xx_attr_sprom_show;
282 sysfs->attr_sprom.store = bcm43xx_attr_sprom_store;
283 err = device_create_file(dev, &sysfs->attr_sprom);
284 if (err) 316 if (err)
285 goto out; 317 goto out;
286 318 err = device_create_file(dev, &dev_attr_interference);
287 sysfs->attr_interfmode.attr.name = "interference";
288 sysfs->attr_interfmode.attr.owner = THIS_MODULE;
289 sysfs->attr_interfmode.attr.mode = 0600;
290 sysfs->attr_interfmode.show = bcm43xx_attr_interfmode_show;
291 sysfs->attr_interfmode.store = bcm43xx_attr_interfmode_store;
292 err = device_create_file(dev, &sysfs->attr_interfmode);
293 if (err) 319 if (err)
294 goto err_remove_sprom; 320 goto err_remove_sprom;
295 321 err = device_create_file(dev, &dev_attr_shortpreamble);
296 sysfs->attr_preamble.attr.name = "shortpreamble";
297 sysfs->attr_preamble.attr.owner = THIS_MODULE;
298 sysfs->attr_preamble.attr.mode = 0600;
299 sysfs->attr_preamble.show = bcm43xx_attr_preamble_show;
300 sysfs->attr_preamble.store = bcm43xx_attr_preamble_store;
301 err = device_create_file(dev, &sysfs->attr_preamble);
302 if (err) 322 if (err)
303 goto err_remove_interfmode; 323 goto err_remove_interfmode;
304 324
305out: 325out:
306 return err; 326 return err;
307err_remove_interfmode: 327err_remove_interfmode:
308 device_remove_file(dev, &sysfs->attr_interfmode); 328 device_remove_file(dev, &dev_attr_interference);
309err_remove_sprom: 329err_remove_sprom:
310 device_remove_file(dev, &sysfs->attr_sprom); 330 device_remove_file(dev, &dev_attr_sprom);
311 goto out; 331 goto out;
312} 332}
313 333
314void bcm43xx_sysfs_unregister(struct bcm43xx_private *bcm) 334void bcm43xx_sysfs_unregister(struct bcm43xx_private *bcm)
315{ 335{
316 struct device *dev = &bcm->pci_dev->dev; 336 struct device *dev = &bcm->pci_dev->dev;
317 struct bcm43xx_sysfs *sysfs = &bcm->sysfs;
318 337
319 device_remove_file(dev, &sysfs->attr_preamble); 338 device_remove_file(dev, &dev_attr_shortpreamble);
320 device_remove_file(dev, &sysfs->attr_interfmode); 339 device_remove_file(dev, &dev_attr_interference);
321 device_remove_file(dev, &sysfs->attr_sprom); 340 device_remove_file(dev, &dev_attr_sprom);
322} 341}
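
The sysfs SPROM attribute now exchanges a human-readable hex dump instead of raw binary: sprom2hex() byte-swaps each 16-bit word and prints it as four hex digits, and the store path (hex2sprom) parses the dump four characters at a time and swaps back. A userspace round-trip sketch of that format follows; SPROM_WORDS and the sample contents are made up, and swab16()/strtoul() stand in for the kernel helpers.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SPROM_WORDS 4   /* the real SPROM is BCM43xx_SPROM_SIZE words */

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* Dump: one byte-swapped 16-bit word per four uppercase hex digits. */
static void sprom2hex(const uint16_t *sprom, char *buf, size_t buflen)
{
	size_t i, pos = 0;

	for (i = 0; i < SPROM_WORDS && pos + 5 < buflen; i++)
		pos += snprintf(buf + pos, buflen - pos, "%04X", swab16(sprom[i]));
	snprintf(buf + pos, buflen - pos, "\n");
}

/* Parse: consume the dump four characters at a time and swap back. */
static int hex2sprom(uint16_t *sprom, const char *dump, size_t len)
{
	char tmp[5] = { 0 };
	size_t cnt = 0;

	if (len < SPROM_WORDS * 4)
		return -1;
	while (cnt < SPROM_WORDS) {
		memcpy(tmp, dump + cnt * 4, 4);
		sprom[cnt++] = swab16((uint16_t)strtoul(tmp, NULL, 16));
	}
	return 0;
}

int main(void)
{
	uint16_t sprom[SPROM_WORDS] = { 0x4243, 0x4D34, 0x3378, 0x7800 };
	uint16_t copy[SPROM_WORDS];
	char buf[64];

	sprom2hex(sprom, buf, sizeof(buf));
	printf("%s", buf);   /* 4342344D78330078 */
	return hex2sprom(copy, buf, strlen(buf)) ||
	       memcmp(copy, sprom, sizeof(sprom));
}
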
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.h b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.h
index 57f14514e3e0..cc701df71e2a 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.h
@@ -1,22 +1,6 @@
1#ifndef BCM43xx_SYSFS_H_ 1#ifndef BCM43xx_SYSFS_H_
2#define BCM43xx_SYSFS_H_ 2#define BCM43xx_SYSFS_H_
3 3
4#include <linux/device.h>
5
6
7struct bcm43xx_sysfs {
8 struct device_attribute attr_sprom;
9 struct device_attribute attr_interfmode;
10 struct device_attribute attr_preamble;
11};
12
13#define devattr_to_bcm(attr, attr_name) ({ \
14 struct bcm43xx_sysfs *__s; struct bcm43xx_private *__p; \
15 __s = container_of((attr), struct bcm43xx_sysfs, attr_name); \
16 __p = container_of(__s, struct bcm43xx_private, sysfs); \
17 __p; \
18 })
19
20struct bcm43xx_private; 4struct bcm43xx_private;
21 5
22int bcm43xx_sysfs_register(struct bcm43xx_private *bcm); 6int bcm43xx_sysfs_register(struct bcm43xx_private *bcm);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index 3daee828ef4b..b45063974ae9 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -182,8 +182,11 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev,
182 mode = BCM43xx_INITIAL_IWMODE; 182 mode = BCM43xx_INITIAL_IWMODE;
183 183
184 bcm43xx_lock_mmio(bcm, flags); 184 bcm43xx_lock_mmio(bcm, flags);
185 if (bcm->ieee->iw_mode != mode) 185 if (bcm->initialized) {
186 bcm43xx_set_iwmode(bcm, mode); 186 if (bcm->ieee->iw_mode != mode)
187 bcm43xx_set_iwmode(bcm, mode);
188 } else
189 bcm->ieee->iw_mode = mode;
187 bcm43xx_unlock_mmio(bcm, flags); 190 bcm43xx_unlock_mmio(bcm, flags);
188 191
189 return 0; 192 return 0;
@@ -962,22 +965,22 @@ static const struct iw_priv_args bcm43xx_priv_wx_args[] = {
962 { 965 {
963 .cmd = PRIV_WX_SET_SHORTPREAMBLE, 966 .cmd = PRIV_WX_SET_SHORTPREAMBLE,
964 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 967 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
965 .name = "set_shortpreambl", 968 .name = "set_shortpreamb",
966 }, 969 },
967 { 970 {
968 .cmd = PRIV_WX_GET_SHORTPREAMBLE, 971 .cmd = PRIV_WX_GET_SHORTPREAMBLE,
969 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 972 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
970 .name = "get_shortpreambl", 973 .name = "get_shortpreamb",
971 }, 974 },
972 { 975 {
973 .cmd = PRIV_WX_SET_SWENCRYPTION, 976 .cmd = PRIV_WX_SET_SWENCRYPTION,
974 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 977 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
975 .name = "set_swencryption", 978 .name = "set_swencrypt",
976 }, 979 },
977 { 980 {
978 .cmd = PRIV_WX_GET_SWENCRYPTION, 981 .cmd = PRIV_WX_GET_SWENCRYPTION,
979 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 982 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
980 .name = "get_swencryption", 983 .name = "get_swencrypt",
981 }, 984 },
982 { 985 {
983 .cmd = PRIV_WX_SPROM_WRITE, 986 .cmd = PRIV_WX_SPROM_WRITE,
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 8b37e824dfcb..8399de581893 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1860,7 +1860,7 @@ static char * __prism2_translate_scan(local_info_t *local,
1860 memset(&iwe, 0, sizeof(iwe)); 1860 memset(&iwe, 0, sizeof(iwe));
1861 iwe.cmd = SIOCGIWFREQ; 1861 iwe.cmd = SIOCGIWFREQ;
1862 if (scan) { 1862 if (scan) {
1863 chan = scan->chid; 1863 chan = le16_to_cpu(scan->chid);
1864 } else if (bss) { 1864 } else if (bss) {
1865 chan = bss->chan; 1865 chan = bss->chan;
1866 } else { 1866 } else {
@@ -1868,7 +1868,7 @@ static char * __prism2_translate_scan(local_info_t *local,
1868 } 1868 }
1869 1869
1870 if (chan > 0) { 1870 if (chan > 0) {
1871 iwe.u.freq.m = freq_list[le16_to_cpu(chan - 1)] * 100000; 1871 iwe.u.freq.m = freq_list[chan - 1] * 100000;
1872 iwe.u.freq.e = 1; 1872 iwe.u.freq.e = 1;
1873 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 1873 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
1874 IW_EV_FREQ_LEN); 1874 IW_EV_FREQ_LEN);
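
The hostap hunk above is a byte-order fix: le16_to_cpu() now wraps the value loaded from scan->chid rather than the index expression, so big-endian hosts index freq_list correctly. A tiny sketch of the difference, with le16_to_cpu_be() standing in for what le16_to_cpu() does on a big-endian machine; the raw value is an invented example.

#include <stdint.h>
#include <stdio.h>

/* On a big-endian host, le16_to_cpu() is a byte swap. */
static uint16_t le16_to_cpu_be(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	/* Channel 6 stored little-endian; a raw 16-bit load on a BE host
	 * sees the bytes reversed, i.e. 0x0600. */
	uint16_t chid_raw = 0x0600;

	int good = le16_to_cpu_be(chid_raw) - 1;   /* index 5 */
	int bad  = le16_to_cpu_be(chid_raw - 1);   /* swaps 0x05FF -> 0xFF05 */

	printf("good index %d, bad index %d\n", good, bad);
	return 0;
}
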
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 8dfdfbd5966c..c2d0b09e0418 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -390,7 +390,7 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
390 } 390 }
391 } else { 391 } else {
392 struct { 392 struct {
393 __le16 qual, signal, noise; 393 __le16 qual, signal, noise, unused;
394 } __attribute__ ((packed)) cq; 394 } __attribute__ ((packed)) cq;
395 395
396 err = HERMES_READ_RECORD(hw, USER_BAP, 396 err = HERMES_READ_RECORD(hw, USER_BAP,
@@ -812,7 +812,6 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
812 if (datalen > IEEE80211_DATA_LEN + 12) { 812 if (datalen > IEEE80211_DATA_LEN + 12) {
813 printk(KERN_DEBUG "%s: oversized monitor frame, " 813 printk(KERN_DEBUG "%s: oversized monitor frame, "
814 "data length = %d\n", dev->name, datalen); 814 "data length = %d\n", dev->name, datalen);
815 err = -EIO;
816 stats->rx_length_errors++; 815 stats->rx_length_errors++;
817 goto update_stats; 816 goto update_stats;
818 } 817 }
@@ -821,8 +820,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
821 if (!skb) { 820 if (!skb) {
822 printk(KERN_WARNING "%s: Cannot allocate skb for monitor frame\n", 821 printk(KERN_WARNING "%s: Cannot allocate skb for monitor frame\n",
823 dev->name); 822 dev->name);
824 err = -ENOMEM; 823 goto update_stats;
825 goto drop;
826 } 824 }
827 825
828 /* Copy the 802.11 header to the skb */ 826 /* Copy the 802.11 header to the skb */
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index ff192e96268a..dade4b903579 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -4306,7 +4306,7 @@ out:
4306 * Insertion of the module 4306 * Insertion of the module
4307 * I'm now quite proud of the multi-device support. 4307 * I'm now quite proud of the multi-device support.
4308 */ 4308 */
4309int init_module(void) 4309int __init init_module(void)
4310{ 4310{
4311 int ret = -EIO; /* Return error if no cards found */ 4311 int ret = -EIO; /* Return error if no cards found */
4312 int i; 4312 int i;