diff options
Diffstat (limited to 'drivers/net')
34 files changed, 777 insertions, 485 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 824e430486c2..830528dce0ca 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c | |||
@@ -1574,6 +1574,7 @@ MODULE_LICENSE("GPL"); | |||
1574 | 1574 | ||
1575 | static int __init el3_init_module(void) | 1575 | static int __init el3_init_module(void) |
1576 | { | 1576 | { |
1577 | int ret = 0; | ||
1577 | el3_cards = 0; | 1578 | el3_cards = 0; |
1578 | 1579 | ||
1579 | if (debug >= 0) | 1580 | if (debug >= 0) |
@@ -1589,14 +1590,16 @@ static int __init el3_init_module(void) | |||
1589 | } | 1590 | } |
1590 | 1591 | ||
1591 | #ifdef CONFIG_EISA | 1592 | #ifdef CONFIG_EISA |
1592 | if (eisa_driver_register (&el3_eisa_driver) < 0) { | 1593 | ret = eisa_driver_register(&el3_eisa_driver); |
1593 | eisa_driver_unregister (&el3_eisa_driver); | ||
1594 | } | ||
1595 | #endif | 1594 | #endif |
1596 | #ifdef CONFIG_MCA | 1595 | #ifdef CONFIG_MCA |
1597 | mca_register_driver(&el3_mca_driver); | 1596 | { |
1597 | int err = mca_register_driver(&el3_mca_driver); | ||
1598 | if (ret == 0) | ||
1599 | ret = err; | ||
1600 | } | ||
1598 | #endif | 1601 | #endif |
1599 | return 0; | 1602 | return ret; |
1600 | } | 1603 | } |
1601 | 1604 | ||
1602 | static void __exit el3_cleanup_module(void) | 1605 | static void __exit el3_cleanup_module(void) |
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index f822cd3025ff..dd410496aadb 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -1118,13 +1118,18 @@ err_out: | |||
1118 | return -ENOMEM; | 1118 | return -ENOMEM; |
1119 | } | 1119 | } |
1120 | 1120 | ||
1121 | static void cp_init_rings_index (struct cp_private *cp) | ||
1122 | { | ||
1123 | cp->rx_tail = 0; | ||
1124 | cp->tx_head = cp->tx_tail = 0; | ||
1125 | } | ||
1126 | |||
1121 | static int cp_init_rings (struct cp_private *cp) | 1127 | static int cp_init_rings (struct cp_private *cp) |
1122 | { | 1128 | { |
1123 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); | 1129 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); |
1124 | cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); | 1130 | cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); |
1125 | 1131 | ||
1126 | cp->rx_tail = 0; | 1132 | cp_init_rings_index(cp); |
1127 | cp->tx_head = cp->tx_tail = 0; | ||
1128 | 1133 | ||
1129 | return cp_refill_rx (cp); | 1134 | return cp_refill_rx (cp); |
1130 | } | 1135 | } |
@@ -1886,30 +1891,30 @@ static int cp_suspend (struct pci_dev *pdev, pm_message_t state) | |||
1886 | 1891 | ||
1887 | spin_unlock_irqrestore (&cp->lock, flags); | 1892 | spin_unlock_irqrestore (&cp->lock, flags); |
1888 | 1893 | ||
1889 | if (cp->pdev && cp->wol_enabled) { | 1894 | pci_save_state(pdev); |
1890 | pci_save_state (cp->pdev); | 1895 | pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled); |
1891 | cp_set_d3_state (cp); | 1896 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
1892 | } | ||
1893 | 1897 | ||
1894 | return 0; | 1898 | return 0; |
1895 | } | 1899 | } |
1896 | 1900 | ||
1897 | static int cp_resume (struct pci_dev *pdev) | 1901 | static int cp_resume (struct pci_dev *pdev) |
1898 | { | 1902 | { |
1899 | struct net_device *dev; | 1903 | struct net_device *dev = pci_get_drvdata (pdev); |
1900 | struct cp_private *cp; | 1904 | struct cp_private *cp = netdev_priv(dev); |
1901 | unsigned long flags; | 1905 | unsigned long flags; |
1902 | 1906 | ||
1903 | dev = pci_get_drvdata (pdev); | 1907 | if (!netif_running(dev)) |
1904 | cp = netdev_priv(dev); | 1908 | return 0; |
1905 | 1909 | ||
1906 | netif_device_attach (dev); | 1910 | netif_device_attach (dev); |
1907 | 1911 | ||
1908 | if (cp->pdev && cp->wol_enabled) { | 1912 | pci_set_power_state(pdev, PCI_D0); |
1909 | pci_set_power_state (cp->pdev, PCI_D0); | 1913 | pci_restore_state(pdev); |
1910 | pci_restore_state (cp->pdev); | 1914 | pci_enable_wake(pdev, PCI_D0, 0); |
1911 | } | 1915 | |
1912 | 1916 | /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ | |
1917 | cp_init_rings_index (cp); | ||
1913 | cp_init_hw (cp); | 1918 | cp_init_hw (cp); |
1914 | netif_start_queue (dev); | 1919 | netif_start_queue (dev); |
1915 | 1920 | ||
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 47c72a63dfe1..8c1ad0fac7b6 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1087,7 +1087,8 @@ config NE2000 | |||
1087 | without a specific driver are compatible with NE2000. | 1087 | without a specific driver are compatible with NE2000. |
1088 | 1088 | ||
1089 | If you have a PCI NE2000 card however, say N here and Y to "PCI | 1089 | If you have a PCI NE2000 card however, say N here and Y to "PCI |
1090 | NE2000 support", above. If you have a NE2000 card and are running on | 1090 | NE2000 and clone support" under "EISA, VLB, PCI and on board |
1091 | controllers" below. If you have a NE2000 card and are running on | ||
1091 | an MCA system (a bus system used on some IBM PS/2 computers and | 1092 | an MCA system (a bus system used on some IBM PS/2 computers and |
1092 | laptops), say N here and Y to "NE/2 (ne2000 MCA version) support", | 1093 | laptops), say N here and Y to "NE/2 (ne2000 MCA version) support", |
1093 | below. | 1094 | below. |
@@ -2020,8 +2021,8 @@ config SIS190 | |||
2020 | will be called sis190. This is recommended. | 2021 | will be called sis190. This is recommended. |
2021 | 2022 | ||
2022 | config SKGE | 2023 | config SKGE |
2023 | tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)" | 2024 | tristate "New SysKonnect GigaEthernet support" |
2024 | depends on PCI && EXPERIMENTAL | 2025 | depends on PCI |
2025 | select CRC32 | 2026 | select CRC32 |
2026 | ---help--- | 2027 | ---help--- |
2027 | This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx | 2028 | This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx |
@@ -2082,7 +2083,6 @@ config SK98LIN | |||
2082 | - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter | 2083 | - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter |
2083 | - Allied Telesyn AT-2971T Gigabit Ethernet Adapter | 2084 | - Allied Telesyn AT-2971T Gigabit Ethernet Adapter |
2084 | - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45 | 2085 | - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45 |
2085 | - DGE-530T Gigabit Ethernet Adapter | ||
2086 | - EG1032 v2 Instant Gigabit Network Adapter | 2086 | - EG1032 v2 Instant Gigabit Network Adapter |
2087 | - EG1064 v2 Instant Gigabit Network Adapter | 2087 | - EG1064 v2 Instant Gigabit Network Adapter |
2088 | - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit) | 2088 | - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit) |
@@ -2176,6 +2176,7 @@ config BNX2 | |||
2176 | config SPIDER_NET | 2176 | config SPIDER_NET |
2177 | tristate "Spider Gigabit Ethernet driver" | 2177 | tristate "Spider Gigabit Ethernet driver" |
2178 | depends on PCI && PPC_CELL | 2178 | depends on PCI && PPC_CELL |
2179 | select FW_LOADER | ||
2179 | help | 2180 | help |
2180 | This driver supports the Gigabit Ethernet chips present on the | 2181 | This driver supports the Gigabit Ethernet chips present on the |
2181 | Cell Processor-Based Blades from IBM. | 2182 | Cell Processor-Based Blades from IBM. |
diff --git a/drivers/net/appletalk/cops.h b/drivers/net/appletalk/cops.h index c68ba9c2ef46..fd2750b269c8 100644 --- a/drivers/net/appletalk/cops.h +++ b/drivers/net/appletalk/cops.h | |||
@@ -51,7 +51,7 @@ | |||
51 | struct ltfirmware | 51 | struct ltfirmware |
52 | { | 52 | { |
53 | unsigned int length; | 53 | unsigned int length; |
54 | unsigned char * data; | 54 | const unsigned char *data; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | #define DAYNA 1 | 57 | #define DAYNA 1 |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e0f51afec778..bcf9f17daf0d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1581,6 +1581,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1581 | printk(KERN_INFO DRV_NAME | 1581 | printk(KERN_INFO DRV_NAME |
1582 | ": %s: %s not enslaved\n", | 1582 | ": %s: %s not enslaved\n", |
1583 | bond_dev->name, slave_dev->name); | 1583 | bond_dev->name, slave_dev->name); |
1584 | write_unlock_bh(&bond->lock); | ||
1584 | return -EINVAL; | 1585 | return -EINVAL; |
1585 | } | 1586 | } |
1586 | 1587 | ||
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c index 230642571c92..e824acaf188a 100644 --- a/drivers/net/chelsio/espi.c +++ b/drivers/net/chelsio/espi.c | |||
@@ -296,9 +296,7 @@ void t1_espi_destroy(struct peespi *espi) | |||
296 | 296 | ||
297 | struct peespi *t1_espi_create(adapter_t *adapter) | 297 | struct peespi *t1_espi_create(adapter_t *adapter) |
298 | { | 298 | { |
299 | struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL); | 299 | struct peespi *espi = kzalloc(sizeof(*espi), GFP_KERNEL); |
300 | |||
301 | memset(espi, 0, sizeof(*espi)); | ||
302 | 300 | ||
303 | if (espi) | 301 | if (espi) |
304 | espi->adapter = adapter; | 302 | espi->adapter = adapter; |
diff --git a/drivers/net/de620.c b/drivers/net/de620.c index 0069f5fa973a..22fc5b869a60 100644 --- a/drivers/net/de620.c +++ b/drivers/net/de620.c | |||
@@ -1012,7 +1012,7 @@ static int __init read_eeprom(struct net_device *dev) | |||
1012 | #ifdef MODULE | 1012 | #ifdef MODULE |
1013 | static struct net_device *de620_dev; | 1013 | static struct net_device *de620_dev; |
1014 | 1014 | ||
1015 | int init_module(void) | 1015 | int __init init_module(void) |
1016 | { | 1016 | { |
1017 | de620_dev = de620_probe(-1); | 1017 | de620_dev = de620_probe(-1); |
1018 | if (IS_ERR(de620_dev)) | 1018 | if (IS_ERR(de620_dev)) |
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 430c628279b3..fb9dae302dcc 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c | |||
@@ -50,8 +50,8 @@ | |||
50 | 50 | ||
51 | */ | 51 | */ |
52 | #define DRV_NAME "D-Link DL2000-based linux driver" | 52 | #define DRV_NAME "D-Link DL2000-based linux driver" |
53 | #define DRV_VERSION "v1.17a" | 53 | #define DRV_VERSION "v1.17b" |
54 | #define DRV_RELDATE "2002/10/04" | 54 | #define DRV_RELDATE "2006/03/10" |
55 | #include "dl2k.h" | 55 | #include "dl2k.h" |
56 | 56 | ||
57 | static char version[] __devinitdata = | 57 | static char version[] __devinitdata = |
@@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq) | |||
765 | break; | 765 | break; |
766 | skb = np->tx_skbuff[entry]; | 766 | skb = np->tx_skbuff[entry]; |
767 | pci_unmap_single (np->pdev, | 767 | pci_unmap_single (np->pdev, |
768 | np->tx_ring[entry].fraginfo, | 768 | np->tx_ring[entry].fraginfo & 0xffffffffffff, |
769 | skb->len, PCI_DMA_TODEVICE); | 769 | skb->len, PCI_DMA_TODEVICE); |
770 | if (irq) | 770 | if (irq) |
771 | dev_kfree_skb_irq (skb); | 771 | dev_kfree_skb_irq (skb); |
@@ -892,14 +892,16 @@ receive_packet (struct net_device *dev) | |||
892 | 892 | ||
893 | /* Small skbuffs for short packets */ | 893 | /* Small skbuffs for short packets */ |
894 | if (pkt_len > copy_thresh) { | 894 | if (pkt_len > copy_thresh) { |
895 | pci_unmap_single (np->pdev, desc->fraginfo, | 895 | pci_unmap_single (np->pdev, |
896 | desc->fraginfo & 0xffffffffffff, | ||
896 | np->rx_buf_sz, | 897 | np->rx_buf_sz, |
897 | PCI_DMA_FROMDEVICE); | 898 | PCI_DMA_FROMDEVICE); |
898 | skb_put (skb = np->rx_skbuff[entry], pkt_len); | 899 | skb_put (skb = np->rx_skbuff[entry], pkt_len); |
899 | np->rx_skbuff[entry] = NULL; | 900 | np->rx_skbuff[entry] = NULL; |
900 | } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) { | 901 | } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) { |
901 | pci_dma_sync_single_for_cpu(np->pdev, | 902 | pci_dma_sync_single_for_cpu(np->pdev, |
902 | desc->fraginfo, | 903 | desc->fraginfo & |
904 | 0xffffffffffff, | ||
903 | np->rx_buf_sz, | 905 | np->rx_buf_sz, |
904 | PCI_DMA_FROMDEVICE); | 906 | PCI_DMA_FROMDEVICE); |
905 | skb->dev = dev; | 907 | skb->dev = dev; |
@@ -910,7 +912,8 @@ receive_packet (struct net_device *dev) | |||
910 | pkt_len, 0); | 912 | pkt_len, 0); |
911 | skb_put (skb, pkt_len); | 913 | skb_put (skb, pkt_len); |
912 | pci_dma_sync_single_for_device(np->pdev, | 914 | pci_dma_sync_single_for_device(np->pdev, |
913 | desc->fraginfo, | 915 | desc->fraginfo & |
916 | 0xffffffffffff, | ||
914 | np->rx_buf_sz, | 917 | np->rx_buf_sz, |
915 | PCI_DMA_FROMDEVICE); | 918 | PCI_DMA_FROMDEVICE); |
916 | } | 919 | } |
@@ -1796,8 +1799,9 @@ rio_close (struct net_device *dev) | |||
1796 | np->rx_ring[i].fraginfo = 0; | 1799 | np->rx_ring[i].fraginfo = 0; |
1797 | skb = np->rx_skbuff[i]; | 1800 | skb = np->rx_skbuff[i]; |
1798 | if (skb) { | 1801 | if (skb) { |
1799 | pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo, | 1802 | pci_unmap_single(np->pdev, |
1800 | skb->len, PCI_DMA_FROMDEVICE); | 1803 | np->rx_ring[i].fraginfo & 0xffffffffffff, |
1804 | skb->len, PCI_DMA_FROMDEVICE); | ||
1801 | dev_kfree_skb (skb); | 1805 | dev_kfree_skb (skb); |
1802 | np->rx_skbuff[i] = NULL; | 1806 | np->rx_skbuff[i] = NULL; |
1803 | } | 1807 | } |
@@ -1805,8 +1809,9 @@ rio_close (struct net_device *dev) | |||
1805 | for (i = 0; i < TX_RING_SIZE; i++) { | 1809 | for (i = 0; i < TX_RING_SIZE; i++) { |
1806 | skb = np->tx_skbuff[i]; | 1810 | skb = np->tx_skbuff[i]; |
1807 | if (skb) { | 1811 | if (skb) { |
1808 | pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo, | 1812 | pci_unmap_single(np->pdev, |
1809 | skb->len, PCI_DMA_TODEVICE); | 1813 | np->tx_ring[i].fraginfo & 0xffffffffffff, |
1814 | skb->len, PCI_DMA_TODEVICE); | ||
1810 | dev_kfree_skb (skb); | 1815 | dev_kfree_skb (skb); |
1811 | np->tx_skbuff[i] = NULL; | 1816 | np->tx_skbuff[i] = NULL; |
1812 | } | 1817 | } |
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 27c77306193b..99baf0e099fc 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -225,9 +225,6 @@ struct e1000_rx_ring { | |||
225 | struct e1000_ps_page *ps_page; | 225 | struct e1000_ps_page *ps_page; |
226 | struct e1000_ps_page_dma *ps_page_dma; | 226 | struct e1000_ps_page_dma *ps_page_dma; |
227 | 227 | ||
228 | struct sk_buff *rx_skb_top; | ||
229 | struct sk_buff *rx_skb_prev; | ||
230 | |||
231 | /* cpu for rx queue */ | 228 | /* cpu for rx queue */ |
232 | int cpu; | 229 | int cpu; |
233 | 230 | ||
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 31e332935e5a..4c4db96d0b7b 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -103,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | |||
103 | #else | 103 | #else |
104 | #define DRIVERNAPI "-NAPI" | 104 | #define DRIVERNAPI "-NAPI" |
105 | #endif | 105 | #endif |
106 | #define DRV_VERSION "6.3.9-k2"DRIVERNAPI | 106 | #define DRV_VERSION "6.3.9-k4"DRIVERNAPI |
107 | char e1000_driver_version[] = DRV_VERSION; | 107 | char e1000_driver_version[] = DRV_VERSION; |
108 | static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; | 108 | static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; |
109 | 109 | ||
@@ -1635,8 +1635,6 @@ setup_rx_desc_die: | |||
1635 | 1635 | ||
1636 | rxdr->next_to_clean = 0; | 1636 | rxdr->next_to_clean = 0; |
1637 | rxdr->next_to_use = 0; | 1637 | rxdr->next_to_use = 0; |
1638 | rxdr->rx_skb_top = NULL; | ||
1639 | rxdr->rx_skb_prev = NULL; | ||
1640 | 1638 | ||
1641 | return 0; | 1639 | return 0; |
1642 | } | 1640 | } |
@@ -1713,8 +1711,23 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1713 | rctl |= adapter->rx_buffer_len << 0x11; | 1711 | rctl |= adapter->rx_buffer_len << 0x11; |
1714 | } else { | 1712 | } else { |
1715 | rctl &= ~E1000_RCTL_SZ_4096; | 1713 | rctl &= ~E1000_RCTL_SZ_4096; |
1716 | rctl &= ~E1000_RCTL_BSEX; | 1714 | rctl |= E1000_RCTL_BSEX; |
1717 | rctl |= E1000_RCTL_SZ_2048; | 1715 | switch (adapter->rx_buffer_len) { |
1716 | case E1000_RXBUFFER_2048: | ||
1717 | default: | ||
1718 | rctl |= E1000_RCTL_SZ_2048; | ||
1719 | rctl &= ~E1000_RCTL_BSEX; | ||
1720 | break; | ||
1721 | case E1000_RXBUFFER_4096: | ||
1722 | rctl |= E1000_RCTL_SZ_4096; | ||
1723 | break; | ||
1724 | case E1000_RXBUFFER_8192: | ||
1725 | rctl |= E1000_RCTL_SZ_8192; | ||
1726 | break; | ||
1727 | case E1000_RXBUFFER_16384: | ||
1728 | rctl |= E1000_RCTL_SZ_16384; | ||
1729 | break; | ||
1730 | } | ||
1718 | } | 1731 | } |
1719 | 1732 | ||
1720 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT | 1733 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT |
@@ -2107,16 +2120,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
2107 | } | 2120 | } |
2108 | } | 2121 | } |
2109 | 2122 | ||
2110 | /* there also may be some cached data in our adapter */ | ||
2111 | if (rx_ring->rx_skb_top) { | ||
2112 | dev_kfree_skb(rx_ring->rx_skb_top); | ||
2113 | |||
2114 | /* rx_skb_prev will be wiped out by rx_skb_top */ | ||
2115 | rx_ring->rx_skb_top = NULL; | ||
2116 | rx_ring->rx_skb_prev = NULL; | ||
2117 | } | ||
2118 | |||
2119 | |||
2120 | size = sizeof(struct e1000_buffer) * rx_ring->count; | 2123 | size = sizeof(struct e1000_buffer) * rx_ring->count; |
2121 | memset(rx_ring->buffer_info, 0, size); | 2124 | memset(rx_ring->buffer_info, 0, size); |
2122 | size = sizeof(struct e1000_ps_page) * rx_ring->count; | 2125 | size = sizeof(struct e1000_ps_page) * rx_ring->count; |
@@ -2914,7 +2917,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2914 | if (!__pskb_pull_tail(skb, pull_size)) { | 2917 | if (!__pskb_pull_tail(skb, pull_size)) { |
2915 | printk(KERN_ERR "__pskb_pull_tail failed.\n"); | 2918 | printk(KERN_ERR "__pskb_pull_tail failed.\n"); |
2916 | dev_kfree_skb_any(skb); | 2919 | dev_kfree_skb_any(skb); |
2917 | return -EFAULT; | 2920 | return NETDEV_TX_OK; |
2918 | } | 2921 | } |
2919 | len = skb->len - skb->data_len; | 2922 | len = skb->len - skb->data_len; |
2920 | } | 2923 | } |
@@ -3106,24 +3109,27 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3106 | break; | 3109 | break; |
3107 | } | 3110 | } |
3108 | 3111 | ||
3109 | /* since the driver code now supports splitting a packet across | 3112 | |
3110 | * multiple descriptors, most of the fifo related limitations on | ||
3111 | * jumbo frame traffic have gone away. | ||
3112 | * simply use 2k descriptors for everything. | ||
3113 | * | ||
3114 | * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | ||
3115 | * means we reserve 2 more, this pushes us to allocate from the next | ||
3116 | * larger slab size | ||
3117 | * i.e. RXBUFFER_2048 --> size-4096 slab */ | ||
3118 | |||
3119 | /* recent hardware supports 1KB granularity */ | ||
3120 | if (adapter->hw.mac_type > e1000_82547_rev_2) { | 3113 | if (adapter->hw.mac_type > e1000_82547_rev_2) { |
3121 | adapter->rx_buffer_len = | 3114 | adapter->rx_buffer_len = max_frame; |
3122 | ((max_frame < E1000_RXBUFFER_2048) ? | ||
3123 | max_frame : E1000_RXBUFFER_2048); | ||
3124 | E1000_ROUNDUP(adapter->rx_buffer_len, 1024); | 3115 | E1000_ROUNDUP(adapter->rx_buffer_len, 1024); |
3125 | } else | 3116 | } else { |
3126 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; | 3117 | if(unlikely((adapter->hw.mac_type < e1000_82543) && |
3118 | (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { | ||
3119 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported " | ||
3120 | "on 82542\n"); | ||
3121 | return -EINVAL; | ||
3122 | } else { | ||
3123 | if(max_frame <= E1000_RXBUFFER_2048) | ||
3124 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; | ||
3125 | else if(max_frame <= E1000_RXBUFFER_4096) | ||
3126 | adapter->rx_buffer_len = E1000_RXBUFFER_4096; | ||
3127 | else if(max_frame <= E1000_RXBUFFER_8192) | ||
3128 | adapter->rx_buffer_len = E1000_RXBUFFER_8192; | ||
3129 | else if(max_frame <= E1000_RXBUFFER_16384) | ||
3130 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; | ||
3131 | } | ||
3132 | } | ||
3127 | 3133 | ||
3128 | netdev->mtu = new_mtu; | 3134 | netdev->mtu = new_mtu; |
3129 | 3135 | ||
@@ -3620,7 +3626,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3620 | uint8_t last_byte; | 3626 | uint8_t last_byte; |
3621 | unsigned int i; | 3627 | unsigned int i; |
3622 | int cleaned_count = 0; | 3628 | int cleaned_count = 0; |
3623 | boolean_t cleaned = FALSE, multi_descriptor = FALSE; | 3629 | boolean_t cleaned = FALSE; |
3624 | 3630 | ||
3625 | i = rx_ring->next_to_clean; | 3631 | i = rx_ring->next_to_clean; |
3626 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 3632 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
@@ -3652,43 +3658,12 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3652 | 3658 | ||
3653 | length = le16_to_cpu(rx_desc->length); | 3659 | length = le16_to_cpu(rx_desc->length); |
3654 | 3660 | ||
3655 | skb_put(skb, length); | 3661 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) { |
3656 | 3662 | /* All receives must fit into a single buffer */ | |
3657 | if (!(status & E1000_RXD_STAT_EOP)) { | 3663 | E1000_DBG("%s: Receive packet consumed multiple" |
3658 | if (!rx_ring->rx_skb_top) { | 3664 | " buffers\n", netdev->name); |
3659 | rx_ring->rx_skb_top = skb; | 3665 | dev_kfree_skb_irq(skb); |
3660 | rx_ring->rx_skb_top->len = length; | ||
3661 | rx_ring->rx_skb_prev = skb; | ||
3662 | } else { | ||
3663 | if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) { | ||
3664 | rx_ring->rx_skb_prev->next = skb; | ||
3665 | skb->prev = rx_ring->rx_skb_prev; | ||
3666 | } else { | ||
3667 | skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb; | ||
3668 | } | ||
3669 | rx_ring->rx_skb_prev = skb; | ||
3670 | rx_ring->rx_skb_top->data_len += length; | ||
3671 | } | ||
3672 | goto next_desc; | 3666 | goto next_desc; |
3673 | } else { | ||
3674 | if (rx_ring->rx_skb_top) { | ||
3675 | if (skb_shinfo(rx_ring->rx_skb_top) | ||
3676 | ->frag_list) { | ||
3677 | rx_ring->rx_skb_prev->next = skb; | ||
3678 | skb->prev = rx_ring->rx_skb_prev; | ||
3679 | } else | ||
3680 | skb_shinfo(rx_ring->rx_skb_top) | ||
3681 | ->frag_list = skb; | ||
3682 | |||
3683 | rx_ring->rx_skb_top->data_len += length; | ||
3684 | rx_ring->rx_skb_top->len += | ||
3685 | rx_ring->rx_skb_top->data_len; | ||
3686 | |||
3687 | skb = rx_ring->rx_skb_top; | ||
3688 | multi_descriptor = TRUE; | ||
3689 | rx_ring->rx_skb_top = NULL; | ||
3690 | rx_ring->rx_skb_prev = NULL; | ||
3691 | } | ||
3692 | } | 3667 | } |
3693 | 3668 | ||
3694 | if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { | 3669 | if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { |
@@ -3712,10 +3687,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3712 | * performance for small packets with large amounts | 3687 | * performance for small packets with large amounts |
3713 | * of reassembly being done in the stack */ | 3688 | * of reassembly being done in the stack */ |
3714 | #define E1000_CB_LENGTH 256 | 3689 | #define E1000_CB_LENGTH 256 |
3715 | if ((length < E1000_CB_LENGTH) && | 3690 | if (length < E1000_CB_LENGTH) { |
3716 | !rx_ring->rx_skb_top && | ||
3717 | /* or maybe (status & E1000_RXD_STAT_EOP) && */ | ||
3718 | !multi_descriptor) { | ||
3719 | struct sk_buff *new_skb = | 3691 | struct sk_buff *new_skb = |
3720 | dev_alloc_skb(length + NET_IP_ALIGN); | 3692 | dev_alloc_skb(length + NET_IP_ALIGN); |
3721 | if (new_skb) { | 3693 | if (new_skb) { |
@@ -3729,7 +3701,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3729 | skb = new_skb; | 3701 | skb = new_skb; |
3730 | skb_put(skb, length); | 3702 | skb_put(skb, length); |
3731 | } | 3703 | } |
3732 | } | 3704 | } else |
3705 | skb_put(skb, length); | ||
3733 | 3706 | ||
3734 | /* end copybreak code */ | 3707 | /* end copybreak code */ |
3735 | 3708 | ||
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 1b699259b4ec..31fb2d75dc44 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c | |||
@@ -57,7 +57,7 @@ struct ifb_private { | |||
57 | struct sk_buff_head tq; | 57 | struct sk_buff_head tq; |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static int numifbs = 1; | 60 | static int numifbs = 2; |
61 | 61 | ||
62 | static void ri_tasklet(unsigned long dev); | 62 | static void ri_tasklet(unsigned long dev); |
63 | static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); | 63 | static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); |
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index fa176ffb4ad5..8936058a3cce 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c | |||
@@ -108,6 +108,7 @@ static void irda_usb_close(struct irda_usb_cb *self); | |||
108 | static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs); | 108 | static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs); |
109 | static void write_bulk_callback(struct urb *urb, struct pt_regs *regs); | 109 | static void write_bulk_callback(struct urb *urb, struct pt_regs *regs); |
110 | static void irda_usb_receive(struct urb *urb, struct pt_regs *regs); | 110 | static void irda_usb_receive(struct urb *urb, struct pt_regs *regs); |
111 | static void irda_usb_rx_defer_expired(unsigned long data); | ||
111 | static int irda_usb_net_open(struct net_device *dev); | 112 | static int irda_usb_net_open(struct net_device *dev); |
112 | static int irda_usb_net_close(struct net_device *dev); | 113 | static int irda_usb_net_close(struct net_device *dev); |
113 | static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 114 | static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
@@ -677,6 +678,12 @@ static void irda_usb_net_timeout(struct net_device *netdev) | |||
677 | * on the interrupt pipe and hang the Rx URB only when an interrupt is | 678 | * on the interrupt pipe and hang the Rx URB only when an interrupt is |
678 | * received. | 679 | * received. |
679 | * Jean II | 680 | * Jean II |
681 | * | ||
682 | * Note : don't read the above as what we are currently doing, but as | ||
683 | * something we could do with KC dongle. Also don't forget that the | ||
684 | * interrupt pipe is not part of the original standard, so this would | ||
685 | * need to be optional... | ||
686 | * Jean II | ||
680 | */ | 687 | */ |
681 | 688 | ||
682 | /*------------------------------------------------------------------*/ | 689 | /*------------------------------------------------------------------*/ |
@@ -704,10 +711,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc | |||
704 | /* Reinitialize URB */ | 711 | /* Reinitialize URB */ |
705 | usb_fill_bulk_urb(urb, self->usbdev, | 712 | usb_fill_bulk_urb(urb, self->usbdev, |
706 | usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep), | 713 | usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep), |
707 | skb->data, skb->truesize, | 714 | skb->data, IRDA_SKB_MAX_MTU, |
708 | irda_usb_receive, skb); | 715 | irda_usb_receive, skb); |
709 | /* Note : unlink *must* be synchronous because of the code in | ||
710 | * irda_usb_net_close() -> free the skb - Jean II */ | ||
711 | urb->status = 0; | 716 | urb->status = 0; |
712 | 717 | ||
713 | /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */ | 718 | /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */ |
@@ -734,6 +739,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs) | |||
734 | struct irda_skb_cb *cb; | 739 | struct irda_skb_cb *cb; |
735 | struct sk_buff *newskb; | 740 | struct sk_buff *newskb; |
736 | struct sk_buff *dataskb; | 741 | struct sk_buff *dataskb; |
742 | struct urb *next_urb; | ||
737 | int docopy; | 743 | int docopy; |
738 | 744 | ||
739 | IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); | 745 | IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); |
@@ -755,20 +761,37 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs) | |||
755 | if (urb->status != 0) { | 761 | if (urb->status != 0) { |
756 | switch (urb->status) { | 762 | switch (urb->status) { |
757 | case -EILSEQ: | 763 | case -EILSEQ: |
758 | self->stats.rx_errors++; | ||
759 | self->stats.rx_crc_errors++; | 764 | self->stats.rx_crc_errors++; |
760 | break; | 765 | /* Also precursor to a hot-unplug on UHCI. */ |
766 | /* Fallthrough... */ | ||
761 | case -ECONNRESET: /* -104 */ | 767 | case -ECONNRESET: /* -104 */ |
762 | IRDA_DEBUG(0, "%s(), Connection Reset (-104), transfer_flags 0x%04X \n", __FUNCTION__, urb->transfer_flags); | 768 | /* Random error, if I remember correctly */ |
763 | /* uhci_cleanup_unlink() is going to kill the Rx | 769 | /* uhci_cleanup_unlink() is going to kill the Rx |
764 | * URB just after we return. No problem, at this | 770 | * URB just after we return. No problem, at this |
765 | * point the URB will be idle ;-) - Jean II */ | 771 | * point the URB will be idle ;-) - Jean II */ |
766 | break; | 772 | case -ESHUTDOWN: /* -108 */ |
773 | /* That's usually a hot-unplug. Submit will fail... */ | ||
774 | case -ETIMEDOUT: /* -110 */ | ||
775 | /* Usually precursor to a hot-unplug on OHCI. */ | ||
767 | default: | 776 | default: |
768 | IRDA_DEBUG(0, "%s(), RX status %d,transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags); | 777 | self->stats.rx_errors++; |
778 | IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags); | ||
769 | break; | 779 | break; |
770 | } | 780 | } |
771 | goto done; | 781 | /* If we received an error, we don't want to resubmit the |
782 | * Rx URB straight away but to give the USB layer a little | ||
783 | * bit of breathing room. | ||
784 | * We are in the USB thread context, therefore there is a | ||
785 | * danger of recursion (new URB we submit fails, we come | ||
786 | * back here). | ||
787 | * With recent USB stack (2.6.15+), I'm seeing that on | ||
788 | * hot unplug of the dongle... | ||
789 | * Lowest effective timer is 10ms... | ||
790 | * Jean II */ | ||
791 | self->rx_defer_timer.function = &irda_usb_rx_defer_expired; | ||
792 | self->rx_defer_timer.data = (unsigned long) urb; | ||
793 | mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000)); | ||
794 | return; | ||
772 | } | 795 | } |
773 | 796 | ||
774 | /* Check for empty frames */ | 797 | /* Check for empty frames */ |
@@ -845,13 +868,45 @@ done: | |||
845 | * idle slot.... | 868 | * idle slot.... |
846 | * Jean II */ | 869 | * Jean II */ |
847 | /* Note : with this scheme, we could submit the idle URB before | 870 | /* Note : with this scheme, we could submit the idle URB before |
848 | * processing the Rx URB. Another time... Jean II */ | 871 | * processing the Rx URB. I don't think it would buy us anything as |
872 | * we are running in the USB thread context. Jean II */ | ||
873 | next_urb = self->idle_rx_urb; | ||
849 | 874 | ||
850 | /* Submit the idle URB to replace the URB we've just received */ | ||
851 | irda_usb_submit(self, skb, self->idle_rx_urb); | ||
852 | /* Recycle Rx URB : Now, the idle URB is the present one */ | 875 | /* Recycle Rx URB : Now, the idle URB is the present one */ |
853 | urb->context = NULL; | 876 | urb->context = NULL; |
854 | self->idle_rx_urb = urb; | 877 | self->idle_rx_urb = urb; |
878 | |||
879 | /* Submit the idle URB to replace the URB we've just received. | ||
880 | * Do it last to avoid race conditions... Jean II */ | ||
881 | irda_usb_submit(self, skb, next_urb); | ||
882 | } | ||
883 | |||
884 | /*------------------------------------------------------------------*/ | ||
885 | /* | ||
886 | * In case of errors, we want the USB layer to have time to recover. | ||
887 | * Now, it is time to resubmit ouur Rx URB... | ||
888 | */ | ||
889 | static void irda_usb_rx_defer_expired(unsigned long data) | ||
890 | { | ||
891 | struct urb *urb = (struct urb *) data; | ||
892 | struct sk_buff *skb = (struct sk_buff *) urb->context; | ||
893 | struct irda_usb_cb *self; | ||
894 | struct irda_skb_cb *cb; | ||
895 | struct urb *next_urb; | ||
896 | |||
897 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | ||
898 | |||
899 | /* Find ourselves */ | ||
900 | cb = (struct irda_skb_cb *) skb->cb; | ||
901 | IRDA_ASSERT(cb != NULL, return;); | ||
902 | self = (struct irda_usb_cb *) cb->context; | ||
903 | IRDA_ASSERT(self != NULL, return;); | ||
904 | |||
905 | /* Same stuff as when Rx is done, see above... */ | ||
906 | next_urb = self->idle_rx_urb; | ||
907 | urb->context = NULL; | ||
908 | self->idle_rx_urb = urb; | ||
909 | irda_usb_submit(self, skb, next_urb); | ||
855 | } | 910 | } |
856 | 911 | ||
857 | /*------------------------------------------------------------------*/ | 912 | /*------------------------------------------------------------------*/ |
@@ -990,6 +1045,9 @@ static int irda_usb_net_close(struct net_device *netdev) | |||
990 | /* Stop network Tx queue */ | 1045 | /* Stop network Tx queue */ |
991 | netif_stop_queue(netdev); | 1046 | netif_stop_queue(netdev); |
992 | 1047 | ||
1048 | /* Kill defered Rx URB */ | ||
1049 | del_timer(&self->rx_defer_timer); | ||
1050 | |||
993 | /* Deallocate all the Rx path buffers (URBs and skb) */ | 1051 | /* Deallocate all the Rx path buffers (URBs and skb) */ |
994 | for (i = 0; i < IU_MAX_RX_URBS; i++) { | 1052 | for (i = 0; i < IU_MAX_RX_URBS; i++) { |
995 | struct urb *urb = self->rx_urb[i]; | 1053 | struct urb *urb = self->rx_urb[i]; |
@@ -1365,6 +1423,7 @@ static int irda_usb_probe(struct usb_interface *intf, | |||
1365 | self = net->priv; | 1423 | self = net->priv; |
1366 | self->netdev = net; | 1424 | self->netdev = net; |
1367 | spin_lock_init(&self->lock); | 1425 | spin_lock_init(&self->lock); |
1426 | init_timer(&self->rx_defer_timer); | ||
1368 | 1427 | ||
1369 | /* Create all of the needed urbs */ | 1428 | /* Create all of the needed urbs */ |
1370 | for (i = 0; i < IU_MAX_RX_URBS; i++) { | 1429 | for (i = 0; i < IU_MAX_RX_URBS; i++) { |
@@ -1498,6 +1557,9 @@ static void irda_usb_disconnect(struct usb_interface *intf) | |||
1498 | * This will stop/desactivate the Tx path. - Jean II */ | 1557 | * This will stop/desactivate the Tx path. - Jean II */ |
1499 | self->present = 0; | 1558 | self->present = 0; |
1500 | 1559 | ||
1560 | /* Kill defered Rx URB */ | ||
1561 | del_timer(&self->rx_defer_timer); | ||
1562 | |||
1501 | /* We need to have irq enabled to unlink the URBs. That's OK, | 1563 | /* We need to have irq enabled to unlink the URBs. That's OK, |
1502 | * at this point the Tx path is gone - Jean II */ | 1564 | * at this point the Tx path is gone - Jean II */ |
1503 | spin_unlock_irqrestore(&self->lock, flags); | 1565 | spin_unlock_irqrestore(&self->lock, flags); |
@@ -1507,11 +1569,11 @@ static void irda_usb_disconnect(struct usb_interface *intf) | |||
1507 | /* Accept no more transmissions */ | 1569 | /* Accept no more transmissions */ |
1508 | /*netif_device_detach(self->netdev);*/ | 1570 | /*netif_device_detach(self->netdev);*/ |
1509 | netif_stop_queue(self->netdev); | 1571 | netif_stop_queue(self->netdev); |
1510 | /* Stop all the receive URBs */ | 1572 | /* Stop all the receive URBs. Must be synchronous. */ |
1511 | for (i = 0; i < IU_MAX_RX_URBS; i++) | 1573 | for (i = 0; i < IU_MAX_RX_URBS; i++) |
1512 | usb_kill_urb(self->rx_urb[i]); | 1574 | usb_kill_urb(self->rx_urb[i]); |
1513 | /* Cancel Tx and speed URB. | 1575 | /* Cancel Tx and speed URB. |
1514 | * Toggle flags to make sure it's synchronous. */ | 1576 | * Make sure it's synchronous to avoid races. */ |
1515 | usb_kill_urb(self->tx_urb); | 1577 | usb_kill_urb(self->tx_urb); |
1516 | usb_kill_urb(self->speed_urb); | 1578 | usb_kill_urb(self->speed_urb); |
1517 | } | 1579 | } |
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h index bd8f66542322..4026af42dd47 100644 --- a/drivers/net/irda/irda-usb.h +++ b/drivers/net/irda/irda-usb.h | |||
@@ -136,8 +136,6 @@ struct irda_usb_cb { | |||
136 | __u16 bulk_out_mtu; /* Max Tx packet size in bytes */ | 136 | __u16 bulk_out_mtu; /* Max Tx packet size in bytes */ |
137 | __u8 bulk_int_ep; /* Interrupt Endpoint assignments */ | 137 | __u8 bulk_int_ep; /* Interrupt Endpoint assignments */ |
138 | 138 | ||
139 | wait_queue_head_t wait_q; /* for timeouts */ | ||
140 | |||
141 | struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */ | 139 | struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */ |
142 | struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */ | 140 | struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */ |
143 | struct urb *tx_urb; /* URB used to send data frames */ | 141 | struct urb *tx_urb; /* URB used to send data frames */ |
@@ -147,17 +145,18 @@ struct irda_usb_cb { | |||
147 | struct net_device_stats stats; | 145 | struct net_device_stats stats; |
148 | struct irlap_cb *irlap; /* The link layer we are binded to */ | 146 | struct irlap_cb *irlap; /* The link layer we are binded to */ |
149 | struct qos_info qos; | 147 | struct qos_info qos; |
150 | hashbin_t *tx_list; /* Queued transmit skb's */ | ||
151 | char *speed_buff; /* Buffer for speed changes */ | 148 | char *speed_buff; /* Buffer for speed changes */ |
152 | 149 | ||
153 | struct timeval stamp; | 150 | struct timeval stamp; |
154 | struct timeval now; | 151 | struct timeval now; |
155 | 152 | ||
156 | spinlock_t lock; /* For serializing operations */ | 153 | spinlock_t lock; /* For serializing Tx operations */ |
157 | 154 | ||
158 | __u16 xbofs; /* Current xbofs setting */ | 155 | __u16 xbofs; /* Current xbofs setting */ |
159 | __s16 new_xbofs; /* xbofs we need to set */ | 156 | __s16 new_xbofs; /* xbofs we need to set */ |
160 | __u32 speed; /* Current speed */ | 157 | __u32 speed; /* Current speed */ |
161 | __s32 new_speed; /* speed we need to set */ | 158 | __s32 new_speed; /* speed we need to set */ |
159 | |||
160 | struct timer_list rx_defer_timer; /* Wait for Rx error to clear */ | ||
162 | }; | 161 | }; |
163 | 162 | ||
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 01ddfc8cce3f..aa5581369399 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -806,6 +806,7 @@ static struct pcmcia_device_id axnet_ids[] = { | |||
806 | PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309), | 806 | PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309), |
807 | PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106), | 807 | PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106), |
808 | PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), | 808 | PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), |
809 | PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc), | ||
809 | PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef), | 810 | PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef), |
810 | PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef), | 811 | PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef), |
811 | PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1), | 812 | PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1), |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 6e1018448eea..8cc0d0bbdf50 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -287,6 +287,20 @@ enum RTL8169_register_content { | |||
287 | TxInterFrameGapShift = 24, | 287 | TxInterFrameGapShift = 24, |
288 | TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ | 288 | TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ |
289 | 289 | ||
290 | /* Config1 register p.24 */ | ||
291 | PMEnable = (1 << 0), /* Power Management Enable */ | ||
292 | |||
293 | /* Config3 register p.25 */ | ||
294 | MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ | ||
295 | LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ | ||
296 | |||
297 | /* Config5 register p.27 */ | ||
298 | BWF = (1 << 6), /* Accept Broadcast wakeup frame */ | ||
299 | MWF = (1 << 5), /* Accept Multicast wakeup frame */ | ||
300 | UWF = (1 << 4), /* Accept Unicast wakeup frame */ | ||
301 | LanWake = (1 << 1), /* LanWake enable/disable */ | ||
302 | PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ | ||
303 | |||
290 | /* TBICSR p.28 */ | 304 | /* TBICSR p.28 */ |
291 | TBIReset = 0x80000000, | 305 | TBIReset = 0x80000000, |
292 | TBILoopback = 0x40000000, | 306 | TBILoopback = 0x40000000, |
@@ -433,6 +447,7 @@ struct rtl8169_private { | |||
433 | unsigned int (*phy_reset_pending)(void __iomem *); | 447 | unsigned int (*phy_reset_pending)(void __iomem *); |
434 | unsigned int (*link_ok)(void __iomem *); | 448 | unsigned int (*link_ok)(void __iomem *); |
435 | struct work_struct task; | 449 | struct work_struct task; |
450 | unsigned wol_enabled : 1; | ||
436 | }; | 451 | }; |
437 | 452 | ||
438 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); | 453 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); |
@@ -607,6 +622,80 @@ static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex) | |||
607 | *duplex = p->duplex; | 622 | *duplex = p->duplex; |
608 | } | 623 | } |
609 | 624 | ||
625 | static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
626 | { | ||
627 | struct rtl8169_private *tp = netdev_priv(dev); | ||
628 | void __iomem *ioaddr = tp->mmio_addr; | ||
629 | u8 options; | ||
630 | |||
631 | wol->wolopts = 0; | ||
632 | |||
633 | #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) | ||
634 | wol->supported = WAKE_ANY; | ||
635 | |||
636 | spin_lock_irq(&tp->lock); | ||
637 | |||
638 | options = RTL_R8(Config1); | ||
639 | if (!(options & PMEnable)) | ||
640 | goto out_unlock; | ||
641 | |||
642 | options = RTL_R8(Config3); | ||
643 | if (options & LinkUp) | ||
644 | wol->wolopts |= WAKE_PHY; | ||
645 | if (options & MagicPacket) | ||
646 | wol->wolopts |= WAKE_MAGIC; | ||
647 | |||
648 | options = RTL_R8(Config5); | ||
649 | if (options & UWF) | ||
650 | wol->wolopts |= WAKE_UCAST; | ||
651 | if (options & BWF) | ||
652 | wol->wolopts |= WAKE_BCAST; | ||
653 | if (options & MWF) | ||
654 | wol->wolopts |= WAKE_MCAST; | ||
655 | |||
656 | out_unlock: | ||
657 | spin_unlock_irq(&tp->lock); | ||
658 | } | ||
659 | |||
660 | static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
661 | { | ||
662 | struct rtl8169_private *tp = netdev_priv(dev); | ||
663 | void __iomem *ioaddr = tp->mmio_addr; | ||
664 | int i; | ||
665 | static struct { | ||
666 | u32 opt; | ||
667 | u16 reg; | ||
668 | u8 mask; | ||
669 | } cfg[] = { | ||
670 | { WAKE_ANY, Config1, PMEnable }, | ||
671 | { WAKE_PHY, Config3, LinkUp }, | ||
672 | { WAKE_MAGIC, Config3, MagicPacket }, | ||
673 | { WAKE_UCAST, Config5, UWF }, | ||
674 | { WAKE_BCAST, Config5, BWF }, | ||
675 | { WAKE_MCAST, Config5, MWF }, | ||
676 | { WAKE_ANY, Config5, LanWake } | ||
677 | }; | ||
678 | |||
679 | spin_lock_irq(&tp->lock); | ||
680 | |||
681 | RTL_W8(Cfg9346, Cfg9346_Unlock); | ||
682 | |||
683 | for (i = 0; i < ARRAY_SIZE(cfg); i++) { | ||
684 | u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; | ||
685 | if (wol->wolopts & cfg[i].opt) | ||
686 | options |= cfg[i].mask; | ||
687 | RTL_W8(cfg[i].reg, options); | ||
688 | } | ||
689 | |||
690 | RTL_W8(Cfg9346, Cfg9346_Lock); | ||
691 | |||
692 | tp->wol_enabled = (wol->wolopts) ? 1 : 0; | ||
693 | |||
694 | spin_unlock_irq(&tp->lock); | ||
695 | |||
696 | return 0; | ||
697 | } | ||
698 | |||
610 | static void rtl8169_get_drvinfo(struct net_device *dev, | 699 | static void rtl8169_get_drvinfo(struct net_device *dev, |
611 | struct ethtool_drvinfo *info) | 700 | struct ethtool_drvinfo *info) |
612 | { | 701 | { |
@@ -1025,6 +1114,8 @@ static struct ethtool_ops rtl8169_ethtool_ops = { | |||
1025 | .get_tso = ethtool_op_get_tso, | 1114 | .get_tso = ethtool_op_get_tso, |
1026 | .set_tso = ethtool_op_set_tso, | 1115 | .set_tso = ethtool_op_set_tso, |
1027 | .get_regs = rtl8169_get_regs, | 1116 | .get_regs = rtl8169_get_regs, |
1117 | .get_wol = rtl8169_get_wol, | ||
1118 | .set_wol = rtl8169_set_wol, | ||
1028 | .get_strings = rtl8169_get_strings, | 1119 | .get_strings = rtl8169_get_strings, |
1029 | .get_stats_count = rtl8169_get_stats_count, | 1120 | .get_stats_count = rtl8169_get_stats_count, |
1030 | .get_ethtool_stats = rtl8169_get_ethtool_stats, | 1121 | .get_ethtool_stats = rtl8169_get_ethtool_stats, |
@@ -1442,6 +1533,11 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, | |||
1442 | } | 1533 | } |
1443 | tp->chipset = i; | 1534 | tp->chipset = i; |
1444 | 1535 | ||
1536 | RTL_W8(Cfg9346, Cfg9346_Unlock); | ||
1537 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); | ||
1538 | RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); | ||
1539 | RTL_W8(Cfg9346, Cfg9346_Lock); | ||
1540 | |||
1445 | *ioaddr_out = ioaddr; | 1541 | *ioaddr_out = ioaddr; |
1446 | *dev_out = dev; | 1542 | *dev_out = dev; |
1447 | out: | 1543 | out: |
@@ -1612,49 +1708,6 @@ rtl8169_remove_one(struct pci_dev *pdev) | |||
1612 | pci_set_drvdata(pdev, NULL); | 1708 | pci_set_drvdata(pdev, NULL); |
1613 | } | 1709 | } |
1614 | 1710 | ||
1615 | #ifdef CONFIG_PM | ||
1616 | |||
1617 | static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state) | ||
1618 | { | ||
1619 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1620 | struct rtl8169_private *tp = netdev_priv(dev); | ||
1621 | void __iomem *ioaddr = tp->mmio_addr; | ||
1622 | unsigned long flags; | ||
1623 | |||
1624 | if (!netif_running(dev)) | ||
1625 | return 0; | ||
1626 | |||
1627 | netif_device_detach(dev); | ||
1628 | netif_stop_queue(dev); | ||
1629 | spin_lock_irqsave(&tp->lock, flags); | ||
1630 | |||
1631 | /* Disable interrupts, stop Rx and Tx */ | ||
1632 | RTL_W16(IntrMask, 0); | ||
1633 | RTL_W8(ChipCmd, 0); | ||
1634 | |||
1635 | /* Update the error counts. */ | ||
1636 | tp->stats.rx_missed_errors += RTL_R32(RxMissed); | ||
1637 | RTL_W32(RxMissed, 0); | ||
1638 | spin_unlock_irqrestore(&tp->lock, flags); | ||
1639 | |||
1640 | return 0; | ||
1641 | } | ||
1642 | |||
1643 | static int rtl8169_resume(struct pci_dev *pdev) | ||
1644 | { | ||
1645 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1646 | |||
1647 | if (!netif_running(dev)) | ||
1648 | return 0; | ||
1649 | |||
1650 | netif_device_attach(dev); | ||
1651 | rtl8169_hw_start(dev); | ||
1652 | |||
1653 | return 0; | ||
1654 | } | ||
1655 | |||
1656 | #endif /* CONFIG_PM */ | ||
1657 | |||
1658 | static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, | 1711 | static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, |
1659 | struct net_device *dev) | 1712 | struct net_device *dev) |
1660 | { | 1713 | { |
@@ -2700,6 +2753,56 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) | |||
2700 | return &tp->stats; | 2753 | return &tp->stats; |
2701 | } | 2754 | } |
2702 | 2755 | ||
2756 | #ifdef CONFIG_PM | ||
2757 | |||
2758 | static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2759 | { | ||
2760 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2761 | struct rtl8169_private *tp = netdev_priv(dev); | ||
2762 | void __iomem *ioaddr = tp->mmio_addr; | ||
2763 | |||
2764 | if (!netif_running(dev)) | ||
2765 | goto out; | ||
2766 | |||
2767 | netif_device_detach(dev); | ||
2768 | netif_stop_queue(dev); | ||
2769 | |||
2770 | spin_lock_irq(&tp->lock); | ||
2771 | |||
2772 | rtl8169_asic_down(ioaddr); | ||
2773 | |||
2774 | tp->stats.rx_missed_errors += RTL_R32(RxMissed); | ||
2775 | RTL_W32(RxMissed, 0); | ||
2776 | |||
2777 | spin_unlock_irq(&tp->lock); | ||
2778 | |||
2779 | pci_save_state(pdev); | ||
2780 | pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled); | ||
2781 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
2782 | out: | ||
2783 | return 0; | ||
2784 | } | ||
2785 | |||
2786 | static int rtl8169_resume(struct pci_dev *pdev) | ||
2787 | { | ||
2788 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2789 | |||
2790 | if (!netif_running(dev)) | ||
2791 | goto out; | ||
2792 | |||
2793 | netif_device_attach(dev); | ||
2794 | |||
2795 | pci_set_power_state(pdev, PCI_D0); | ||
2796 | pci_restore_state(pdev); | ||
2797 | pci_enable_wake(pdev, PCI_D0, 0); | ||
2798 | |||
2799 | rtl8169_schedule_work(dev, rtl8169_reset_task); | ||
2800 | out: | ||
2801 | return 0; | ||
2802 | } | ||
2803 | |||
2804 | #endif /* CONFIG_PM */ | ||
2805 | |||
2703 | static struct pci_driver rtl8169_pci_driver = { | 2806 | static struct pci_driver rtl8169_pci_driver = { |
2704 | .name = MODULENAME, | 2807 | .name = MODULENAME, |
2705 | .id_table = rtl8169_pci_tbl, | 2808 | .id_table = rtl8169_pci_tbl, |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 49b597cbc19a..b7f00d6eb6a6 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -4092,6 +4092,7 @@ static void s2io_set_multicast(struct net_device *dev) | |||
4092 | i++, mclist = mclist->next) { | 4092 | i++, mclist = mclist->next) { |
4093 | memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr, | 4093 | memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr, |
4094 | ETH_ALEN); | 4094 | ETH_ALEN); |
4095 | mac_addr = 0; | ||
4095 | for (j = 0; j < ETH_ALEN; j++) { | 4096 | for (j = 0; j < ETH_ALEN; j++) { |
4096 | mac_addr |= mclist->dmi_addr[j]; | 4097 | mac_addr |= mclist->dmi_addr[j]; |
4097 | mac_addr <<= 8; | 4098 | mac_addr <<= 8; |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index b420182eec4b..ed4bc91638d2 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -1791,6 +1791,8 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, | |||
1791 | goto out; | 1791 | goto out; |
1792 | } | 1792 | } |
1793 | 1793 | ||
1794 | pci_set_drvdata(pdev, dev); | ||
1795 | |||
1794 | tp = netdev_priv(dev); | 1796 | tp = netdev_priv(dev); |
1795 | ioaddr = tp->mmio_addr; | 1797 | ioaddr = tp->mmio_addr; |
1796 | 1798 | ||
@@ -1827,8 +1829,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, | |||
1827 | if (rc < 0) | 1829 | if (rc < 0) |
1828 | goto err_remove_mii; | 1830 | goto err_remove_mii; |
1829 | 1831 | ||
1830 | pci_set_drvdata(pdev, dev); | ||
1831 | |||
1832 | net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), " | 1832 | net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), " |
1833 | "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", | 1833 | "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", |
1834 | pci_name(pdev), sis_chip_info[ent->driver_data].name, | 1834 | pci_name(pdev), sis_chip_info[ent->driver_data].name, |
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 3d95fa20cd88..7a952fe60be2 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -540,7 +540,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, | |||
540 | printk("%2.2x.\n", net_dev->dev_addr[i]); | 540 | printk("%2.2x.\n", net_dev->dev_addr[i]); |
541 | 541 | ||
542 | /* Detect Wake on Lan support */ | 542 | /* Detect Wake on Lan support */ |
543 | ret = inl(CFGPMC & PMESP); | 543 | ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27; |
544 | if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0) | 544 | if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0) |
545 | printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name); | 545 | printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name); |
546 | 546 | ||
@@ -2040,7 +2040,7 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo | |||
2040 | 2040 | ||
2041 | if (wol->wolopts == 0) { | 2041 | if (wol->wolopts == 0) { |
2042 | pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); | 2042 | pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); |
2043 | cfgpmcsr |= ~PME_EN; | 2043 | cfgpmcsr &= ~PME_EN; |
2044 | pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr); | 2044 | pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr); |
2045 | outl(pmctrl_bits, pmctrl_addr); | 2045 | outl(pmctrl_bits, pmctrl_addr); |
2046 | if (netif_msg_wol(sis_priv)) | 2046 | if (netif_msg_wol(sis_priv)) |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index bf55a4cfb3d2..25e028b7ce48 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -879,13 +879,12 @@ static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) | |||
879 | int i; | 879 | int i; |
880 | 880 | ||
881 | xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); | 881 | xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); |
882 | xm_read16(hw, port, XM_PHY_DATA); | 882 | *val = xm_read16(hw, port, XM_PHY_DATA); |
883 | 883 | ||
884 | /* Need to wait for external PHY */ | ||
885 | for (i = 0; i < PHY_RETRIES; i++) { | 884 | for (i = 0; i < PHY_RETRIES; i++) { |
886 | udelay(1); | ||
887 | if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) | 885 | if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) |
888 | goto ready; | 886 | goto ready; |
887 | udelay(1); | ||
889 | } | 888 | } |
890 | 889 | ||
891 | return -ETIMEDOUT; | 890 | return -ETIMEDOUT; |
@@ -918,7 +917,12 @@ static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) | |||
918 | 917 | ||
919 | ready: | 918 | ready: |
920 | xm_write16(hw, port, XM_PHY_DATA, val); | 919 | xm_write16(hw, port, XM_PHY_DATA, val); |
921 | return 0; | 920 | for (i = 0; i < PHY_RETRIES; i++) { |
921 | if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) | ||
922 | return 0; | ||
923 | udelay(1); | ||
924 | } | ||
925 | return -ETIMEDOUT; | ||
922 | } | 926 | } |
923 | 927 | ||
924 | static void genesis_init(struct skge_hw *hw) | 928 | static void genesis_init(struct skge_hw *hw) |
@@ -1168,13 +1172,17 @@ static void genesis_mac_init(struct skge_hw *hw, int port) | |||
1168 | u32 r; | 1172 | u32 r; |
1169 | const u8 zero[6] = { 0 }; | 1173 | const u8 zero[6] = { 0 }; |
1170 | 1174 | ||
1171 | /* Clear MIB counters */ | 1175 | for (i = 0; i < 10; i++) { |
1172 | xm_write16(hw, port, XM_STAT_CMD, | 1176 | skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), |
1173 | XM_SC_CLR_RXC | XM_SC_CLR_TXC); | 1177 | MFF_SET_MAC_RST); |
1174 | /* Clear two times according to Errata #3 */ | 1178 | if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST) |
1175 | xm_write16(hw, port, XM_STAT_CMD, | 1179 | goto reset_ok; |
1176 | XM_SC_CLR_RXC | XM_SC_CLR_TXC); | 1180 | udelay(1); |
1181 | } | ||
1177 | 1182 | ||
1183 | printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name); | ||
1184 | |||
1185 | reset_ok: | ||
1178 | /* Unreset the XMAC. */ | 1186 | /* Unreset the XMAC. */ |
1179 | skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); | 1187 | skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); |
1180 | 1188 | ||
@@ -1191,7 +1199,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port) | |||
1191 | r |= GP_DIR_2|GP_IO_2; | 1199 | r |= GP_DIR_2|GP_IO_2; |
1192 | 1200 | ||
1193 | skge_write32(hw, B2_GP_IO, r); | 1201 | skge_write32(hw, B2_GP_IO, r); |
1194 | skge_read32(hw, B2_GP_IO); | 1202 | |
1195 | 1203 | ||
1196 | /* Enable GMII interface */ | 1204 | /* Enable GMII interface */ |
1197 | xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); | 1205 | xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); |
@@ -1205,6 +1213,13 @@ static void genesis_mac_init(struct skge_hw *hw, int port) | |||
1205 | for (i = 1; i < 16; i++) | 1213 | for (i = 1; i < 16; i++) |
1206 | xm_outaddr(hw, port, XM_EXM(i), zero); | 1214 | xm_outaddr(hw, port, XM_EXM(i), zero); |
1207 | 1215 | ||
1216 | /* Clear MIB counters */ | ||
1217 | xm_write16(hw, port, XM_STAT_CMD, | ||
1218 | XM_SC_CLR_RXC | XM_SC_CLR_TXC); | ||
1219 | /* Clear two times according to Errata #3 */ | ||
1220 | xm_write16(hw, port, XM_STAT_CMD, | ||
1221 | XM_SC_CLR_RXC | XM_SC_CLR_TXC); | ||
1222 | |||
1208 | /* configure Rx High Water Mark (XM_RX_HI_WM) */ | 1223 | /* configure Rx High Water Mark (XM_RX_HI_WM) */ |
1209 | xm_write16(hw, port, XM_RX_HI_WM, 1450); | 1224 | xm_write16(hw, port, XM_RX_HI_WM, 1450); |
1210 | 1225 | ||
@@ -1697,6 +1712,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port) | |||
1697 | skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); | 1712 | skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); |
1698 | skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); | 1713 | skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); |
1699 | skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); | 1714 | skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); |
1715 | |||
1700 | if (skge->autoneg == AUTONEG_DISABLE) { | 1716 | if (skge->autoneg == AUTONEG_DISABLE) { |
1701 | reg = GM_GPCR_AU_ALL_DIS; | 1717 | reg = GM_GPCR_AU_ALL_DIS; |
1702 | gma_write16(hw, port, GM_GP_CTRL, | 1718 | gma_write16(hw, port, GM_GP_CTRL, |
@@ -1704,16 +1720,23 @@ static void yukon_mac_init(struct skge_hw *hw, int port) | |||
1704 | 1720 | ||
1705 | switch (skge->speed) { | 1721 | switch (skge->speed) { |
1706 | case SPEED_1000: | 1722 | case SPEED_1000: |
1723 | reg &= ~GM_GPCR_SPEED_100; | ||
1707 | reg |= GM_GPCR_SPEED_1000; | 1724 | reg |= GM_GPCR_SPEED_1000; |
1708 | /* fallthru */ | 1725 | break; |
1709 | case SPEED_100: | 1726 | case SPEED_100: |
1727 | reg &= ~GM_GPCR_SPEED_1000; | ||
1710 | reg |= GM_GPCR_SPEED_100; | 1728 | reg |= GM_GPCR_SPEED_100; |
1729 | break; | ||
1730 | case SPEED_10: | ||
1731 | reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); | ||
1732 | break; | ||
1711 | } | 1733 | } |
1712 | 1734 | ||
1713 | if (skge->duplex == DUPLEX_FULL) | 1735 | if (skge->duplex == DUPLEX_FULL) |
1714 | reg |= GM_GPCR_DUP_FULL; | 1736 | reg |= GM_GPCR_DUP_FULL; |
1715 | } else | 1737 | } else |
1716 | reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; | 1738 | reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; |
1739 | |||
1717 | switch (skge->flow_control) { | 1740 | switch (skge->flow_control) { |
1718 | case FLOW_MODE_NONE: | 1741 | case FLOW_MODE_NONE: |
1719 | skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); | 1742 | skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); |
@@ -2162,8 +2185,10 @@ static int skge_up(struct net_device *dev) | |||
2162 | skge->tx_avail = skge->tx_ring.count - 1; | 2185 | skge->tx_avail = skge->tx_ring.count - 1; |
2163 | 2186 | ||
2164 | /* Enable IRQ from port */ | 2187 | /* Enable IRQ from port */ |
2188 | spin_lock_irq(&hw->hw_lock); | ||
2165 | hw->intr_mask |= portirqmask[port]; | 2189 | hw->intr_mask |= portirqmask[port]; |
2166 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2190 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2191 | spin_unlock_irq(&hw->hw_lock); | ||
2167 | 2192 | ||
2168 | /* Initialize MAC */ | 2193 | /* Initialize MAC */ |
2169 | spin_lock_bh(&hw->phy_lock); | 2194 | spin_lock_bh(&hw->phy_lock); |
@@ -2221,8 +2246,10 @@ static int skge_down(struct net_device *dev) | |||
2221 | else | 2246 | else |
2222 | yukon_stop(skge); | 2247 | yukon_stop(skge); |
2223 | 2248 | ||
2249 | spin_lock_irq(&hw->hw_lock); | ||
2224 | hw->intr_mask &= ~portirqmask[skge->port]; | 2250 | hw->intr_mask &= ~portirqmask[skge->port]; |
2225 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2251 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2252 | spin_unlock_irq(&hw->hw_lock); | ||
2226 | 2253 | ||
2227 | /* Stop transmitter */ | 2254 | /* Stop transmitter */ |
2228 | skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); | 2255 | skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); |
@@ -2670,8 +2697,7 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2670 | 2697 | ||
2671 | /* restart receiver */ | 2698 | /* restart receiver */ |
2672 | wmb(); | 2699 | wmb(); |
2673 | skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), | 2700 | skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); |
2674 | CSR_START | CSR_IRQ_CL_F); | ||
2675 | 2701 | ||
2676 | *budget -= work_done; | 2702 | *budget -= work_done; |
2677 | dev->quota -= work_done; | 2703 | dev->quota -= work_done; |
@@ -2679,10 +2705,11 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2679 | if (work_done >= to_do) | 2705 | if (work_done >= to_do) |
2680 | return 1; /* not done */ | 2706 | return 1; /* not done */ |
2681 | 2707 | ||
2682 | netif_rx_complete(dev); | 2708 | spin_lock_irq(&hw->hw_lock); |
2683 | hw->intr_mask |= portirqmask[skge->port]; | 2709 | __netif_rx_complete(dev); |
2684 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2710 | hw->intr_mask |= portirqmask[skge->port]; |
2685 | skge_read32(hw, B0_IMSK); | 2711 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2712 | spin_unlock_irq(&hw->hw_lock); | ||
2686 | 2713 | ||
2687 | return 0; | 2714 | return 0; |
2688 | } | 2715 | } |
@@ -2842,18 +2869,10 @@ static void skge_extirq(unsigned long data) | |||
2842 | } | 2869 | } |
2843 | spin_unlock(&hw->phy_lock); | 2870 | spin_unlock(&hw->phy_lock); |
2844 | 2871 | ||
2845 | local_irq_disable(); | 2872 | spin_lock_irq(&hw->hw_lock); |
2846 | hw->intr_mask |= IS_EXT_REG; | 2873 | hw->intr_mask |= IS_EXT_REG; |
2847 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2874 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2848 | local_irq_enable(); | 2875 | spin_unlock_irq(&hw->hw_lock); |
2849 | } | ||
2850 | |||
2851 | static inline void skge_wakeup(struct net_device *dev) | ||
2852 | { | ||
2853 | struct skge_port *skge = netdev_priv(dev); | ||
2854 | |||
2855 | prefetch(skge->rx_ring.to_clean); | ||
2856 | netif_rx_schedule(dev); | ||
2857 | } | 2876 | } |
2858 | 2877 | ||
2859 | static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) | 2878 | static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) |
@@ -2864,15 +2883,17 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) | |||
2864 | if (status == 0 || status == ~0) /* hotplug or shared irq */ | 2883 | if (status == 0 || status == ~0) /* hotplug or shared irq */ |
2865 | return IRQ_NONE; | 2884 | return IRQ_NONE; |
2866 | 2885 | ||
2867 | status &= hw->intr_mask; | 2886 | spin_lock(&hw->hw_lock); |
2868 | if (status & IS_R1_F) { | 2887 | if (status & IS_R1_F) { |
2888 | skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F); | ||
2869 | hw->intr_mask &= ~IS_R1_F; | 2889 | hw->intr_mask &= ~IS_R1_F; |
2870 | skge_wakeup(hw->dev[0]); | 2890 | netif_rx_schedule(hw->dev[0]); |
2871 | } | 2891 | } |
2872 | 2892 | ||
2873 | if (status & IS_R2_F) { | 2893 | if (status & IS_R2_F) { |
2894 | skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F); | ||
2874 | hw->intr_mask &= ~IS_R2_F; | 2895 | hw->intr_mask &= ~IS_R2_F; |
2875 | skge_wakeup(hw->dev[1]); | 2896 | netif_rx_schedule(hw->dev[1]); |
2876 | } | 2897 | } |
2877 | 2898 | ||
2878 | if (status & IS_XA1_F) | 2899 | if (status & IS_XA1_F) |
@@ -2914,6 +2935,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) | |||
2914 | } | 2935 | } |
2915 | 2936 | ||
2916 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2937 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2938 | spin_unlock(&hw->hw_lock); | ||
2917 | 2939 | ||
2918 | return IRQ_HANDLED; | 2940 | return IRQ_HANDLED; |
2919 | } | 2941 | } |
@@ -3282,6 +3304,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3282 | 3304 | ||
3283 | hw->pdev = pdev; | 3305 | hw->pdev = pdev; |
3284 | spin_lock_init(&hw->phy_lock); | 3306 | spin_lock_init(&hw->phy_lock); |
3307 | spin_lock_init(&hw->hw_lock); | ||
3285 | tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); | 3308 | tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); |
3286 | 3309 | ||
3287 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); | 3310 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); |
diff --git a/drivers/net/skge.h b/drivers/net/skge.h index 2efdacc290e5..941f12a333b6 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h | |||
@@ -2402,6 +2402,7 @@ struct skge_hw { | |||
2402 | 2402 | ||
2403 | struct tasklet_struct ext_tasklet; | 2403 | struct tasklet_struct ext_tasklet; |
2404 | spinlock_t phy_lock; | 2404 | spinlock_t phy_lock; |
2405 | spinlock_t hw_lock; | ||
2405 | }; | 2406 | }; |
2406 | 2407 | ||
2407 | enum { | 2408 | enum { |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index cae2edf23004..73260364cba3 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -74,7 +74,7 @@ | |||
74 | #define TX_RING_SIZE 512 | 74 | #define TX_RING_SIZE 512 |
75 | #define TX_DEF_PENDING (TX_RING_SIZE - 1) | 75 | #define TX_DEF_PENDING (TX_RING_SIZE - 1) |
76 | #define TX_MIN_PENDING 64 | 76 | #define TX_MIN_PENDING 64 |
77 | #define MAX_SKB_TX_LE (4 + 2*MAX_SKB_FRAGS) | 77 | #define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS) |
78 | 78 | ||
79 | #define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */ | 79 | #define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */ |
80 | #define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le)) | 80 | #define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le)) |
@@ -96,10 +96,6 @@ static int copybreak __read_mostly = 256; | |||
96 | module_param(copybreak, int, 0); | 96 | module_param(copybreak, int, 0); |
97 | MODULE_PARM_DESC(copybreak, "Receive copy threshold"); | 97 | MODULE_PARM_DESC(copybreak, "Receive copy threshold"); |
98 | 98 | ||
99 | static int disable_msi = 0; | ||
100 | module_param(disable_msi, int, 0); | ||
101 | MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); | ||
102 | |||
103 | static const struct pci_device_id sky2_id_table[] = { | 99 | static const struct pci_device_id sky2_id_table[] = { |
104 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, | 100 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, |
105 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, | 101 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, |
@@ -195,11 +191,11 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
195 | pr_debug("sky2_set_power_state %d\n", state); | 191 | pr_debug("sky2_set_power_state %d\n", state); |
196 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | 192 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); |
197 | 193 | ||
198 | pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control); | 194 | power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC); |
199 | vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) && | 195 | vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) && |
200 | (power_control & PCI_PM_CAP_PME_D3cold); | 196 | (power_control & PCI_PM_CAP_PME_D3cold); |
201 | 197 | ||
202 | pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control); | 198 | power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL); |
203 | 199 | ||
204 | power_control |= PCI_PM_CTRL_PME_STATUS; | 200 | power_control |= PCI_PM_CTRL_PME_STATUS; |
205 | power_control &= ~(PCI_PM_CTRL_STATE_MASK); | 201 | power_control &= ~(PCI_PM_CTRL_STATE_MASK); |
@@ -223,7 +219,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
223 | sky2_write8(hw, B2_Y2_CLK_GATE, 0); | 219 | sky2_write8(hw, B2_Y2_CLK_GATE, 0); |
224 | 220 | ||
225 | /* Turn off phy power saving */ | 221 | /* Turn off phy power saving */ |
226 | pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®1); | 222 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); |
227 | reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); | 223 | reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); |
228 | 224 | ||
229 | /* looks like this XL is back asswards .. */ | 225 | /* looks like this XL is back asswards .. */ |
@@ -232,18 +228,28 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
232 | if (hw->ports > 1) | 228 | if (hw->ports > 1) |
233 | reg1 |= PCI_Y2_PHY2_COMA; | 229 | reg1 |= PCI_Y2_PHY2_COMA; |
234 | } | 230 | } |
235 | pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); | 231 | |
232 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { | ||
233 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); | ||
234 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); | ||
235 | reg1 &= P_ASPM_CONTROL_MSK; | ||
236 | sky2_pci_write32(hw, PCI_DEV_REG4, reg1); | ||
237 | sky2_pci_write32(hw, PCI_DEV_REG5, 0); | ||
238 | } | ||
239 | |||
240 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | ||
241 | |||
236 | break; | 242 | break; |
237 | 243 | ||
238 | case PCI_D3hot: | 244 | case PCI_D3hot: |
239 | case PCI_D3cold: | 245 | case PCI_D3cold: |
240 | /* Turn on phy power saving */ | 246 | /* Turn on phy power saving */ |
241 | pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®1); | 247 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); |
242 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) | 248 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) |
243 | reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); | 249 | reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); |
244 | else | 250 | else |
245 | reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); | 251 | reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); |
246 | pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); | 252 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
247 | 253 | ||
248 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) | 254 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) |
249 | sky2_write8(hw, B2_Y2_CLK_GATE, 0); | 255 | sky2_write8(hw, B2_Y2_CLK_GATE, 0); |
@@ -265,7 +271,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
265 | ret = -1; | 271 | ret = -1; |
266 | } | 272 | } |
267 | 273 | ||
268 | pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control); | 274 | sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control); |
269 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | 275 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); |
270 | return ret; | 276 | return ret; |
271 | } | 277 | } |
@@ -463,16 +469,31 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
463 | ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); | 469 | ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); |
464 | } | 470 | } |
465 | 471 | ||
466 | gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); | 472 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) { |
473 | /* apply fixes in PHY AFE */ | ||
474 | gm_phy_write(hw, port, 22, 255); | ||
475 | /* increase differential signal amplitude in 10BASE-T */ | ||
476 | gm_phy_write(hw, port, 24, 0xaa99); | ||
477 | gm_phy_write(hw, port, 23, 0x2011); | ||
467 | 478 | ||
468 | if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { | 479 | /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ |
469 | /* turn on 100 Mbps LED (LED_LINK100) */ | 480 | gm_phy_write(hw, port, 24, 0xa204); |
470 | ledover |= PHY_M_LED_MO_100(MO_LED_ON); | 481 | gm_phy_write(hw, port, 23, 0x2002); |
471 | } | ||
472 | 482 | ||
473 | if (ledover) | 483 | /* set page register to 0 */ |
474 | gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); | 484 | gm_phy_write(hw, port, 22, 0); |
485 | } else { | ||
486 | gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); | ||
487 | |||
488 | if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { | ||
489 | /* turn on 100 Mbps LED (LED_LINK100) */ | ||
490 | ledover |= PHY_M_LED_MO_100(MO_LED_ON); | ||
491 | } | ||
492 | |||
493 | if (ledover) | ||
494 | gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); | ||
475 | 495 | ||
496 | } | ||
476 | /* Enable phy interrupt on auto-negotiation complete (or link up) */ | 497 | /* Enable phy interrupt on auto-negotiation complete (or link up) */ |
477 | if (sky2->autoneg == AUTONEG_ENABLE) | 498 | if (sky2->autoneg == AUTONEG_ENABLE) |
478 | gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); | 499 | gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); |
@@ -520,10 +541,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
520 | 541 | ||
521 | switch (sky2->speed) { | 542 | switch (sky2->speed) { |
522 | case SPEED_1000: | 543 | case SPEED_1000: |
544 | reg &= ~GM_GPCR_SPEED_100; | ||
523 | reg |= GM_GPCR_SPEED_1000; | 545 | reg |= GM_GPCR_SPEED_1000; |
524 | /* fallthru */ | 546 | break; |
525 | case SPEED_100: | 547 | case SPEED_100: |
548 | reg &= ~GM_GPCR_SPEED_1000; | ||
526 | reg |= GM_GPCR_SPEED_100; | 549 | reg |= GM_GPCR_SPEED_100; |
550 | break; | ||
551 | case SPEED_10: | ||
552 | reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); | ||
553 | break; | ||
527 | } | 554 | } |
528 | 555 | ||
529 | if (sky2->duplex == DUPLEX_FULL) | 556 | if (sky2->duplex == DUPLEX_FULL) |
@@ -595,8 +622,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
595 | 622 | ||
596 | /* Configure Rx MAC FIFO */ | 623 | /* Configure Rx MAC FIFO */ |
597 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); | 624 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); |
598 | sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T), | 625 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), |
599 | GMF_RX_CTRL_DEF); | 626 | GMF_OPER_ON | GMF_RX_F_FL_ON); |
600 | 627 | ||
601 | /* Flush Rx MAC FIFO on any flow control or error */ | 628 | /* Flush Rx MAC FIFO on any flow control or error */ |
602 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); | 629 | sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); |
@@ -947,6 +974,12 @@ static int sky2_rx_start(struct sky2_port *sky2) | |||
947 | 974 | ||
948 | sky2->rx_put = sky2->rx_next = 0; | 975 | sky2->rx_put = sky2->rx_next = 0; |
949 | sky2_qset(hw, rxq); | 976 | sky2_qset(hw, rxq); |
977 | |||
978 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) { | ||
979 | /* MAC Rx RAM Read is controlled by hardware */ | ||
980 | sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS); | ||
981 | } | ||
982 | |||
950 | sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); | 983 | sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); |
951 | 984 | ||
952 | rx_set_checksum(sky2); | 985 | rx_set_checksum(sky2); |
@@ -962,6 +995,10 @@ static int sky2_rx_start(struct sky2_port *sky2) | |||
962 | sky2_rx_add(sky2, re->mapaddr); | 995 | sky2_rx_add(sky2, re->mapaddr); |
963 | } | 996 | } |
964 | 997 | ||
998 | /* Truncate oversize frames */ | ||
999 | sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), sky2->rx_bufsize - 8); | ||
1000 | sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON); | ||
1001 | |||
965 | /* Tell chip about available buffers */ | 1002 | /* Tell chip about available buffers */ |
966 | sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put); | 1003 | sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put); |
967 | sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX)); | 1004 | sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX)); |
@@ -1029,9 +1066,10 @@ static int sky2_up(struct net_device *dev) | |||
1029 | RB_RST_SET); | 1066 | RB_RST_SET); |
1030 | 1067 | ||
1031 | sky2_qset(hw, txqaddr[port]); | 1068 | sky2_qset(hw, txqaddr[port]); |
1032 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) | ||
1033 | sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0); | ||
1034 | 1069 | ||
1070 | /* Set almost empty threshold */ | ||
1071 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1) | ||
1072 | sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0); | ||
1035 | 1073 | ||
1036 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, | 1074 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, |
1037 | TX_RING_SIZE - 1); | 1075 | TX_RING_SIZE - 1); |
@@ -1041,8 +1079,10 @@ static int sky2_up(struct net_device *dev) | |||
1041 | goto err_out; | 1079 | goto err_out; |
1042 | 1080 | ||
1043 | /* Enable interrupts from phy/mac for port */ | 1081 | /* Enable interrupts from phy/mac for port */ |
1082 | spin_lock_irq(&hw->hw_lock); | ||
1044 | hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; | 1083 | hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; |
1045 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | 1084 | sky2_write32(hw, B0_IMSK, hw->intr_mask); |
1085 | spin_unlock_irq(&hw->hw_lock); | ||
1046 | return 0; | 1086 | return 0; |
1047 | 1087 | ||
1048 | err_out: | 1088 | err_out: |
@@ -1109,6 +1149,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1109 | struct sky2_tx_le *le = NULL; | 1149 | struct sky2_tx_le *le = NULL; |
1110 | struct tx_ring_info *re; | 1150 | struct tx_ring_info *re; |
1111 | unsigned i, len; | 1151 | unsigned i, len; |
1152 | int avail; | ||
1112 | dma_addr_t mapping; | 1153 | dma_addr_t mapping; |
1113 | u32 addr64; | 1154 | u32 addr64; |
1114 | u16 mss; | 1155 | u16 mss; |
@@ -1251,12 +1292,16 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1251 | re->idx = sky2->tx_prod; | 1292 | re->idx = sky2->tx_prod; |
1252 | le->ctrl |= EOP; | 1293 | le->ctrl |= EOP; |
1253 | 1294 | ||
1295 | avail = tx_avail(sky2); | ||
1296 | if (mss != 0 || avail < TX_MIN_PENDING) { | ||
1297 | le->ctrl |= FRC_STAT; | ||
1298 | if (avail <= MAX_SKB_TX_LE) | ||
1299 | netif_stop_queue(dev); | ||
1300 | } | ||
1301 | |||
1254 | sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod, | 1302 | sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod, |
1255 | &sky2->tx_last_put, TX_RING_SIZE); | 1303 | &sky2->tx_last_put, TX_RING_SIZE); |
1256 | 1304 | ||
1257 | if (tx_avail(sky2) <= MAX_SKB_TX_LE) | ||
1258 | netif_stop_queue(dev); | ||
1259 | |||
1260 | out_unlock: | 1305 | out_unlock: |
1261 | spin_unlock(&sky2->tx_lock); | 1306 | spin_unlock(&sky2->tx_lock); |
1262 | 1307 | ||
@@ -1342,10 +1387,10 @@ static int sky2_down(struct net_device *dev) | |||
1342 | netif_stop_queue(dev); | 1387 | netif_stop_queue(dev); |
1343 | 1388 | ||
1344 | /* Disable port IRQ */ | 1389 | /* Disable port IRQ */ |
1345 | local_irq_disable(); | 1390 | spin_lock_irq(&hw->hw_lock); |
1346 | hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); | 1391 | hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); |
1347 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | 1392 | sky2_write32(hw, B0_IMSK, hw->intr_mask); |
1348 | local_irq_enable(); | 1393 | spin_unlock_irq(&hw->hw_lock); |
1349 | 1394 | ||
1350 | flush_scheduled_work(); | 1395 | flush_scheduled_work(); |
1351 | 1396 | ||
@@ -1446,6 +1491,29 @@ static void sky2_link_up(struct sky2_port *sky2) | |||
1446 | sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); | 1491 | sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); |
1447 | 1492 | ||
1448 | reg = gma_read16(hw, port, GM_GP_CTRL); | 1493 | reg = gma_read16(hw, port, GM_GP_CTRL); |
1494 | if (sky2->autoneg == AUTONEG_DISABLE) { | ||
1495 | reg |= GM_GPCR_AU_ALL_DIS; | ||
1496 | |||
1497 | /* Is write/read necessary? Copied from sky2_mac_init */ | ||
1498 | gma_write16(hw, port, GM_GP_CTRL, reg); | ||
1499 | gma_read16(hw, port, GM_GP_CTRL); | ||
1500 | |||
1501 | switch (sky2->speed) { | ||
1502 | case SPEED_1000: | ||
1503 | reg &= ~GM_GPCR_SPEED_100; | ||
1504 | reg |= GM_GPCR_SPEED_1000; | ||
1505 | break; | ||
1506 | case SPEED_100: | ||
1507 | reg &= ~GM_GPCR_SPEED_1000; | ||
1508 | reg |= GM_GPCR_SPEED_100; | ||
1509 | break; | ||
1510 | case SPEED_10: | ||
1511 | reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); | ||
1512 | break; | ||
1513 | } | ||
1514 | } else | ||
1515 | reg &= ~GM_GPCR_AU_ALL_DIS; | ||
1516 | |||
1449 | if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE) | 1517 | if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE) |
1450 | reg |= GM_GPCR_DUP_FULL; | 1518 | reg |= GM_GPCR_DUP_FULL; |
1451 | 1519 | ||
@@ -1604,10 +1672,10 @@ static void sky2_phy_task(void *arg) | |||
1604 | out: | 1672 | out: |
1605 | up(&sky2->phy_sema); | 1673 | up(&sky2->phy_sema); |
1606 | 1674 | ||
1607 | local_irq_disable(); | 1675 | spin_lock_irq(&hw->hw_lock); |
1608 | hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2; | 1676 | hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2; |
1609 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | 1677 | sky2_write32(hw, B0_IMSK, hw->intr_mask); |
1610 | local_irq_enable(); | 1678 | spin_unlock_irq(&hw->hw_lock); |
1611 | } | 1679 | } |
1612 | 1680 | ||
1613 | 1681 | ||
@@ -1648,10 +1716,12 @@ static void sky2_tx_timeout(struct net_device *dev) | |||
1648 | 1716 | ||
1649 | 1717 | ||
1650 | #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) | 1718 | #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) |
1651 | /* Want receive buffer size to be multiple of 64 bits, and incl room for vlan */ | 1719 | /* Want receive buffer size to be multiple of 64 bits |
1720 | * and incl room for vlan and truncation | ||
1721 | */ | ||
1652 | static inline unsigned sky2_buf_size(int mtu) | 1722 | static inline unsigned sky2_buf_size(int mtu) |
1653 | { | 1723 | { |
1654 | return roundup(mtu + ETH_HLEN + 4, 8); | 1724 | return roundup(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8; |
1655 | } | 1725 | } |
1656 | 1726 | ||
1657 | static int sky2_change_mtu(struct net_device *dev, int new_mtu) | 1727 | static int sky2_change_mtu(struct net_device *dev, int new_mtu) |
@@ -1734,7 +1804,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2, | |||
1734 | if (!(status & GMR_FS_RX_OK)) | 1804 | if (!(status & GMR_FS_RX_OK)) |
1735 | goto resubmit; | 1805 | goto resubmit; |
1736 | 1806 | ||
1737 | if ((status >> 16) != length || length > sky2->rx_bufsize) | 1807 | if (length > sky2->netdev->mtu + ETH_HLEN) |
1738 | goto oversize; | 1808 | goto oversize; |
1739 | 1809 | ||
1740 | if (length < copybreak) { | 1810 | if (length < copybreak) { |
@@ -1834,6 +1904,17 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
1834 | 1904 | ||
1835 | sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); | 1905 | sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); |
1836 | 1906 | ||
1907 | /* | ||
1908 | * Kick the STAT_LEV_TIMER_CTRL timer. | ||
1909 | * This fixes my hangs on Yukon-EC (0xb6) rev 1. | ||
1910 | * The if clause is there to start the timer only if it has been | ||
1911 | * configured correctly and not been disabled via ethtool. | ||
1912 | */ | ||
1913 | if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) { | ||
1914 | sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP); | ||
1915 | sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); | ||
1916 | } | ||
1917 | |||
1837 | hwidx = sky2_read16(hw, STAT_PUT_IDX); | 1918 | hwidx = sky2_read16(hw, STAT_PUT_IDX); |
1838 | BUG_ON(hwidx >= STATUS_RING_SIZE); | 1919 | BUG_ON(hwidx >= STATUS_RING_SIZE); |
1839 | rmb(); | 1920 | rmb(); |
@@ -1916,16 +1997,19 @@ exit_loop: | |||
1916 | sky2_tx_check(hw, 0, tx_done[0]); | 1997 | sky2_tx_check(hw, 0, tx_done[0]); |
1917 | sky2_tx_check(hw, 1, tx_done[1]); | 1998 | sky2_tx_check(hw, 1, tx_done[1]); |
1918 | 1999 | ||
2000 | if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) { | ||
2001 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); | ||
2002 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); | ||
2003 | } | ||
2004 | |||
1919 | if (likely(work_done < to_do)) { | 2005 | if (likely(work_done < to_do)) { |
1920 | /* need to restart TX timer */ | 2006 | spin_lock_irq(&hw->hw_lock); |
1921 | if (is_ec_a1(hw)) { | 2007 | __netif_rx_complete(dev0); |
1922 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); | ||
1923 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); | ||
1924 | } | ||
1925 | 2008 | ||
1926 | netif_rx_complete(dev0); | ||
1927 | hw->intr_mask |= Y2_IS_STAT_BMU; | 2009 | hw->intr_mask |= Y2_IS_STAT_BMU; |
1928 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | 2010 | sky2_write32(hw, B0_IMSK, hw->intr_mask); |
2011 | spin_unlock_irq(&hw->hw_lock); | ||
2012 | |||
1929 | return 0; | 2013 | return 0; |
1930 | } else { | 2014 | } else { |
1931 | *budget -= work_done; | 2015 | *budget -= work_done; |
@@ -1988,13 +2072,13 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
1988 | if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { | 2072 | if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { |
1989 | u16 pci_err; | 2073 | u16 pci_err; |
1990 | 2074 | ||
1991 | pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err); | 2075 | pci_err = sky2_pci_read16(hw, PCI_STATUS); |
1992 | if (net_ratelimit()) | 2076 | if (net_ratelimit()) |
1993 | printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", | 2077 | printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", |
1994 | pci_name(hw->pdev), pci_err); | 2078 | pci_name(hw->pdev), pci_err); |
1995 | 2079 | ||
1996 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | 2080 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); |
1997 | pci_write_config_word(hw->pdev, PCI_STATUS, | 2081 | sky2_pci_write16(hw, PCI_STATUS, |
1998 | pci_err | PCI_STATUS_ERROR_BITS); | 2082 | pci_err | PCI_STATUS_ERROR_BITS); |
1999 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | 2083 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); |
2000 | } | 2084 | } |
@@ -2003,7 +2087,7 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2003 | /* PCI-Express uncorrectable Error occurred */ | 2087 | /* PCI-Express uncorrectable Error occurred */ |
2004 | u32 pex_err; | 2088 | u32 pex_err; |
2005 | 2089 | ||
2006 | pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err); | 2090 | pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT); |
2007 | 2091 | ||
2008 | if (net_ratelimit()) | 2092 | if (net_ratelimit()) |
2009 | printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", | 2093 | printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", |
@@ -2011,7 +2095,7 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2011 | 2095 | ||
2012 | /* clear the interrupt */ | 2096 | /* clear the interrupt */ |
2013 | sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | 2097 | sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); |
2014 | pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT, | 2098 | sky2_pci_write32(hw, PEX_UNC_ERR_STAT, |
2015 | 0xffffffffUL); | 2099 | 0xffffffffUL); |
2016 | sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | 2100 | sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); |
2017 | 2101 | ||
@@ -2057,6 +2141,7 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) | |||
2057 | 2141 | ||
2058 | hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); | 2142 | hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); |
2059 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | 2143 | sky2_write32(hw, B0_IMSK, hw->intr_mask); |
2144 | |||
2060 | schedule_work(&sky2->phy_task); | 2145 | schedule_work(&sky2->phy_task); |
2061 | } | 2146 | } |
2062 | 2147 | ||
@@ -2070,6 +2155,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) | |||
2070 | if (status == 0 || status == ~0) | 2155 | if (status == 0 || status == ~0) |
2071 | return IRQ_NONE; | 2156 | return IRQ_NONE; |
2072 | 2157 | ||
2158 | spin_lock(&hw->hw_lock); | ||
2073 | if (status & Y2_IS_HW_ERR) | 2159 | if (status & Y2_IS_HW_ERR) |
2074 | sky2_hw_intr(hw); | 2160 | sky2_hw_intr(hw); |
2075 | 2161 | ||
@@ -2098,7 +2184,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) | |||
2098 | 2184 | ||
2099 | sky2_write32(hw, B0_Y2_SP_ICR, 2); | 2185 | sky2_write32(hw, B0_Y2_SP_ICR, 2); |
2100 | 2186 | ||
2101 | sky2_read32(hw, B0_IMSK); | 2187 | spin_unlock(&hw->hw_lock); |
2102 | 2188 | ||
2103 | return IRQ_HANDLED; | 2189 | return IRQ_HANDLED; |
2104 | } | 2190 | } |
@@ -2141,7 +2227,7 @@ static int sky2_reset(struct sky2_hw *hw) | |||
2141 | { | 2227 | { |
2142 | u16 status; | 2228 | u16 status; |
2143 | u8 t8, pmd_type; | 2229 | u8 t8, pmd_type; |
2144 | int i, err; | 2230 | int i; |
2145 | 2231 | ||
2146 | sky2_write8(hw, B0_CTST, CS_RST_CLR); | 2232 | sky2_write8(hw, B0_CTST, CS_RST_CLR); |
2147 | 2233 | ||
@@ -2163,25 +2249,18 @@ static int sky2_reset(struct sky2_hw *hw) | |||
2163 | sky2_write8(hw, B0_CTST, CS_RST_CLR); | 2249 | sky2_write8(hw, B0_CTST, CS_RST_CLR); |
2164 | 2250 | ||
2165 | /* clear PCI errors, if any */ | 2251 | /* clear PCI errors, if any */ |
2166 | err = pci_read_config_word(hw->pdev, PCI_STATUS, &status); | 2252 | status = sky2_pci_read16(hw, PCI_STATUS); |
2167 | if (err) | ||
2168 | goto pci_err; | ||
2169 | 2253 | ||
2170 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | 2254 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); |
2171 | err = pci_write_config_word(hw->pdev, PCI_STATUS, | 2255 | sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS); |
2172 | status | PCI_STATUS_ERROR_BITS); | 2256 | |
2173 | if (err) | ||
2174 | goto pci_err; | ||
2175 | 2257 | ||
2176 | sky2_write8(hw, B0_CTST, CS_MRST_CLR); | 2258 | sky2_write8(hw, B0_CTST, CS_MRST_CLR); |
2177 | 2259 | ||
2178 | /* clear any PEX errors */ | 2260 | /* clear any PEX errors */ |
2179 | if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) { | 2261 | if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) |
2180 | err = pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT, | 2262 | sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); |
2181 | 0xffffffffUL); | 2263 | |
2182 | if (err) | ||
2183 | goto pci_err; | ||
2184 | } | ||
2185 | 2264 | ||
2186 | pmd_type = sky2_read8(hw, B2_PMD_TYP); | 2265 | pmd_type = sky2_read8(hw, B2_PMD_TYP); |
2187 | hw->copper = !(pmd_type == 'L' || pmd_type == 'S'); | 2266 | hw->copper = !(pmd_type == 'L' || pmd_type == 'S'); |
@@ -2280,8 +2359,7 @@ static int sky2_reset(struct sky2_hw *hw) | |||
2280 | sky2_write8(hw, STAT_FIFO_ISR_WM, 16); | 2359 | sky2_write8(hw, STAT_FIFO_ISR_WM, 16); |
2281 | 2360 | ||
2282 | sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); | 2361 | sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); |
2283 | sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); | 2362 | sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7)); |
2284 | sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20)); | ||
2285 | } | 2363 | } |
2286 | 2364 | ||
2287 | /* enable status unit */ | 2365 | /* enable status unit */ |
@@ -2292,14 +2370,6 @@ static int sky2_reset(struct sky2_hw *hw) | |||
2292 | sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); | 2370 | sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); |
2293 | 2371 | ||
2294 | return 0; | 2372 | return 0; |
2295 | |||
2296 | pci_err: | ||
2297 | /* This is to catch a BIOS bug workaround where | ||
2298 | * mmconfig table doesn't have other buses. | ||
2299 | */ | ||
2300 | printk(KERN_ERR PFX "%s: can't access PCI config space\n", | ||
2301 | pci_name(hw->pdev)); | ||
2302 | return err; | ||
2303 | } | 2373 | } |
2304 | 2374 | ||
2305 | static u32 sky2_supported_modes(const struct sky2_hw *hw) | 2375 | static u32 sky2_supported_modes(const struct sky2_hw *hw) |
@@ -2823,11 +2893,11 @@ static int sky2_set_coalesce(struct net_device *dev, | |||
2823 | (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax)) | 2893 | (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax)) |
2824 | return -EINVAL; | 2894 | return -EINVAL; |
2825 | 2895 | ||
2826 | if (ecmd->tx_max_coalesced_frames > 0xffff) | 2896 | if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1) |
2827 | return -EINVAL; | 2897 | return -EINVAL; |
2828 | if (ecmd->rx_max_coalesced_frames > 0xff) | 2898 | if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING) |
2829 | return -EINVAL; | 2899 | return -EINVAL; |
2830 | if (ecmd->rx_max_coalesced_frames_irq > 0xff) | 2900 | if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING) |
2831 | return -EINVAL; | 2901 | return -EINVAL; |
2832 | 2902 | ||
2833 | if (ecmd->tx_coalesce_usecs == 0) | 2903 | if (ecmd->tx_coalesce_usecs == 0) |
@@ -3063,61 +3133,6 @@ static void __devinit sky2_show_addr(struct net_device *dev) | |||
3063 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); | 3133 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); |
3064 | } | 3134 | } |
3065 | 3135 | ||
3066 | /* Handle software interrupt used during MSI test */ | ||
3067 | static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id, | ||
3068 | struct pt_regs *regs) | ||
3069 | { | ||
3070 | struct sky2_hw *hw = dev_id; | ||
3071 | u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2); | ||
3072 | |||
3073 | if (status == 0) | ||
3074 | return IRQ_NONE; | ||
3075 | |||
3076 | if (status & Y2_IS_IRQ_SW) { | ||
3077 | sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); | ||
3078 | hw->msi = 1; | ||
3079 | } | ||
3080 | sky2_write32(hw, B0_Y2_SP_ICR, 2); | ||
3081 | |||
3082 | sky2_read32(hw, B0_IMSK); | ||
3083 | return IRQ_HANDLED; | ||
3084 | } | ||
3085 | |||
3086 | /* Test interrupt path by forcing a software IRQ */ | ||
3087 | static int __devinit sky2_test_msi(struct sky2_hw *hw) | ||
3088 | { | ||
3089 | struct pci_dev *pdev = hw->pdev; | ||
3090 | int i, err; | ||
3091 | |||
3092 | sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); | ||
3093 | |||
3094 | err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw); | ||
3095 | if (err) { | ||
3096 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", | ||
3097 | pci_name(pdev), pdev->irq); | ||
3098 | return err; | ||
3099 | } | ||
3100 | |||
3101 | sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); | ||
3102 | wmb(); | ||
3103 | |||
3104 | for (i = 0; i < 10; i++) { | ||
3105 | barrier(); | ||
3106 | if (hw->msi) | ||
3107 | goto found; | ||
3108 | mdelay(1); | ||
3109 | } | ||
3110 | |||
3111 | err = -EOPNOTSUPP; | ||
3112 | sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); | ||
3113 | found: | ||
3114 | sky2_write32(hw, B0_IMSK, 0); | ||
3115 | |||
3116 | free_irq(pdev->irq, hw); | ||
3117 | |||
3118 | return err; | ||
3119 | } | ||
3120 | |||
3121 | static int __devinit sky2_probe(struct pci_dev *pdev, | 3136 | static int __devinit sky2_probe(struct pci_dev *pdev, |
3122 | const struct pci_device_id *ent) | 3137 | const struct pci_device_id *ent) |
3123 | { | 3138 | { |
@@ -3169,17 +3184,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
3169 | } | 3184 | } |
3170 | } | 3185 | } |
3171 | 3186 | ||
3172 | #ifdef __BIG_ENDIAN | ||
3173 | /* byte swap descriptors in hardware */ | ||
3174 | { | ||
3175 | u32 reg; | ||
3176 | |||
3177 | pci_read_config_dword(pdev, PCI_DEV_REG2, ®); | ||
3178 | reg |= PCI_REV_DESC; | ||
3179 | pci_write_config_dword(pdev, PCI_DEV_REG2, reg); | ||
3180 | } | ||
3181 | #endif | ||
3182 | |||
3183 | err = -ENOMEM; | 3187 | err = -ENOMEM; |
3184 | hw = kzalloc(sizeof(*hw), GFP_KERNEL); | 3188 | hw = kzalloc(sizeof(*hw), GFP_KERNEL); |
3185 | if (!hw) { | 3189 | if (!hw) { |
@@ -3197,6 +3201,18 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
3197 | goto err_out_free_hw; | 3201 | goto err_out_free_hw; |
3198 | } | 3202 | } |
3199 | hw->pm_cap = pm_cap; | 3203 | hw->pm_cap = pm_cap; |
3204 | spin_lock_init(&hw->hw_lock); | ||
3205 | |||
3206 | #ifdef __BIG_ENDIAN | ||
3207 | /* byte swap descriptors in hardware */ | ||
3208 | { | ||
3209 | u32 reg; | ||
3210 | |||
3211 | reg = sky2_pci_read32(hw, PCI_DEV_REG2); | ||
3212 | reg |= PCI_REV_DESC; | ||
3213 | sky2_pci_write32(hw, PCI_DEV_REG2, reg); | ||
3214 | } | ||
3215 | #endif | ||
3200 | 3216 | ||
3201 | /* ring for status responses */ | 3217 | /* ring for status responses */ |
3202 | hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES, | 3218 | hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES, |
@@ -3238,22 +3254,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
3238 | } | 3254 | } |
3239 | } | 3255 | } |
3240 | 3256 | ||
3241 | if (!disable_msi && pci_enable_msi(pdev) == 0) { | 3257 | err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw); |
3242 | err = sky2_test_msi(hw); | ||
3243 | if (err == -EOPNOTSUPP) { | ||
3244 | /* MSI test failed, go back to INTx mode */ | ||
3245 | printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, " | ||
3246 | "switching to INTx mode. Please report this failure to " | ||
3247 | "the PCI maintainer and include system chipset information.\n", | ||
3248 | pci_name(pdev)); | ||
3249 | pci_disable_msi(pdev); | ||
3250 | } | ||
3251 | else if (err) | ||
3252 | goto err_out_unregister; | ||
3253 | } | ||
3254 | |||
3255 | err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ | SA_SAMPLE_RANDOM, | ||
3256 | DRV_NAME, hw); | ||
3257 | if (err) { | 3258 | if (err) { |
3258 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", | 3259 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", |
3259 | pci_name(pdev), pdev->irq); | 3260 | pci_name(pdev), pdev->irq); |
@@ -3268,8 +3269,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
3268 | return 0; | 3269 | return 0; |
3269 | 3270 | ||
3270 | err_out_unregister: | 3271 | err_out_unregister: |
3271 | if (hw->msi) | ||
3272 | pci_disable_msi(pdev); | ||
3273 | if (dev1) { | 3272 | if (dev1) { |
3274 | unregister_netdev(dev1); | 3273 | unregister_netdev(dev1); |
3275 | free_netdev(dev1); | 3274 | free_netdev(dev1); |
@@ -3312,8 +3311,6 @@ static void __devexit sky2_remove(struct pci_dev *pdev) | |||
3312 | sky2_read8(hw, B0_CTST); | 3311 | sky2_read8(hw, B0_CTST); |
3313 | 3312 | ||
3314 | free_irq(pdev->irq, hw); | 3313 | free_irq(pdev->irq, hw); |
3315 | if (hw->msi) | ||
3316 | pci_disable_msi(pdev); | ||
3317 | pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); | 3314 | pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); |
3318 | pci_release_regions(pdev); | 3315 | pci_release_regions(pdev); |
3319 | pci_disable_device(pdev); | 3316 | pci_disable_device(pdev); |
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index fd12c289a238..dce955c76f3c 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -5,14 +5,22 @@ | |||
5 | #define _SKY2_H | 5 | #define _SKY2_H |
6 | 6 | ||
7 | /* PCI config registers */ | 7 | /* PCI config registers */ |
8 | #define PCI_DEV_REG1 0x40 | 8 | enum { |
9 | #define PCI_DEV_REG2 0x44 | 9 | PCI_DEV_REG1 = 0x40, |
10 | #define PCI_DEV_STATUS 0x7c | 10 | PCI_DEV_REG2 = 0x44, |
11 | #define PCI_OS_PCI_X (1<<26) | 11 | PCI_DEV_STATUS = 0x7c, |
12 | PCI_DEV_REG3 = 0x80, | ||
13 | PCI_DEV_REG4 = 0x84, | ||
14 | PCI_DEV_REG5 = 0x88, | ||
15 | }; | ||
12 | 16 | ||
13 | #define PEX_LNK_STAT 0xf2 | 17 | enum { |
14 | #define PEX_UNC_ERR_STAT 0x104 | 18 | PEX_DEV_CAP = 0xe4, |
15 | #define PEX_DEV_CTRL 0xe8 | 19 | PEX_DEV_CTRL = 0xe8, |
20 | PEX_DEV_STA = 0xea, | ||
21 | PEX_LNK_STAT = 0xf2, | ||
22 | PEX_UNC_ERR_STAT= 0x104, | ||
23 | }; | ||
16 | 24 | ||
17 | /* Yukon-2 */ | 25 | /* Yukon-2 */ |
18 | enum pci_dev_reg_1 { | 26 | enum pci_dev_reg_1 { |
@@ -37,6 +45,25 @@ enum pci_dev_reg_2 { | |||
37 | PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ | 45 | PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ |
38 | }; | 46 | }; |
39 | 47 | ||
48 | /* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ | ||
49 | enum pci_dev_reg_4 { | ||
50 | /* (Link Training & Status State Machine) */ | ||
51 | P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */ | ||
52 | /* (Active State Power Management) */ | ||
53 | P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */ | ||
54 | P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */ | ||
55 | P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */ | ||
56 | P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */ | ||
57 | |||
58 | P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */ | ||
59 | P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */ | ||
60 | P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */ | ||
61 | P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */ | ||
62 | P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */ | ||
63 | P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN | ||
64 | | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY, | ||
65 | }; | ||
66 | |||
40 | 67 | ||
41 | #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ | 68 | #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ |
42 | PCI_STATUS_SIG_SYSTEM_ERROR | \ | 69 | PCI_STATUS_SIG_SYSTEM_ERROR | \ |
@@ -507,6 +534,16 @@ enum { | |||
507 | }; | 534 | }; |
508 | #define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) | 535 | #define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) |
509 | 536 | ||
537 | /* Q_F 32 bit Flag Register */ | ||
538 | enum { | ||
539 | F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */ | ||
540 | F_EMPTY = 1<<27, /* Tx FIFO: empty flag */ | ||
541 | F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */ | ||
542 | F_WM_REACHED = 1<<25, /* Watermark reached */ | ||
543 | F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */ | ||
544 | F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */ | ||
545 | F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */ | ||
546 | }; | ||
510 | 547 | ||
511 | /* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ | 548 | /* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ |
512 | enum { | 549 | enum { |
@@ -909,10 +946,12 @@ enum { | |||
909 | PHY_BCOM_ID1_C0 = 0x6044, | 946 | PHY_BCOM_ID1_C0 = 0x6044, |
910 | PHY_BCOM_ID1_C5 = 0x6047, | 947 | PHY_BCOM_ID1_C5 = 0x6047, |
911 | 948 | ||
912 | PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ | 949 | PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ |
913 | PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ | 950 | PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ |
914 | PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ | 951 | PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ |
915 | PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ | 952 | PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ |
953 | PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */ | ||
954 | PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */ | ||
916 | }; | 955 | }; |
917 | 956 | ||
918 | /* Advertisement register bits */ | 957 | /* Advertisement register bits */ |
@@ -1837,11 +1876,11 @@ struct sky2_port { | |||
1837 | struct sky2_hw { | 1876 | struct sky2_hw { |
1838 | void __iomem *regs; | 1877 | void __iomem *regs; |
1839 | struct pci_dev *pdev; | 1878 | struct pci_dev *pdev; |
1840 | u32 intr_mask; | ||
1841 | struct net_device *dev[2]; | 1879 | struct net_device *dev[2]; |
1880 | spinlock_t hw_lock; | ||
1881 | u32 intr_mask; | ||
1842 | 1882 | ||
1843 | int pm_cap; | 1883 | int pm_cap; |
1844 | int msi; | ||
1845 | u8 chip_id; | 1884 | u8 chip_id; |
1846 | u8 chip_rev; | 1885 | u8 chip_rev; |
1847 | u8 copper; | 1886 | u8 copper; |
@@ -1912,4 +1951,25 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg, | |||
1912 | gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); | 1951 | gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); |
1913 | gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); | 1952 | gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); |
1914 | } | 1953 | } |
1954 | |||
1955 | /* PCI config space access */ | ||
1956 | static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg) | ||
1957 | { | ||
1958 | return sky2_read32(hw, Y2_CFG_SPC + reg); | ||
1959 | } | ||
1960 | |||
1961 | static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg) | ||
1962 | { | ||
1963 | return sky2_read16(hw, Y2_CFG_SPC + reg); | ||
1964 | } | ||
1965 | |||
1966 | static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val) | ||
1967 | { | ||
1968 | sky2_write32(hw, Y2_CFG_SPC + reg, val); | ||
1969 | } | ||
1970 | |||
1971 | static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val) | ||
1972 | { | ||
1973 | sky2_write16(hw, Y2_CFG_SPC + reg, val); | ||
1974 | } | ||
1915 | #endif | 1975 | #endif |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index e7dc653d5bd6..b8f1524da557 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -3532,9 +3532,23 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) | |||
3532 | (base + len + 8 < base)); | 3532 | (base + len + 8 < base)); |
3533 | } | 3533 | } |
3534 | 3534 | ||
3535 | /* Test for DMA addresses > 40-bit */ | ||
3536 | static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, | ||
3537 | int len) | ||
3538 | { | ||
3539 | #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) | ||
3540 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) | ||
3541 | return (((u64) mapping + len) > DMA_40BIT_MASK); | ||
3542 | return 0; | ||
3543 | #else | ||
3544 | return 0; | ||
3545 | #endif | ||
3546 | } | ||
3547 | |||
3535 | static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32); | 3548 | static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32); |
3536 | 3549 | ||
3537 | static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, | 3550 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ |
3551 | static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, | ||
3538 | u32 last_plus_one, u32 *start, | 3552 | u32 last_plus_one, u32 *start, |
3539 | u32 base_flags, u32 mss) | 3553 | u32 base_flags, u32 mss) |
3540 | { | 3554 | { |
@@ -3742,6 +3756,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3742 | if (tg3_4g_overflow_test(mapping, len)) | 3756 | if (tg3_4g_overflow_test(mapping, len)) |
3743 | would_hit_hwbug = 1; | 3757 | would_hit_hwbug = 1; |
3744 | 3758 | ||
3759 | if (tg3_40bit_overflow_test(tp, mapping, len)) | ||
3760 | would_hit_hwbug = 1; | ||
3761 | |||
3745 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 3762 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
3746 | tg3_set_txd(tp, entry, mapping, len, | 3763 | tg3_set_txd(tp, entry, mapping, len, |
3747 | base_flags, (i == last)|(mss << 1)); | 3764 | base_flags, (i == last)|(mss << 1)); |
@@ -3763,7 +3780,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3763 | /* If the workaround fails due to memory/mapping | 3780 | /* If the workaround fails due to memory/mapping |
3764 | * failure, silently drop this packet. | 3781 | * failure, silently drop this packet. |
3765 | */ | 3782 | */ |
3766 | if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one, | 3783 | if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, |
3767 | &start, base_flags, mss)) | 3784 | &start, base_flags, mss)) |
3768 | goto out_unlock; | 3785 | goto out_unlock; |
3769 | 3786 | ||
@@ -9408,6 +9425,15 @@ static int __devinit tg3_is_sun_570X(struct tg3 *tp) | |||
9408 | return 0; | 9425 | return 0; |
9409 | if (venid == PCI_VENDOR_ID_SUN) | 9426 | if (venid == PCI_VENDOR_ID_SUN) |
9410 | return 1; | 9427 | return 1; |
9428 | |||
9429 | /* TG3 chips onboard the SunBlade-2500 don't have the | ||
9430 | * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they | ||
9431 | * are distinguishable from non-Sun variants by being | ||
9432 | * named "network" by the firmware. Non-Sun cards will | ||
9433 | * show up as being named "ethernet". | ||
9434 | */ | ||
9435 | if (!strcmp(pcp->prom_name, "network")) | ||
9436 | return 1; | ||
9411 | } | 9437 | } |
9412 | return 0; | 9438 | return 0; |
9413 | } | 9439 | } |
@@ -10517,8 +10543,6 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) | |||
10517 | strcat(str, "66MHz"); | 10543 | strcat(str, "66MHz"); |
10518 | else if (clock_ctrl == 6) | 10544 | else if (clock_ctrl == 6) |
10519 | strcat(str, "100MHz"); | 10545 | strcat(str, "100MHz"); |
10520 | else if (clock_ctrl == 7) | ||
10521 | strcat(str, "133MHz"); | ||
10522 | } else { | 10546 | } else { |
10523 | strcpy(str, "PCI:"); | 10547 | strcpy(str, "PCI:"); |
10524 | if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) | 10548 | if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) |
@@ -10599,8 +10623,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
10599 | unsigned long tg3reg_base, tg3reg_len; | 10623 | unsigned long tg3reg_base, tg3reg_len; |
10600 | struct net_device *dev; | 10624 | struct net_device *dev; |
10601 | struct tg3 *tp; | 10625 | struct tg3 *tp; |
10602 | int i, err, pci_using_dac, pm_cap; | 10626 | int i, err, pm_cap; |
10603 | char str[40]; | 10627 | char str[40]; |
10628 | u64 dma_mask, persist_dma_mask; | ||
10604 | 10629 | ||
10605 | if (tg3_version_printed++ == 0) | 10630 | if (tg3_version_printed++ == 0) |
10606 | printk(KERN_INFO "%s", version); | 10631 | printk(KERN_INFO "%s", version); |
@@ -10637,26 +10662,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
10637 | goto err_out_free_res; | 10662 | goto err_out_free_res; |
10638 | } | 10663 | } |
10639 | 10664 | ||
10640 | /* Configure DMA attributes. */ | ||
10641 | err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | ||
10642 | if (!err) { | ||
10643 | pci_using_dac = 1; | ||
10644 | err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | ||
10645 | if (err < 0) { | ||
10646 | printk(KERN_ERR PFX "Unable to obtain 64 bit DMA " | ||
10647 | "for consistent allocations\n"); | ||
10648 | goto err_out_free_res; | ||
10649 | } | ||
10650 | } else { | ||
10651 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
10652 | if (err) { | ||
10653 | printk(KERN_ERR PFX "No usable DMA configuration, " | ||
10654 | "aborting.\n"); | ||
10655 | goto err_out_free_res; | ||
10656 | } | ||
10657 | pci_using_dac = 0; | ||
10658 | } | ||
10659 | |||
10660 | tg3reg_base = pci_resource_start(pdev, 0); | 10665 | tg3reg_base = pci_resource_start(pdev, 0); |
10661 | tg3reg_len = pci_resource_len(pdev, 0); | 10666 | tg3reg_len = pci_resource_len(pdev, 0); |
10662 | 10667 | ||
@@ -10670,8 +10675,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
10670 | SET_MODULE_OWNER(dev); | 10675 | SET_MODULE_OWNER(dev); |
10671 | SET_NETDEV_DEV(dev, &pdev->dev); | 10676 | SET_NETDEV_DEV(dev, &pdev->dev); |
10672 | 10677 | ||
10673 | if (pci_using_dac) | ||
10674 | dev->features |= NETIF_F_HIGHDMA; | ||
10675 | dev->features |= NETIF_F_LLTX; | 10678 | dev->features |= NETIF_F_LLTX; |
10676 | #if TG3_VLAN_TAG_USED | 10679 | #if TG3_VLAN_TAG_USED |
10677 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | 10680 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; |
@@ -10756,6 +10759,44 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
10756 | goto err_out_iounmap; | 10759 | goto err_out_iounmap; |
10757 | } | 10760 | } |
10758 | 10761 | ||
10762 | /* 5714, 5715 and 5780 cannot support DMA addresses > 40-bit. | ||
10763 | * On 64-bit systems with IOMMU, use 40-bit dma_mask. | ||
10764 | * On 64-bit systems without IOMMU, use 64-bit dma_mask and | ||
10765 | * do DMA address check in tg3_start_xmit(). | ||
10766 | */ | ||
10767 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { | ||
10768 | persist_dma_mask = dma_mask = DMA_40BIT_MASK; | ||
10769 | #ifdef CONFIG_HIGHMEM | ||
10770 | dma_mask = DMA_64BIT_MASK; | ||
10771 | #endif | ||
10772 | } else if (tp->tg3_flags2 & TG3_FLG2_IS_5788) | ||
10773 | persist_dma_mask = dma_mask = DMA_32BIT_MASK; | ||
10774 | else | ||
10775 | persist_dma_mask = dma_mask = DMA_64BIT_MASK; | ||
10776 | |||
10777 | /* Configure DMA attributes. */ | ||
10778 | if (dma_mask > DMA_32BIT_MASK) { | ||
10779 | err = pci_set_dma_mask(pdev, dma_mask); | ||
10780 | if (!err) { | ||
10781 | dev->features |= NETIF_F_HIGHDMA; | ||
10782 | err = pci_set_consistent_dma_mask(pdev, | ||
10783 | persist_dma_mask); | ||
10784 | if (err < 0) { | ||
10785 | printk(KERN_ERR PFX "Unable to obtain 64 bit " | ||
10786 | "DMA for consistent allocations\n"); | ||
10787 | goto err_out_iounmap; | ||
10788 | } | ||
10789 | } | ||
10790 | } | ||
10791 | if (err || dma_mask == DMA_32BIT_MASK) { | ||
10792 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
10793 | if (err) { | ||
10794 | printk(KERN_ERR PFX "No usable DMA configuration, " | ||
10795 | "aborting.\n"); | ||
10796 | goto err_out_iounmap; | ||
10797 | } | ||
10798 | } | ||
10799 | |||
10759 | tg3_init_bufmgr_config(tp); | 10800 | tg3_init_bufmgr_config(tp); |
10760 | 10801 | ||
10761 | #if TG3_TSO_SUPPORT != 0 | 10802 | #if TG3_TSO_SUPPORT != 0 |
@@ -10824,9 +10865,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
10824 | } else | 10865 | } else |
10825 | tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; | 10866 | tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; |
10826 | 10867 | ||
10827 | if (tp->tg3_flags2 & TG3_FLG2_IS_5788) | ||
10828 | dev->features &= ~NETIF_F_HIGHDMA; | ||
10829 | |||
10830 | /* flow control autonegotiation is default behavior */ | 10868 | /* flow control autonegotiation is default behavior */ |
10831 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; | 10869 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; |
10832 | 10870 | ||
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index c2506b56a186..12076f8f942c 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c | |||
@@ -536,6 +536,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, | |||
536 | u16 device_id; | 536 | u16 device_id; |
537 | int reg, rc = -ENODEV; | 537 | int reg, rc = -ENODEV; |
538 | 538 | ||
539 | #ifdef CONFIG_PCI | ||
539 | if (pdev) { | 540 | if (pdev) { |
540 | rc = pci_enable_device(pdev); | 541 | rc = pci_enable_device(pdev); |
541 | if (rc) | 542 | if (rc) |
@@ -547,6 +548,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, | |||
547 | goto err_out; | 548 | goto err_out; |
548 | } | 549 | } |
549 | } | 550 | } |
551 | #endif /* CONFIG_PCI */ | ||
550 | 552 | ||
551 | dev = alloc_etherdev(sizeof(TLanPrivateInfo)); | 553 | dev = alloc_etherdev(sizeof(TLanPrivateInfo)); |
552 | if (dev == NULL) { | 554 | if (dev == NULL) { |
diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h index b306c7e4c793..88dfa2e01d6e 100644 --- a/drivers/net/tokenring/smctr.h +++ b/drivers/net/tokenring/smctr.h | |||
@@ -1042,7 +1042,7 @@ typedef struct net_local { | |||
1042 | __u16 functional_address[2]; | 1042 | __u16 functional_address[2]; |
1043 | __u16 bitwise_group_address[2]; | 1043 | __u16 bitwise_group_address[2]; |
1044 | 1044 | ||
1045 | __u8 *ptr_ucode; | 1045 | const __u8 *ptr_ucode; |
1046 | 1046 | ||
1047 | __u8 cleanup; | 1047 | __u8 cleanup; |
1048 | 1048 | ||
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index d7fb3ffe06ac..2d0cfbceee22 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -1362,7 +1362,6 @@ static int de_open (struct net_device *dev) | |||
1362 | { | 1362 | { |
1363 | struct de_private *de = dev->priv; | 1363 | struct de_private *de = dev->priv; |
1364 | int rc; | 1364 | int rc; |
1365 | unsigned long flags; | ||
1366 | 1365 | ||
1367 | if (netif_msg_ifup(de)) | 1366 | if (netif_msg_ifup(de)) |
1368 | printk(KERN_DEBUG "%s: enabling interface\n", dev->name); | 1367 | printk(KERN_DEBUG "%s: enabling interface\n", dev->name); |
@@ -1376,18 +1375,20 @@ static int de_open (struct net_device *dev) | |||
1376 | return rc; | 1375 | return rc; |
1377 | } | 1376 | } |
1378 | 1377 | ||
1379 | rc = de_init_hw(de); | 1378 | dw32(IntrMask, 0); |
1380 | if (rc) { | ||
1381 | printk(KERN_ERR "%s: h/w init failure, err=%d\n", | ||
1382 | dev->name, rc); | ||
1383 | goto err_out_free; | ||
1384 | } | ||
1385 | 1379 | ||
1386 | rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev); | 1380 | rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev); |
1387 | if (rc) { | 1381 | if (rc) { |
1388 | printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n", | 1382 | printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n", |
1389 | dev->name, dev->irq, rc); | 1383 | dev->name, dev->irq, rc); |
1390 | goto err_out_hw; | 1384 | goto err_out_free; |
1385 | } | ||
1386 | |||
1387 | rc = de_init_hw(de); | ||
1388 | if (rc) { | ||
1389 | printk(KERN_ERR "%s: h/w init failure, err=%d\n", | ||
1390 | dev->name, rc); | ||
1391 | goto err_out_free_irq; | ||
1391 | } | 1392 | } |
1392 | 1393 | ||
1393 | netif_start_queue(dev); | 1394 | netif_start_queue(dev); |
@@ -1395,11 +1396,8 @@ static int de_open (struct net_device *dev) | |||
1395 | 1396 | ||
1396 | return 0; | 1397 | return 0; |
1397 | 1398 | ||
1398 | err_out_hw: | 1399 | err_out_free_irq: |
1399 | spin_lock_irqsave(&de->lock, flags); | 1400 | free_irq(dev->irq, dev); |
1400 | de_stop_hw(de); | ||
1401 | spin_unlock_irqrestore(&de->lock, flags); | ||
1402 | |||
1403 | err_out_free: | 1401 | err_out_free: |
1404 | de_free_rings(de); | 1402 | de_free_rings(de); |
1405 | return rc; | 1403 | return rc; |
@@ -1455,6 +1453,8 @@ static void de_tx_timeout (struct net_device *dev) | |||
1455 | synchronize_irq(dev->irq); | 1453 | synchronize_irq(dev->irq); |
1456 | de_clean_rings(de); | 1454 | de_clean_rings(de); |
1457 | 1455 | ||
1456 | de_init_rings(de); | ||
1457 | |||
1458 | de_init_hw(de); | 1458 | de_init_hw(de); |
1459 | 1459 | ||
1460 | netif_wake_queue(dev); | 1460 | netif_wake_queue(dev); |
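The de2104x changes rework de_open(): interrupts are masked first, the IRQ is requested before the hardware is initialised, and a failed de_init_hw() now unwinds through free_irq() instead of the old err_out_hw path that poked stopped hardware under the lock; de_tx_timeout() additionally rebuilds the rings before re-running de_init_hw(). A sketch of the resulting open/unwind ordering, using the SA_SHIRQ-era request_irq() signature (all mydrv_* names and the opaque private struct are stand-ins, not de2104x symbols):

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct mydrv_priv;					/* driver state, opaque here */
static int  mydrv_alloc_rings(struct mydrv_priv *priv);
static void mydrv_free_rings(struct mydrv_priv *priv);
static void mydrv_mask_irqs(struct mydrv_priv *priv);	/* e.g. write 0 to the IntrMask register */
static int  mydrv_init_hw(struct mydrv_priv *priv);
static irqreturn_t mydrv_interrupt(int irq, void *dev_id, struct pt_regs *regs);

/* Sketch of the open ordering the de2104x fix converges on: allocate,
 * silence the chip, grab the IRQ, then bring the hardware up, unwinding
 * in strict reverse order on failure. */
static int mydrv_open(struct net_device *dev)
{
	struct mydrv_priv *priv = dev->priv;		/* 2.6.x-era private data */
	int rc;

	rc = mydrv_alloc_rings(priv);
	if (rc)
		return rc;

	mydrv_mask_irqs(priv);				/* nothing can fire yet */

	rc = request_irq(dev->irq, mydrv_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc)
		goto err_free_rings;

	rc = mydrv_init_hw(priv);			/* only now enable the chip */
	if (rc)
		goto err_free_irq;

	netif_start_queue(dev);
	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
err_free_rings:
	mydrv_free_rings(priv);
	return rc;
}

The strict reverse-order unwind is the point: once the IRQ is the last resource acquired before netif_start_queue(), each error label only has to undo what was set up before it.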
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 50b8c6754b1e..a1ed2d983740 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -249,8 +249,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, | |||
249 | 249 | ||
250 | if (align) | 250 | if (align) |
251 | skb_reserve(skb, align); | 251 | skb_reserve(skb, align); |
252 | if (memcpy_fromiovec(skb_put(skb, len), iv, len)) | 252 | if (memcpy_fromiovec(skb_put(skb, len), iv, len)) { |
253 | tun->stats.rx_dropped++; | ||
254 | kfree_skb(skb); | ||
253 | return -EFAULT; | 255 | return -EFAULT; |
256 | } | ||
254 | 257 | ||
255 | skb->dev = tun->dev; | 258 | skb->dev = tun->dev; |
256 | switch (tun->flags & TUN_TYPE_MASK) { | 259 | switch (tun->flags & TUN_TYPE_MASK) { |
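The tun_get_user() fix frees the partially-built skb and bumps rx_dropped when memcpy_fromiovec() faults on the user iovec, where the old code returned -EFAULT and leaked the skb. A hedged sketch of that path (the helper name and the rx_dropped pointer are illustrative; tun keeps the counter in its own stats struct):

#include <linux/skbuff.h>
#include <linux/socket.h>	/* memcpy_fromiovec() lives here in this era */

/* Sketch: copy one user-space packet into a fresh skb, dropping (and
 * counting) it if the copy from the iovec faults part-way through. */
static struct sk_buff *copy_packet_from_user(struct iovec *iv, size_t len,
					     size_t align,
					     unsigned long *rx_dropped)
{
	struct sk_buff *skb = alloc_skb(len + align, GFP_KERNEL);

	if (!skb) {
		(*rx_dropped)++;
		return NULL;
	}
	if (align)
		skb_reserve(skb, align);
	if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
		(*rx_dropped)++;	/* the fix: account for it ... */
		kfree_skb(skb);		/* ... and free it, don't leak */
		return NULL;
	}
	return skb;
}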
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index c2d5907dc8e0..ed1f837c8fda 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -1106,6 +1106,9 @@ static void velocity_free_rd_ring(struct velocity_info *vptr) | |||
1106 | 1106 | ||
1107 | for (i = 0; i < vptr->options.numrx; i++) { | 1107 | for (i = 0; i < vptr->options.numrx; i++) { |
1108 | struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); | 1108 | struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); |
1109 | struct rx_desc *rd = vptr->rd_ring + i; | ||
1110 | |||
1111 | memset(rd, 0, sizeof(*rd)); | ||
1109 | 1112 | ||
1110 | if (!rd_info->skb) | 1113 | if (!rd_info->skb) |
1111 | continue; | 1114 | continue; |
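The via-velocity hunk zeroes each RX descriptor as the ring is torn down, presumably so a later ring re-initialisation never sees stale status/ownership bits from the previous run. A minimal sketch of that teardown; the descriptor layout, slot struct and names below are placeholders, not the driver's real types:

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/string.h>

struct my_rx_desc { u32 status; u32 len; u64 addr; };		/* placeholder layout */
struct my_rx_slot { struct sk_buff *skb; dma_addr_t mapping; };

/* Sketch: free an RX ring, zeroing each hardware descriptor before the
 * buffer behind it is unmapped and released. */
static void my_free_rx_ring(struct pci_dev *pdev, struct my_rx_desc *ring,
			    struct my_rx_slot *slots, int n, int buf_sz)
{
	int i;

	for (i = 0; i < n; i++) {
		memset(&ring[i], 0, sizeof(ring[i]));	/* no stale status/owner bits */
		if (!slots[i].skb)
			continue;
		pci_unmap_single(pdev, slots[i].mapping, buf_sz,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(slots[i].skb);
		slots[i].skb = NULL;
	}
}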
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 233a4f608084..ef85d76575a2 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig | |||
@@ -148,7 +148,7 @@ config IPW2100 | |||
148 | In order to use this driver, you will need a firmware image for it. | 148 | In order to use this driver, you will need a firmware image for it. |
149 | You can obtain the firmware from | 149 | You can obtain the firmware from |
150 | <http://ipw2100.sf.net/>. Once you have the firmware image, you | 150 | <http://ipw2100.sf.net/>. Once you have the firmware image, you |
151 | will need to place it in /etc/firmware. | 151 | will need to place it in /lib/firmware. |
152 | 152 | ||
153 | You will also very likely need the Wireless Tools in order to | 153 | You will also very likely need the Wireless Tools in order to |
154 | configure your card: | 154 | configure your card: |
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 98a76f10a0f7..dfc24016ba81 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c | |||
@@ -1872,7 +1872,7 @@ static int atmel_set_encodeext(struct net_device *dev, | |||
1872 | struct atmel_private *priv = netdev_priv(dev); | 1872 | struct atmel_private *priv = netdev_priv(dev); |
1873 | struct iw_point *encoding = &wrqu->encoding; | 1873 | struct iw_point *encoding = &wrqu->encoding; |
1874 | struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; | 1874 | struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; |
1875 | int idx, key_len; | 1875 | int idx, key_len, alg = ext->alg, set_key = 1; |
1876 | 1876 | ||
1877 | /* Determine and validate the key index */ | 1877 | /* Determine and validate the key index */ |
1878 | idx = encoding->flags & IW_ENCODE_INDEX; | 1878 | idx = encoding->flags & IW_ENCODE_INDEX; |
@@ -1883,39 +1883,42 @@ static int atmel_set_encodeext(struct net_device *dev, | |||
1883 | } else | 1883 | } else |
1884 | idx = priv->default_key; | 1884 | idx = priv->default_key; |
1885 | 1885 | ||
1886 | if ((encoding->flags & IW_ENCODE_DISABLED) || | 1886 | if (encoding->flags & IW_ENCODE_DISABLED) |
1887 | ext->alg == IW_ENCODE_ALG_NONE) { | 1887 | alg = IW_ENCODE_ALG_NONE; |
1888 | priv->wep_is_on = 0; | ||
1889 | priv->encryption_level = 0; | ||
1890 | priv->pairwise_cipher_suite = CIPHER_SUITE_NONE; | ||
1891 | } | ||
1892 | 1888 | ||
1893 | if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) | 1889 | if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { |
1894 | priv->default_key = idx; | 1890 | priv->default_key = idx; |
1891 | set_key = ext->key_len > 0 ? 1 : 0; | ||
1892 | } | ||
1895 | 1893 | ||
1896 | /* Set the requested key */ | 1894 | if (set_key) { |
1897 | switch (ext->alg) { | 1895 | /* Set the requested key first */ |
1898 | case IW_ENCODE_ALG_NONE: | 1896 | switch (alg) { |
1899 | break; | 1897 | case IW_ENCODE_ALG_NONE: |
1900 | case IW_ENCODE_ALG_WEP: | 1898 | priv->wep_is_on = 0; |
1901 | if (ext->key_len > 5) { | 1899 | priv->encryption_level = 0; |
1902 | priv->wep_key_len[idx] = 13; | 1900 | priv->pairwise_cipher_suite = CIPHER_SUITE_NONE; |
1903 | priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128; | 1901 | break; |
1904 | priv->encryption_level = 2; | 1902 | case IW_ENCODE_ALG_WEP: |
1905 | } else if (ext->key_len > 0) { | 1903 | if (ext->key_len > 5) { |
1906 | priv->wep_key_len[idx] = 5; | 1904 | priv->wep_key_len[idx] = 13; |
1907 | priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64; | 1905 | priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128; |
1908 | priv->encryption_level = 1; | 1906 | priv->encryption_level = 2; |
1909 | } else { | 1907 | } else if (ext->key_len > 0) { |
1908 | priv->wep_key_len[idx] = 5; | ||
1909 | priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64; | ||
1910 | priv->encryption_level = 1; | ||
1911 | } else { | ||
1912 | return -EINVAL; | ||
1913 | } | ||
1914 | priv->wep_is_on = 1; | ||
1915 | memset(priv->wep_keys[idx], 0, 13); | ||
1916 | key_len = min ((int)ext->key_len, priv->wep_key_len[idx]); | ||
1917 | memcpy(priv->wep_keys[idx], ext->key, key_len); | ||
1918 | break; | ||
1919 | default: | ||
1910 | return -EINVAL; | 1920 | return -EINVAL; |
1911 | } | 1921 | } |
1912 | priv->wep_is_on = 1; | ||
1913 | memset(priv->wep_keys[idx], 0, 13); | ||
1914 | key_len = min ((int)ext->key_len, priv->wep_key_len[idx]); | ||
1915 | memcpy(priv->wep_keys[idx], ext->key, key_len); | ||
1916 | break; | ||
1917 | default: | ||
1918 | return -EINVAL; | ||
1919 | } | 1922 | } |
1920 | 1923 | ||
1921 | return -EINPROGRESS; | 1924 | return -EINPROGRESS; |
@@ -3061,17 +3064,26 @@ static void authenticate(struct atmel_private *priv, u16 frame_len) | |||
3061 | } | 3064 | } |
3062 | 3065 | ||
3063 | if (status == C80211_MGMT_SC_Success && priv->wep_is_on) { | 3066 | if (status == C80211_MGMT_SC_Success && priv->wep_is_on) { |
3067 | int should_associate = 0; | ||
3064 | /* WEP */ | 3068 | /* WEP */ |
3065 | if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) | 3069 | if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) |
3066 | return; | 3070 | return; |
3067 | 3071 | ||
3068 | if (trans_seq_no == 0x0002 && | 3072 | if (system == C80211_MGMT_AAN_OPENSYSTEM) { |
3069 | auth->el_id == C80211_MGMT_ElementID_ChallengeText) { | 3073 | if (trans_seq_no == 0x0002) { |
3070 | send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); | 3074 | should_associate = 1; |
3071 | return; | 3075 | } |
3076 | } else if (system == C80211_MGMT_AAN_SHAREDKEY) { | ||
3077 | if (trans_seq_no == 0x0002 && | ||
3078 | auth->el_id == C80211_MGMT_ElementID_ChallengeText) { | ||
3079 | send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); | ||
3080 | return; | ||
3081 | } else if (trans_seq_no == 0x0004) { | ||
3082 | should_associate = 1; | ||
3083 | } | ||
3072 | } | 3084 | } |
3073 | 3085 | ||
3074 | if (trans_seq_no == 0x0004) { | 3086 | if (should_associate) { |
3075 | if(priv->station_was_associated) { | 3087 | if(priv->station_was_associated) { |
3076 | atmel_enter_state(priv, STATION_STATE_REASSOCIATING); | 3088 | atmel_enter_state(priv, STATION_STATE_REASSOCIATING); |
3077 | send_association_request(priv, 1); | 3089 | send_association_request(priv, 1); |
@@ -3084,11 +3096,13 @@ static void authenticate(struct atmel_private *priv, u16 frame_len) | |||
3084 | } | 3096 | } |
3085 | } | 3097 | } |
3086 | 3098 | ||
3087 | if (status == C80211_MGMT_SC_AuthAlgNotSupported) { | 3099 | if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { |
3088 | /* Do opensystem first, then try sharedkey */ | 3100 | /* Do opensystem first, then try sharedkey */ |
3089 | if (system == C80211_MGMT_AAN_OPENSYSTEM) { | 3101 | if (system == WLAN_AUTH_OPEN) { |
3090 | priv->CurrentAuthentTransactionSeqNum = 0x001; | 3102 | priv->CurrentAuthentTransactionSeqNum = 0x001; |
3091 | send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0); | 3103 | priv->exclude_unencrypted = 1; |
3104 | send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0); | ||
3105 | return; | ||
3092 | } else if (priv->connect_to_any_BSS) { | 3106 | } else if (priv->connect_to_any_BSS) { |
3093 | int bss_index; | 3107 | int bss_index; |
3094 | 3108 | ||
@@ -3439,10 +3453,13 @@ static void atmel_management_timer(u_long a) | |||
3439 | priv->AuthenticationRequestRetryCnt = 0; | 3453 | priv->AuthenticationRequestRetryCnt = 0; |
3440 | restart_search(priv); | 3454 | restart_search(priv); |
3441 | } else { | 3455 | } else { |
3456 | int auth = C80211_MGMT_AAN_OPENSYSTEM; | ||
3442 | priv->AuthenticationRequestRetryCnt++; | 3457 | priv->AuthenticationRequestRetryCnt++; |
3443 | priv->CurrentAuthentTransactionSeqNum = 0x0001; | 3458 | priv->CurrentAuthentTransactionSeqNum = 0x0001; |
3444 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); | 3459 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); |
3445 | send_authentication_request(priv, C80211_MGMT_AAN_OPENSYSTEM, NULL, 0); | 3460 | if (priv->wep_is_on && priv->exclude_unencrypted) |
3461 | auth = C80211_MGMT_AAN_SHAREDKEY; | ||
3462 | send_authentication_request(priv, auth, NULL, 0); | ||
3446 | } | 3463 | } |
3447 | break; | 3464 | break; |
3448 | 3465 | ||
@@ -3541,12 +3558,15 @@ static void atmel_command_irq(struct atmel_private *priv) | |||
3541 | priv->station_was_associated = priv->station_is_associated; | 3558 | priv->station_was_associated = priv->station_is_associated; |
3542 | atmel_enter_state(priv, STATION_STATE_READY); | 3559 | atmel_enter_state(priv, STATION_STATE_READY); |
3543 | } else { | 3560 | } else { |
3561 | int auth = C80211_MGMT_AAN_OPENSYSTEM; | ||
3544 | priv->AuthenticationRequestRetryCnt = 0; | 3562 | priv->AuthenticationRequestRetryCnt = 0; |
3545 | atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); | 3563 | atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); |
3546 | 3564 | ||
3547 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); | 3565 | mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); |
3548 | priv->CurrentAuthentTransactionSeqNum = 0x0001; | 3566 | priv->CurrentAuthentTransactionSeqNum = 0x0001; |
3549 | send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0); | 3567 | if (priv->wep_is_on && priv->exclude_unencrypted) |
3568 | auth = C80211_MGMT_AAN_SHAREDKEY; | ||
3569 | send_authentication_request(priv, auth, NULL, 0); | ||
3550 | } | 3570 | } |
3551 | return; | 3571 | return; |
3552 | } | 3572 | } |
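The atmel changes thread one idea through set_encodeext, the retry timer and the command-IRQ path: remember whether the user asked for restricted (shared-key) operation, default new authentication attempts to open system, and fall back to shared key only when WEP is on and exclude_unencrypted is set; the AuthAlgNotSupported handler now also records exclude_unencrypted before retrying with shared key. A small sketch of that selection logic (struct and constant names are illustrative; the 0/1 algorithm values match 802.11 but are not taken from this driver):

/* Sketch of the auth-algorithm selection the atmel fix converges on:
 * default to open system, use shared key only when WEP is on and the
 * user asked for restricted operation. Names/values are illustrative. */
#define AUTH_ALG_OPEN_SYSTEM	0
#define AUTH_ALG_SHARED_KEY	1

struct my_wifi_state {
	int wep_is_on;
	int exclude_unencrypted;
};

static int pick_auth_alg(const struct my_wifi_state *s)
{
	if (s->wep_is_on && s->exclude_unencrypted)
		return AUTH_ALG_SHARED_KEY;
	return AUTH_ALG_OPEN_SYSTEM;
}

/* On "auth algorithm not supported": open system was tried first, so flip
 * to shared key once and remember the choice for later retries. */
static int next_auth_alg_after_reject(struct my_wifi_state *s, int tried)
{
	if (tried == AUTH_ALG_OPEN_SYSTEM) {
		s->exclude_unencrypted = 1;
		return AUTH_ALG_SHARED_KEY;
	}
	return -1;			/* give up, rescan for another BSS */
}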
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index 8bc0b528548f..f8f4503475f9 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c | |||
@@ -877,7 +877,6 @@ static struct pcmcia_device_id hostap_cs_ids[] = { | |||
877 | PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), | 877 | PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), |
878 | PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), | 878 | PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), |
879 | PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), | 879 | PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), |
880 | PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), | ||
881 | PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), | 880 | PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), |
882 | PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b), | 881 | PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b), |
883 | PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), | 882 | PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), |
@@ -891,6 +890,10 @@ static struct pcmcia_device_id hostap_cs_ids[] = { | |||
891 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), | 890 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), |
892 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), | 891 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), |
893 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), | 892 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), |
893 | PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "INTERSIL", | ||
894 | 0x74c5e40d), | ||
895 | PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil", | ||
896 | 0x4b801a17), | ||
894 | PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus", | 897 | PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus", |
895 | 0x7a954bd9, 0x74be00c6), | 898 | 0x7a954bd9, 0x74be00c6), |
896 | PCMCIA_DEVICE_PROD_ID1234( | 899 | PCMCIA_DEVICE_PROD_ID1234( |
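The hostap_cs table drops the bare 0x0156/0x0002 manufacturer/card match, apparently because that ID pair is shared with cards other drivers handle, and re-adds it qualified by the first product-ID string so only Intersil-branded cards bind here. A trimmed sketch of such a table (the hashes are copied verbatim from the hunk above; the includes follow the usual PCMCIA driver boilerplate of this era):

#include <linux/module.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

/* Sketch: keep a manf/card match only when the first product string also
 * matches, so cards that merely share 0x0156/0x0002 fall through to their
 * own driver. */
static struct pcmcia_device_id mydrv_ids[] = {
	PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "INTERSIL",
					 0x74c5e40d),
	PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
					 0x4b801a17),
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, mydrv_ids);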
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 14beab4bc91c..287676ad80df 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -4616,9 +4616,9 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4616 | } | 4616 | } |
4617 | 4617 | ||
4618 | default: | 4618 | default: |
4619 | IPW_ERROR("Unknown notification: " | 4619 | IPW_DEBUG_NOTIF("Unknown notification: " |
4620 | "subtype=%d,flags=0x%2x,size=%d\n", | 4620 | "subtype=%d,flags=0x%2x,size=%d\n", |
4621 | notif->subtype, notif->flags, notif->size); | 4621 | notif->subtype, notif->flags, notif->size); |
4622 | } | 4622 | } |
4623 | } | 4623 | } |
4624 | 4624 | ||
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c index 3c128b692bce..ec6f2a48895b 100644 --- a/drivers/net/wireless/orinoco_cs.c +++ b/drivers/net/wireless/orinoco_cs.c | |||
@@ -590,6 +590,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = { | |||
590 | PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9), | 590 | PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9), |
591 | PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26), | 591 | PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26), |
592 | PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b), | 592 | PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b), |
593 | PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757), | ||
593 | PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a), | 594 | PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a), |
594 | PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e), | 595 | PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e), |
595 | PCMCIA_DEVICE_PROD_ID123("The Linksys Group, Inc.", "Instant Wireless Network PC Card", "ISL37300P", 0xa5f472c2, 0x590eb502, 0xc9049a39), | 596 | PCMCIA_DEVICE_PROD_ID123("The Linksys Group, Inc.", "Instant Wireless Network PC Card", "ISL37300P", 0xa5f472c2, 0x590eb502, 0xc9049a39), |
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index cf373625fc70..98122f3a4bc2 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c | |||
@@ -950,16 +950,8 @@ wv_82593_cmd(struct net_device * dev, | |||
950 | static inline int | 950 | static inline int |
951 | wv_diag(struct net_device * dev) | 951 | wv_diag(struct net_device * dev) |
952 | { | 952 | { |
953 | int ret = FALSE; | 953 | return(wv_82593_cmd(dev, "wv_diag(): diagnose", |
954 | 954 | OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED)); | |
955 | if(wv_82593_cmd(dev, "wv_diag(): diagnose", | ||
956 | OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED)) | ||
957 | ret = TRUE; | ||
958 | |||
959 | #ifdef DEBUG_CONFIG_ERRORS | ||
960 | printk(KERN_INFO "wavelan_cs: i82593 Self Test failed!\n"); | ||
961 | #endif | ||
962 | return(ret); | ||
963 | } /* wv_diag */ | 955 | } /* wv_diag */ |
964 | 956 | ||
965 | /*------------------------------------------------------------------*/ | 957 | /*------------------------------------------------------------------*/ |
@@ -3604,8 +3596,8 @@ wv_82593_config(struct net_device * dev) | |||
3604 | cfblk.lin_prio = 0; /* conform to 802.3 backoff algoritm */ | 3596 | cfblk.lin_prio = 0; /* conform to 802.3 backoff algoritm */ |
3605 | cfblk.exp_prio = 5; /* conform to 802.3 backoff algoritm */ | 3597 | cfblk.exp_prio = 5; /* conform to 802.3 backoff algoritm */ |
3606 | cfblk.bof_met = 1; /* conform to 802.3 backoff algoritm */ | 3598 | cfblk.bof_met = 1; /* conform to 802.3 backoff algoritm */ |
3607 | cfblk.ifrm_spc = 0x20; /* 32 bit times interframe spacing */ | 3599 | cfblk.ifrm_spc = 0x20 >> 4; /* 32 bit times interframe spacing */ |
3608 | cfblk.slottim_low = 0x20; /* 32 bit times slot time */ | 3600 | cfblk.slottim_low = 0x20 >> 5; /* 32 bit times slot time */ |
3609 | cfblk.slottim_hi = 0x0; | 3601 | cfblk.slottim_hi = 0x0; |
3610 | cfblk.max_retr = 15; | 3602 | cfblk.max_retr = 15; |
3611 | cfblk.prmisc = ((lp->promiscuous) ? TRUE: FALSE); /* Promiscuous mode */ | 3603 | cfblk.prmisc = ((lp->promiscuous) ? TRUE: FALSE); /* Promiscuous mode */ |
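The last wavelan_cs hunk scales the i82593 interframe-spacing and slot-time fields down to the register granularity instead of writing raw bit-time counts. Worked out, with the units inferred from the shifts themselves rather than re-checked against the i82593 datasheet:

/* ifrm_spc    = 0x20 >> 4 = 2   ->  2 x 16 bit times = 32 bit times
 * slottim_low = 0x20 >> 5 = 1   ->  1 x 32 bit times = 32 bit times
 * so both fields still encode the 32 bit times the comments promise. */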