108 files changed, 11145 insertions, 4303 deletions
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200
index acb30c5dcff3..4f2a40f1dbc6 100644
--- a/Documentation/networking/README.ipw2200
+++ b/Documentation/networking/README.ipw2200
@@ -14,8 +14,8 @@ Copyright (C) 2004-2006, Intel Corporation
 
 README.ipw2200
 
-Version: 1.0.8
-Date   : October 20, 2005
+Version: 1.1.2
+Date   : March 30, 2006
 
 
 Index
@@ -103,7 +103,7 @@ file.
 
 1.1. Overview of Features
 -----------------------------------------------
-The current release (1.0.8) supports the following features:
+The current release (1.1.2) supports the following features:
 
 + BSS mode (Infrastructure, Managed)
 + IBSS mode (Ad-Hoc)
@@ -247,8 +247,8 @@ and can set the contents via echo.  For example:
 % cat /sys/bus/pci/drivers/ipw2200/debug_level
 
 Will report the current debug level of the driver's logging subsystem
-(only available if CONFIG_IPW_DEBUG was configured when the driver was
-built).
+(only available if CONFIG_IPW2200_DEBUG was configured when the driver
+was built).
 
 You can set the debug level via:
 
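The hunk above tracks the rename of the driver's debug Kconfig symbol from CONFIG_IPW_DEBUG to CONFIG_IPW2200_DEBUG. As a rough, hypothetical sketch (not the ipw2200 driver's actual macro), a sysfs-tunable debug level gated by such a symbol typically looks like this in C:

#ifdef CONFIG_IPW2200_DEBUG
extern u32 ipw_debug_level;	/* exported through the debug_level sysfs attribute */
#define IPW_DEBUG(level, fmt, args...)					\
	do {								\
		if (ipw_debug_level & (level))				\
			printk(KERN_DEBUG "ipw2200: " fmt, ##args);	\
	} while (0)
#else
#define IPW_DEBUG(level, fmt, args...) do { } while (0)
#endif

When the symbol is not set, the macro compiles away to nothing, which is why debug_level is only present on CONFIG_IPW2200_DEBUG builds.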
diff --git a/MAINTAINERS b/MAINTAINERS
index c3c5842402df..1421f74b6009 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1425,6 +1425,8 @@ P: Jesse Brandeburg
 M: jesse.brandeburg@intel.com
 P: Jeff Kirsher
 M: jeffrey.t.kirsher@intel.com
+P: Auke Kok
+M: auke-jan.h.kok@intel.com
 W: http://sourceforge.net/projects/e1000/
 S: Supported
 
@@ -1437,6 +1439,8 @@ P: Jesse Brandeburg
 M: jesse.brandeburg@intel.com
 P: Jeff Kirsher
 M: jeffrey.t.kirsher@intel.com
+P: Auke Kok
+M: auke-jan.h.kok@intel.com
 W: http://sourceforge.net/projects/e1000/
 S: Supported
 
@@ -1449,6 +1453,8 @@ P: John Ronciak
 M: john.ronciak@intel.com
 P: Jesse Brandeburg
 M: jesse.brandeburg@intel.com
+P: Auke Kok
+M: auke-jan.h.kok@intel.com
 W: http://sourceforge.net/projects/e1000/
 S: Supported
 
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 066e22b01a94..46d8c01437e9 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -19,11 +19,11 @@
 	See the file COPYING in this distribution for more information.
 
 	Contributors:
 
 		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
 		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
 		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>
 
 	TODO:
 	* Test Tx checksumming thoroughly
 	* Implement dev->tx_timeout
@@ -461,7 +461,7 @@ static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 static inline void cp_set_rxbufsize (struct cp_private *cp)
 {
 	unsigned int mtu = cp->dev->mtu;
 
 	if (mtu > ETH_DATA_LEN)
 		/* MTU + ethernet header + FCS + optional VLAN tag */
 		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
@@ -510,7 +510,7 @@ static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
 static inline unsigned int cp_rx_csum_ok (u32 status)
 {
 	unsigned int protocol = (status >> 16) & 0x3;
 
 	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
 		return 1;
 	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
@@ -1061,7 +1061,7 @@ static void cp_init_hw (struct cp_private *cp)
 	cpw8(Config3, PARMEnable);
 	cp->wol_enabled = 0;
 
 	cpw8(Config5, cpr8(Config5) & PMEStatus);
 
 	cpw32_f(HiTxRingAddr, 0);
 	cpw32_f(HiTxRingAddr + 4, 0);
@@ -1351,7 +1351,7 @@ static void netdev_get_wol (struct cp_private *cp,
 		WAKE_MCAST | WAKE_UCAST;
 	/* We don't need to go on if WOL is disabled */
 	if (!cp->wol_enabled) return;
 
 	options = cpr8 (Config3);
 	if (options & LinkUp) wol->wolopts |= WAKE_PHY;
 	if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
@@ -1919,7 +1919,7 @@ static int cp_resume (struct pci_dev *pdev)
 	mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
 
 	spin_unlock_irqrestore (&cp->lock, flags);
 
 	return 0;
 }
 #endif /* CONFIG_PM */
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index feae7832fc84..abd6261465f1 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -165,7 +165,7 @@ static int multicast_filter_limit = 32;
 static int debug = -1;
 
 /*
  * Receive ring size
  * Warning: 64K ring has hardware issues and may lock up.
  */
 #if defined(CONFIG_SH_DREAMCAST)
@@ -257,7 +257,7 @@ static struct pci_device_id rtl8139_pci_tbl[] = {
 	{0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
 	{0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
 	{0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
 	{0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
 
 #ifdef CONFIG_SH_SECUREEDGE5410
 	/* Bogus 8139 silicon reports 8129 without external PROM :-( */
@@ -1824,7 +1824,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
 	int tmp_work;
 #endif
 
 	if (netif_msg_rx_err (tp))
 		printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n",
 			dev->name, rx_status);
 	tp->stats.rx_errors++;
@@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
 		 RTL_R16 (RxBufAddr),
 		 RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
 
 	while (netif_running(dev) && received < budget
 	       && (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
 		u32 ring_offset = cur_rx % RX_BUF_LEN;
 		u32 rx_status;
@@ -2031,7 +2031,7 @@ no_early_rx:
 
 			netif_receive_skb (skb);
 		} else {
 			if (net_ratelimit())
 				printk (KERN_WARNING
 					"%s: Memory squeeze, dropping packet.\n",
 					dev->name);
@@ -2158,13 +2158,13 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance,
 	status = RTL_R16 (IntrStatus);
 
 	/* shared irq? */
 	if (unlikely((status & rtl8139_intr_mask) == 0))
 		goto out;
 
 	handled = 1;
 
 	/* h/w no longer present (hotplug?) or major error, bail */
 	if (unlikely(status == 0xFFFF))
 		goto out;
 
 	/* close possible race's with dev_close */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index bdaaad8f2123..f499a3bc629f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -865,6 +865,22 @@ config DM9000
 	  <file:Documentation/networking/net-modules.txt>.  The module will be
 	  called dm9000.
 
+config SMC911X
+	tristate "SMSC LAN911[5678] support"
+	select CRC32
+	select MII
+	depends on NET_ETHERNET
+	help
+	  This is a driver for SMSC's LAN911x series of Ethernet chipsets
+	  including the new LAN9115, LAN9116, LAN9117, and LAN9118.
+	  Say Y if you want it compiled into the kernel,
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.linuxdoc.org/docs.html#howto>.
+
+	  This driver is also available as a module. The module will be
+	  called smc911x.  If you want to compile it as a module, say M
+	  here and read <file:Documentation/modules.txt>
+
 config NET_VENDOR_RACAL
 	bool "Racal-Interlan (Micom) NI cards"
 	depends on NET_ETHERNET && ISA
@@ -2311,6 +2327,23 @@ config S2IO_NAPI
 
 	  If in doubt, say N.
 
+config MYRI10GE
+	tristate "Myricom Myri-10G Ethernet support"
+	depends on PCI
+	select FW_LOADER
+	select CRC32
+	---help---
+	  This driver supports Myricom Myri-10G Dual Protocol interface in
+	  Ethernet mode. If the eeprom on your board is not recent enough,
+	  you will need a newer firmware image.
+	  You may get this image or more information, at:
+
+	  <http://www.myri.com/Myri-10G/>
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/networking/net-modules.txt>.  The module
+	  will be called myri10ge.
+
 endmenu
 
 source "drivers/net/tokenring/Kconfig"
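MYRI10GE selects FW_LOADER because, as the help text above notes, the device may need a newer firmware image loaded from userspace. A minimal sketch of the request_firmware() pattern that FW_LOADER provides (the file name and helper are illustrative, not necessarily myri10ge's):

#include <linux/firmware.h>

static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "myri10ge_eth.dat", dev);	/* hypothetical image name */
	if (err) {
		dev_err(dev, "could not load firmware (%d)\n", err);
		return err;
	}
	/* ... copy fw->data (fw->size bytes) to the adapter ... */
	release_firmware(fw);
	return 0;
}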
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b90468aea077..1eced3287507 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -192,7 +192,9 @@ obj-$(CONFIG_R8169) += r8169.o
 obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
 obj-$(CONFIG_IBMVETH) += ibmveth.o
 obj-$(CONFIG_S2IO) += s2io.o
+obj-$(CONFIG_MYRI10GE) += myri10ge/
 obj-$(CONFIG_SMC91X) += smc91x.o
+obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
 
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 14dbad14afb6..e1fe960d71b3 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -2,7 +2,7 @@
  *
  * Alchemy Au1x00 ethernet driver
  *
- * Copyright 2001,2002,2003 MontaVista Software Inc.
+ * Copyright 2001-2003, 2006 MontaVista Software Inc.
  * Copyright 2002 TimeSys Corp.
  * Added ethtool/mii-tool support,
  * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
@@ -68,7 +68,7 @@ static int au1000_debug = 5;
 static int au1000_debug = 3;
 #endif
 
-#define DRV_NAME "au1000eth"
+#define DRV_NAME "au1000_eth"
 #define DRV_VERSION "1.5"
 #define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
 #define DRV_DESC "Au1xxx on-chip Ethernet driver"
@@ -80,7 +80,7 @@ MODULE_LICENSE("GPL");
 // prototypes
 static void hard_stop(struct net_device *);
 static void enable_rx_tx(struct net_device *dev);
-static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
+static struct net_device * au1000_probe(int port_num);
 static int au1000_init(struct net_device *);
 static int au1000_open(struct net_device *);
 static int au1000_close(struct net_device *);
@@ -1160,12 +1160,27 @@ setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
 }
 
 static struct {
-	int port;
 	u32 base_addr;
 	u32 macen_addr;
 	int irq;
 	struct net_device *dev;
-} iflist[2];
+} iflist[2] = {
+#ifdef CONFIG_SOC_AU1000
+	{AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
+	{AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1100
+	{AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1500
+	{AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
+	{AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1550
+	{AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
+	{AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
+#endif
+};
 
 static int num_ifs;
 
@@ -1176,58 +1191,14 @@ static int num_ifs;
  */
 static int __init au1000_init_module(void)
 {
-	struct cpuinfo_mips *c = &current_cpu_data;
 	int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
 	struct net_device *dev;
 	int i, found_one = 0;
 
-	switch (c->cputype) {
-#ifdef CONFIG_SOC_AU1000
-	case CPU_AU1000:
-		num_ifs = 2 - ni;
-		iflist[0].base_addr = AU1000_ETH0_BASE;
-		iflist[1].base_addr = AU1000_ETH1_BASE;
-		iflist[0].macen_addr = AU1000_MAC0_ENABLE;
-		iflist[1].macen_addr = AU1000_MAC1_ENABLE;
-		iflist[0].irq = AU1000_MAC0_DMA_INT;
-		iflist[1].irq = AU1000_MAC1_DMA_INT;
-		break;
-#endif
-#ifdef CONFIG_SOC_AU1100
-	case CPU_AU1100:
-		num_ifs = 1 - ni;
-		iflist[0].base_addr = AU1100_ETH0_BASE;
-		iflist[0].macen_addr = AU1100_MAC0_ENABLE;
-		iflist[0].irq = AU1100_MAC0_DMA_INT;
-		break;
-#endif
-#ifdef CONFIG_SOC_AU1500
-	case CPU_AU1500:
-		num_ifs = 2 - ni;
-		iflist[0].base_addr = AU1500_ETH0_BASE;
-		iflist[1].base_addr = AU1500_ETH1_BASE;
-		iflist[0].macen_addr = AU1500_MAC0_ENABLE;
-		iflist[1].macen_addr = AU1500_MAC1_ENABLE;
-		iflist[0].irq = AU1500_MAC0_DMA_INT;
-		iflist[1].irq = AU1500_MAC1_DMA_INT;
-		break;
-#endif
-#ifdef CONFIG_SOC_AU1550
-	case CPU_AU1550:
-		num_ifs = 2 - ni;
-		iflist[0].base_addr = AU1550_ETH0_BASE;
-		iflist[1].base_addr = AU1550_ETH1_BASE;
-		iflist[0].macen_addr = AU1550_MAC0_ENABLE;
-		iflist[1].macen_addr = AU1550_MAC1_ENABLE;
-		iflist[0].irq = AU1550_MAC0_DMA_INT;
-		iflist[1].irq = AU1550_MAC1_DMA_INT;
-		break;
-#endif
-	default:
-		num_ifs = 0;
-	}
+	num_ifs = NUM_ETH_INTERFACES - ni;
+
 	for(i = 0; i < num_ifs; i++) {
-		dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
+		dev = au1000_probe(i);
 		iflist[i].dev = dev;
 		if (dev)
 			found_one++;
@@ -1436,8 +1407,7 @@ static struct ethtool_ops au1000_ethtool_ops = {
 	.get_link = au1000_get_link
 };
 
-static struct net_device *
-au1000_probe(u32 ioaddr, int irq, int port_num)
+static struct net_device * au1000_probe(int port_num)
 {
 	static unsigned version_printed = 0;
 	struct au1000_private *aup = NULL;
@@ -1445,94 +1415,95 @@ au1000_probe(u32 ioaddr, int irq, int port_num)
 	db_dest_t *pDB, *pDBfree;
 	char *pmac, *argptr;
 	char ethaddr[6];
-	int i, err;
+	int irq, i, err;
+	u32 base, macen;
+
+	if (port_num >= NUM_ETH_INTERFACES)
+		return NULL;
 
-	if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
+	base  = CPHYSADDR(iflist[port_num].base_addr);
+	macen = CPHYSADDR(iflist[port_num].macen_addr);
+	irq = iflist[port_num].irq;
+
+	if (!request_mem_region(base, MAC_IOSIZE, "Au1x00 ENET") ||
+	    !request_mem_region(macen, 4, "Au1x00 ENET"))
 		return NULL;
 
 	if (version_printed++ == 0)
 		printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
 
 	dev = alloc_etherdev(sizeof(struct au1000_private));
 	if (!dev) {
-		printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
+		printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
 		return NULL;
 	}
 
-	if ((err = register_netdev(dev))) {
-		printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
-				err);
+	if ((err = register_netdev(dev)) != 0) {
+		printk(KERN_ERR "%s: Cannot register net device, error %d\n",
+				DRV_NAME, err);
 		free_netdev(dev);
 		return NULL;
 	}
 
-	printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
-			dev->name, ioaddr, irq);
+	printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
+			dev->name, base, irq);
 
 	aup = dev->priv;
 
 	/* Allocate the data buffers */
 	/* Snooping works fine with eth on all au1xxx */
-	aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
-			MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
-			&aup->dma_addr,
-			0);
+	aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
+						(NUM_TX_BUFFS + NUM_RX_BUFFS),
+						&aup->dma_addr, 0);
 	if (!aup->vaddr) {
 		free_netdev(dev);
-		release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
+		release_mem_region(base, MAC_IOSIZE);
+		release_mem_region(macen, 4);
 		return NULL;
 	}
 
 	/* aup->mac is the base address of the MAC's registers */
-	aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
+	aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
+
 	/* Setup some variables for quick register address access */
-	if (ioaddr == iflist[0].base_addr)
-	{
-		/* check env variables first */
-		if (!get_ethernet_addr(ethaddr)) {
+	aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
+	aup->mac_id = port_num;
+	au_macs[port_num] = aup;
+
+	if (port_num == 0) {
+		/* Check the environment variables first */
+		if (get_ethernet_addr(ethaddr) == 0)
 			memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
-		} else {
+		else {
 			/* Check command line */
 			argptr = prom_getcmdline();
-			if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
-				printk(KERN_INFO "%s: No mac address found\n",
+			if ((pmac = strstr(argptr, "ethaddr=")) == NULL)
+				printk(KERN_INFO "%s: No MAC address found\n",
						dev->name);
-				/* use the hard coded mac addresses */
-			} else {
+			/* Use the hard coded MAC addresses */
+			else {
 				str2eaddr(ethaddr, pmac + strlen("ethaddr="));
 				memcpy(au1000_mac_addr, ethaddr,
						sizeof(au1000_mac_addr));
 			}
 		}
-		aup->enable = (volatile u32 *)
-			((unsigned long)iflist[0].macen_addr);
-		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
+
 		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
-		aup->mac_id = 0;
-		au_macs[0] = aup;
-	}
-	else
-	if (ioaddr == iflist[1].base_addr)
-	{
-		aup->enable = (volatile u32 *)
-			((unsigned long)iflist[1].macen_addr);
-		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
-		dev->dev_addr[4] += 0x10;
+	} else if (port_num == 1)
 		setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
-		aup->mac_id = 1;
-		au_macs[1] = aup;
-	}
-	else
-	{
-		printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
-	}
 
-	/* bring the device out of reset, otherwise probing the mii
-	 * will hang */
+	/*
+	 * Assign to the Ethernet ports two consecutive MAC addresses
+	 * to match those that are printed on their stickers
+	 */
+	memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
+	dev->dev_addr[5] += port_num;
+
+	/* Bring the device out of reset, otherwise probing the MII will hang */
 	*aup->enable = MAC_EN_CLOCK_ENABLE;
 	au_sync_delay(2);
-	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
-		MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
+	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 |
+		MAC_EN_CLOCK_ENABLE;
 	au_sync_delay(2);
 
 	aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
@@ -1581,7 +1552,7 @@ au1000_probe(u32 ioaddr, int irq, int port_num)
 	}
 
 	spin_lock_init(&aup->lock);
-	dev->base_addr = ioaddr;
+	dev->base_addr = base;
 	dev->irq = irq;
 	dev->open = au1000_open;
 	dev->hard_start_xmit = au1000_tx;
@@ -1615,13 +1586,12 @@ err_out:
 		if (aup->tx_db_inuse[i])
 			ReleaseDB(aup, aup->tx_db_inuse[i]);
 	}
-	dma_free_noncoherent(NULL,
-			MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
-			(void *)aup->vaddr,
-			aup->dma_addr);
+	dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+			     (void *)aup->vaddr, aup->dma_addr);
 	unregister_netdev(dev);
 	free_netdev(dev);
-	release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
+	release_mem_region(base, MAC_IOSIZE);
+	release_mem_region(macen, 4);
 	return NULL;
 }
 
@@ -1806,20 +1776,18 @@ static void __exit au1000_cleanup_module(void)
 			aup = (struct au1000_private *) dev->priv;
 			unregister_netdev(dev);
 			kfree(aup->mii);
-			for (j = 0; j < NUM_RX_DMA; j++) {
+			for (j = 0; j < NUM_RX_DMA; j++)
 				if (aup->rx_db_inuse[j])
 					ReleaseDB(aup, aup->rx_db_inuse[j]);
-			}
-			for (j = 0; j < NUM_TX_DMA; j++) {
+			for (j = 0; j < NUM_TX_DMA; j++)
 				if (aup->tx_db_inuse[j])
 					ReleaseDB(aup, aup->tx_db_inuse[j]);
-			}
-			dma_free_noncoherent(NULL,
-					MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
-					(void *)aup->vaddr,
-					aup->dma_addr);
-			free_netdev(dev);
-			release_mem_region(CPHYSADDR(iflist[i].base_addr), MAC_IOSIZE);
+			dma_free_noncoherent(NULL, MAX_BUF_SIZE *
+					     (NUM_TX_BUFFS + NUM_RX_BUFFS),
+					     (void *)aup->vaddr, aup->dma_addr);
+			release_mem_region(dev->base_addr, MAC_IOSIZE);
+			release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
 			free_netdev(dev);
 		}
 	}
 }
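The reworked au1000_probe() above claims two resources per port, the MAC register block and the 4-byte MAC enable register, and the error paths now release both. A small sketch of that claim/unwind pairing (hypothetical helper; the patch itself simply ORs the two request_mem_region() calls together):

static int example_claim_regions(u32 base, u32 macen)
{
	if (!request_mem_region(base, MAC_IOSIZE, "Au1x00 ENET"))
		return -EBUSY;
	if (!request_mem_region(macen, 4, "Au1x00 ENET")) {
		release_mem_region(base, MAC_IOSIZE);	/* unwind the first claim */
		return -EBUSY;
	}
	return 0;
}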
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index ac48f7543500..39f36aa05aa8 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4877,7 +4877,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 			     const struct pci_device_id *ent)
 {
 	static int cas_version_printed = 0;
-	unsigned long casreg_base, casreg_len;
+	unsigned long casreg_len;
 	struct net_device *dev;
 	struct cas *cp;
 	int i, err, pci_using_dac;
@@ -4972,7 +4972,6 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 		pci_using_dac = 0;
 	}
 
-	casreg_base = pci_resource_start(pdev, 0);
 	casreg_len = pci_resource_len(pdev, 0);
 
 	cp = netdev_priv(dev);
@@ -5024,7 +5023,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 	cp->timer_ticks = 0;
 
 	/* give us access to cassini registers */
-	cp->regs = ioremap(casreg_base, casreg_len);
+	cp->regs = pci_iomap(pdev, 0, casreg_len);
 	if (cp->regs == 0UL) {
 		printk(KERN_ERR PFX "Cannot map device registers, "
 		       "aborting.\n");
@@ -5123,7 +5122,7 @@ err_out_iounmap:
 	cas_shutdown(cp);
 	mutex_unlock(&cp->pm_mutex);
 
-	iounmap(cp->regs);
+	pci_iounmap(pdev, cp->regs);
 
 
 err_out_free_res:
@@ -5171,7 +5170,7 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
 #endif
 	pci_free_consistent(pdev, sizeof(struct cas_init_block),
 			    cp->init_block, cp->block_dvma);
-	iounmap(cp->regs);
+	pci_iounmap(pdev, cp->regs);
 	free_netdev(dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
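The cassini hunks above replace a raw ioremap() of BAR 0 with pci_iomap(), and the matching iounmap() calls with pci_iounmap(), which handle both memory- and I/O-mapped BARs. A minimal sketch of the pattern (struct example and its fields are placeholders, not cassini's own types):

#include <linux/pci.h>

struct example {
	struct pci_dev *pdev;
	void __iomem *regs;
};

static int example_map_bar0(struct example *e)
{
	/* Map BAR 0 for its full length; pci_iomap() picks MMIO vs. port I/O. */
	e->regs = pci_iomap(e->pdev, 0, pci_resource_len(e->pdev, 0));
	if (!e->regs)
		return -ENOMEM;
	return 0;
}

static void example_unmap_bar0(struct example *e)
{
	pci_iounmap(e->pdev, e->regs);	/* counterpart used from remove() and error paths */
}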
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index ca9f89552da3..5dea2b7dea4d 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 #
-# Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+# Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
 #
 # Contact Information:
 # Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 # Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 #
 ################################################################################
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 281de41d030a..2bc34fbfa69c 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
 
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
 
   Contact Information:
   Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
@@ -114,6 +115,8 @@ struct e1000_adapter;
 /* Supported Rx Buffer Sizes */
 #define E1000_RXBUFFER_128   128    /* Used for packet split */
 #define E1000_RXBUFFER_256   256    /* Used for packet split */
+#define E1000_RXBUFFER_512   512
+#define E1000_RXBUFFER_1024  1024
 #define E1000_RXBUFFER_2048  2048
 #define E1000_RXBUFFER_4096  4096
 #define E1000_RXBUFFER_8192  8192
@@ -334,7 +337,6 @@ struct e1000_adapter {
 	boolean_t have_msi;
 #endif
 	/* to not mess up cache alignment, always add to the bottom */
-	boolean_t txb2b;
 #ifdef NETIF_F_TSO
 	boolean_t tso_force;
 #endif
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index ecccca35c6f4..cfdf0b24ffc1 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
 
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
 
   Contact Information:
   Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
@@ -864,15 +865,15 @@ static int
 e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 {
 	struct net_device *netdev = adapter->netdev;
 	uint32_t mask, i=0, shared_int = TRUE;
 	uint32_t irq = adapter->pdev->irq;
 
 	*data = 0;
 
 	/* Hook up test interrupt handler just for this test */
 	if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
 		shared_int = FALSE;
 	} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
 			       netdev->name, netdev)){
 		*data = 1;
 		return -1;
@@ -888,22 +889,22 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 		/* Interrupt to test */
 		mask = 1 << i;
 
 		if (!shared_int) {
 			/* Disable the interrupt to be reported in
 			 * the cause register and then force the same
 			 * interrupt and see if one gets posted.  If
 			 * an interrupt was posted to the bus, the
 			 * test failed.
 			 */
 			adapter->test_icr = 0;
 			E1000_WRITE_REG(&adapter->hw, IMC, mask);
 			E1000_WRITE_REG(&adapter->hw, ICS, mask);
 			msec_delay(10);
 
 			if (adapter->test_icr & mask) {
 				*data = 3;
 				break;
 			}
 		}
 
 		/* Enable the interrupt to be reported in
@@ -922,7 +923,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 				break;
 			}
 		}
 
 		if (!shared_int) {
 			/* Disable the other interrupts to be reported in
 			 * the cause register and then force the other
 			 * interrupts and see if any get posted.  If
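The e1000_intr_test() code above keeps the driver's way of detecting a shared IRQ line: request the vector exclusively first, and only fall back to a shared registration when that fails. A stripped-down sketch of the idea with 2.6.16-era request_irq() flags (the handler and names are illustrative, not e1000's):

static irqreturn_t example_test_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static int example_hook_test_irq(int irq, void *dev_id, int *shared)
{
	*shared = 0;
	if (request_irq(irq, &example_test_intr, 0, "irq-test", dev_id) == 0)
		return 0;			/* line is ours exclusively */
	*shared = 1;				/* already claimed; register as shared */
	return request_irq(irq, &example_test_intr, SA_SHIRQ, "irq-test", dev_id);
}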
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 523c2c9fc0ac..3959039b16ec 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
 
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
 
   Contact Information:
   Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
@@ -764,7 +765,7 @@ e1000_init_hw(struct e1000_hw *hw)
 	}
 
 	if (hw->mac_type == e1000_82573) {
 		e1000_enable_tx_pkt_filtering(hw);
 	}
 
 	switch (hw->mac_type) {
@@ -860,7 +861,7 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
 
 	if(eeprom_data != EEPROM_RESERVED_WORD) {
 		/* Adjust SERDES output amplitude only. */
 		eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
 		ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
 		if(ret_val)
 			return ret_val;
@@ -1227,7 +1228,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
 
 	if (hw->phy_reset_disable)
 		return E1000_SUCCESS;
 
 	ret_val = e1000_phy_reset(hw);
 	if (ret_val) {
 		DEBUGOUT("Error Resetting the PHY\n");
@@ -1369,7 +1370,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
 	DEBUGFUNC("e1000_copper_link_ggp_setup");
 
 	if(!hw->phy_reset_disable) {
 
 		/* Enable CRS on TX for half-duplex operation. */
 		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
 					     &phy_data);
@@ -1518,7 +1519,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
 
 	if(hw->phy_reset_disable)
 		return E1000_SUCCESS;
 
 	/* Enable CRS on TX. This must be set for half-duplex operation. */
 	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
 	if(ret_val)
@@ -1664,7 +1665,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
 *      collision distance in the Transmit Control Register.
 *   2) Set up flow control on the MAC to that established with
 *      the link partner.
 *   3) Config DSP to improve Gigabit link quality for some PHY revisions.
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
@@ -1673,7 +1674,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
 {
 	int32_t ret_val;
 	DEBUGFUNC("e1000_copper_link_postconfig");
 
 	if(hw->mac_type >= e1000_82544) {
 		e1000_config_collision_dist(hw);
 	} else {
@@ -1697,7 +1698,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
 			return ret_val;
 		}
 	}
 
 	return E1000_SUCCESS;
 }
 
@@ -1753,11 +1754,11 @@ e1000_setup_copper_link(struct e1000_hw *hw)
 	}
 
 	if(hw->autoneg) {
 		/* Setup autoneg and flow control advertisement
 		 * and perform autonegotiation */
 		ret_val = e1000_copper_link_autoneg(hw);
 		if(ret_val)
 			return ret_val;
 	} else {
 		/* PHY will be set to 10H, 10F, 100H,or 100F
 		 * depending on value from forced_speed_duplex. */
@@ -1785,7 +1786,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
 	ret_val = e1000_copper_link_postconfig(hw);
 	if(ret_val)
 		return ret_val;
 
 	DEBUGOUT("Valid link established!!!\n");
 	return E1000_SUCCESS;
 }
@@ -1983,7 +1984,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
 
 	DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
 
 	ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
 	if(ret_val)
 		return ret_val;
 
@@ -2272,7 +2273,7 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
 
 	DEBUGFUNC("e1000_config_mac_to_phy");
 
 	/* 82544 or newer MAC, Auto Speed Detection takes care of
 	 * MAC speed/duplex configuration.*/
 	if (hw->mac_type >= e1000_82544)
 		return E1000_SUCCESS;
@@ -2291,9 +2292,9 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
 	if(ret_val)
 		return ret_val;
 
 	if(phy_data & M88E1000_PSSR_DPLX)
 		ctrl |= E1000_CTRL_FD;
 	else
 		ctrl &= ~E1000_CTRL_FD;
 
 	e1000_config_collision_dist(hw);
@@ -2492,10 +2493,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
 			 */
 			if(hw->original_fc == e1000_fc_full) {
 				hw->fc = e1000_fc_full;
-				DEBUGOUT("Flow Control = FULL.\r\n");
+				DEBUGOUT("Flow Control = FULL.\n");
 			} else {
 				hw->fc = e1000_fc_rx_pause;
-				DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+				DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
 			}
 		}
 		/* For receiving PAUSE frames ONLY.
@@ -2511,7 +2512,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
 			(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 			(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
 			hw->fc = e1000_fc_tx_pause;
-			DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
+			DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
 		}
 		/* For transmitting PAUSE frames ONLY.
 		 *
@@ -2526,7 +2527,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
 			!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 			(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
 			hw->fc = e1000_fc_rx_pause;
-			DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+			DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
 		}
 		/* Per the IEEE spec, at this point flow control should be
 		 * disabled.  However, we want to consider that we could
@@ -2552,10 +2553,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
 			   hw->original_fc == e1000_fc_tx_pause) ||
 			   hw->fc_strict_ieee) {
 			hw->fc = e1000_fc_none;
-			DEBUGOUT("Flow Control = NONE.\r\n");
+			DEBUGOUT("Flow Control = NONE.\n");
 		} else {
 			hw->fc = e1000_fc_rx_pause;
-			DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+			DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
 		}
 
 		/* Now we need to do one last check...  If we auto-
@@ -2580,7 +2581,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
 			return ret_val;
 		}
 	} else {
-		DEBUGOUT("Copper PHY and Auto Neg has not completed.\r\n");
+		DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
 	}
 	}
 	return E1000_SUCCESS;
@@ -2763,7 +2764,7 @@ e1000_check_for_link(struct e1000_hw *hw)
 			hw->autoneg_failed = 1;
 			return 0;
 		}
-		DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\r\n");
+		DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
 
 		/* Disable auto-negotiation in the TXCW register */
 		E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE));
@@ -2788,7 +2789,7 @@ e1000_check_for_link(struct e1000_hw *hw)
 	else if(((hw->media_type == e1000_media_type_fiber) ||
 		 (hw->media_type == e1000_media_type_internal_serdes)) &&
 		(ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
-		DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\r\n");
+		DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
 		E1000_WRITE_REG(hw, TXCW, hw->txcw);
 		E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -2851,13 +2852,13 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
 
 		if(status & E1000_STATUS_FD) {
 			*duplex = FULL_DUPLEX;
-			DEBUGOUT("Full Duplex\r\n");
+			DEBUGOUT("Full Duplex\n");
 		} else {
 			*duplex = HALF_DUPLEX;
-			DEBUGOUT(" Half Duplex\r\n");
+			DEBUGOUT(" Half Duplex\n");
 		}
 	} else {
-		DEBUGOUT("1000 Mbs, Full Duplex\r\n");
+		DEBUGOUT("1000 Mbs, Full Duplex\n");
 		*speed = SPEED_1000;
 		*duplex = FULL_DUPLEX;
 	}
@@ -2883,7 +2884,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
 		}
 	}
 
 	if ((hw->mac_type == e1000_80003es2lan) &&
 	    (hw->media_type == e1000_media_type_copper)) {
 		if (*speed == SPEED_1000)
 			ret_val = e1000_configure_kmrn_for_1000(hw);
@@ -3159,7 +3160,7 @@ e1000_read_phy_reg(struct e1000_hw *hw, | |||
3159 | if (e1000_swfw_sync_acquire(hw, swfw)) | 3160 | if (e1000_swfw_sync_acquire(hw, swfw)) |
3160 | return -E1000_ERR_SWFW_SYNC; | 3161 | return -E1000_ERR_SWFW_SYNC; |
3161 | 3162 | ||
3162 | if((hw->phy_type == e1000_phy_igp || | 3163 | if((hw->phy_type == e1000_phy_igp || |
3163 | hw->phy_type == e1000_phy_igp_2) && | 3164 | hw->phy_type == e1000_phy_igp_2) && |
3164 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3165 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
3165 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3166 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
@@ -3298,7 +3299,7 @@ e1000_write_phy_reg(struct e1000_hw *hw, | |||
3298 | if (e1000_swfw_sync_acquire(hw, swfw)) | 3299 | if (e1000_swfw_sync_acquire(hw, swfw)) |
3299 | return -E1000_ERR_SWFW_SYNC; | 3300 | return -E1000_ERR_SWFW_SYNC; |
3300 | 3301 | ||
3301 | if((hw->phy_type == e1000_phy_igp || | 3302 | if((hw->phy_type == e1000_phy_igp || |
3302 | hw->phy_type == e1000_phy_igp_2) && | 3303 | hw->phy_type == e1000_phy_igp_2) && |
3303 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3304 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
3304 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3305 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
@@ -3496,22 +3497,22 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3496 | } | 3497 | } |
3497 | /* Read the device control register and assert the E1000_CTRL_PHY_RST | 3498 | /* Read the device control register and assert the E1000_CTRL_PHY_RST |
3498 | * bit. Then, take it out of reset. | 3499 | * bit. Then, take it out of reset. |
3499 | * For pre-e1000_82571 hardware, we delay for 10ms between the assert | 3500 | * For pre-e1000_82571 hardware, we delay for 10ms between the assert |
3500 | * and deassert. For e1000_82571 hardware and later, we instead delay | 3501 | * and deassert. For e1000_82571 hardware and later, we instead delay |
3501 | * for 50us between and 10ms after the deassertion. | 3502 | * for 50us between and 10ms after the deassertion. |
3502 | */ | 3503 | */ |
3503 | ctrl = E1000_READ_REG(hw, CTRL); | 3504 | ctrl = E1000_READ_REG(hw, CTRL); |
3504 | E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); | 3505 | E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); |
3505 | E1000_WRITE_FLUSH(hw); | 3506 | E1000_WRITE_FLUSH(hw); |
3506 | 3507 | ||
3507 | if (hw->mac_type < e1000_82571) | 3508 | if (hw->mac_type < e1000_82571) |
3508 | msec_delay(10); | 3509 | msec_delay(10); |
3509 | else | 3510 | else |
3510 | udelay(100); | 3511 | udelay(100); |
3511 | 3512 | ||
3512 | E1000_WRITE_REG(hw, CTRL, ctrl); | 3513 | E1000_WRITE_REG(hw, CTRL, ctrl); |
3513 | E1000_WRITE_FLUSH(hw); | 3514 | E1000_WRITE_FLUSH(hw); |
3514 | 3515 | ||
3515 | if (hw->mac_type >= e1000_82571) | 3516 | if (hw->mac_type >= e1000_82571) |
3516 | msec_delay(10); | 3517 | msec_delay(10); |
3517 | e1000_swfw_sync_release(hw, swfw); | 3518 | e1000_swfw_sync_release(hw, swfw); |
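The whitespace-only hunk above sits in e1000_phy_hw_reset(), whose comment spells out the timing: pre-82571 parts get a 10 ms gap between asserting and deasserting PHY_RST, while 82571 and later use a much shorter gap (100 us in the code shown) plus a 10 ms settle after deassert. A standalone sketch of that branch, with register access and delays stubbed so it compiles on its own.

    /* Sketch of the reset pulse timing described above; not the driver code. */
    #include <stdio.h>

    enum mac_type { MAC_82544, MAC_82571, MAC_82572 };  /* illustrative subset */

    static void assert_phy_reset(void)   { puts("CTRL |= PHY_RST"); }
    static void deassert_phy_reset(void) { puts("CTRL restored"); }
    static void delay_ms(unsigned ms)    { printf("delay %u ms\n", ms); }
    static void delay_us(unsigned us)    { printf("delay %u us\n", us); }

    static void phy_hw_reset(enum mac_type mac)
    {
        assert_phy_reset();

        if (mac < MAC_82571)
            delay_ms(10);      /* older parts: long gap between assert/deassert */
        else
            delay_us(100);     /* newer parts: short gap ... */

        deassert_phy_reset();

        if (mac >= MAC_82571)
            delay_ms(10);      /* ... but a settle time after the deassert */
    }

    int main(void)
    {
        phy_hw_reset(MAC_82544);
        phy_hw_reset(MAC_82571);
        return 0;
    }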
@@ -3815,7 +3816,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw, | |||
3815 | /* Check polarity status */ | 3816 | /* Check polarity status */ |
3816 | ret_val = e1000_check_polarity(hw, &polarity); | 3817 | ret_val = e1000_check_polarity(hw, &polarity); |
3817 | if(ret_val) | 3818 | if(ret_val) |
3818 | return ret_val; | 3819 | return ret_val; |
3819 | phy_info->cable_polarity = polarity; | 3820 | phy_info->cable_polarity = polarity; |
3820 | 3821 | ||
3821 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | 3822 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); |
@@ -4540,14 +4541,14 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw, | |||
4540 | 4541 | ||
4541 | E1000_WRITE_REG(hw, EERD, eerd); | 4542 | E1000_WRITE_REG(hw, EERD, eerd); |
4542 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); | 4543 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); |
4543 | 4544 | ||
4544 | if(error) { | 4545 | if(error) { |
4545 | break; | 4546 | break; |
4546 | } | 4547 | } |
4547 | data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA); | 4548 | data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA); |
4548 | 4549 | ||
4549 | } | 4550 | } |
4550 | 4551 | ||
4551 | return error; | 4552 | return error; |
4552 | } | 4553 | } |
4553 | 4554 | ||
@@ -4573,24 +4574,24 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw, | |||
4573 | return -E1000_ERR_SWFW_SYNC; | 4574 | return -E1000_ERR_SWFW_SYNC; |
4574 | 4575 | ||
4575 | for (i = 0; i < words; i++) { | 4576 | for (i = 0; i < words; i++) { |
4576 | register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | | 4577 | register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | |
4577 | ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | | 4578 | ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | |
4578 | E1000_EEPROM_RW_REG_START; | 4579 | E1000_EEPROM_RW_REG_START; |
4579 | 4580 | ||
4580 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); | 4581 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); |
4581 | if(error) { | 4582 | if(error) { |
4582 | break; | 4583 | break; |
4583 | } | 4584 | } |
4584 | 4585 | ||
4585 | E1000_WRITE_REG(hw, EEWR, register_value); | 4586 | E1000_WRITE_REG(hw, EEWR, register_value); |
4586 | 4587 | ||
4587 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); | 4588 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); |
4588 | 4589 | ||
4589 | if(error) { | 4590 | if(error) { |
4590 | break; | 4591 | break; |
4591 | } | 4592 | } |
4592 | } | 4593 | } |
4593 | 4594 | ||
4594 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); | 4595 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); |
4595 | return error; | 4596 | return error; |
4596 | } | 4597 | } |
@@ -4610,7 +4611,7 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) | |||
4610 | for(i = 0; i < attempts; i++) { | 4611 | for(i = 0; i < attempts; i++) { |
4611 | if(eerd == E1000_EEPROM_POLL_READ) | 4612 | if(eerd == E1000_EEPROM_POLL_READ) |
4612 | reg = E1000_READ_REG(hw, EERD); | 4613 | reg = E1000_READ_REG(hw, EERD); |
4613 | else | 4614 | else |
4614 | reg = E1000_READ_REG(hw, EEWR); | 4615 | reg = E1000_READ_REG(hw, EEWR); |
4615 | 4616 | ||
4616 | if(reg & E1000_EEPROM_RW_REG_DONE) { | 4617 | if(reg & E1000_EEPROM_RW_REG_DONE) { |
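The EERD/EEWR hunks above all follow the same pattern: kick off a word transfer, then call e1000_poll_eerd_eewr_done() to spin until the controller sets its DONE flag or the attempt budget runs out, and abort the word loop on timeout. A minimal, self-contained sketch of that poll; the DONE bit position and attempt count are chosen for illustration only.

    /* Standalone sketch of the bounded done-bit poll shown above. */
    #include <stdint.h>
    #include <stdio.h>

    #define RW_REG_DONE   (1u << 1)     /* assumed position of the DONE flag */
    #define POLL_ATTEMPTS 100000        /* assumed bound on the busy-wait    */

    typedef uint32_t (*reg_read_fn)(void);

    static int poll_done(reg_read_fn read_reg)
    {
        for (unsigned i = 0; i < POLL_ATTEMPTS; i++) {
            if (read_reg() & RW_REG_DONE)
                return 0;               /* success */
            /* the real code delays a few microseconds between polls */
        }
        return -1;                      /* timed out; caller breaks out of its word loop */
    }

    static uint32_t fake_eerd(void) { return RW_REG_DONE; }  /* always ready */

    int main(void)
    {
        printf("poll result: %d\n", poll_done(fake_eerd));
        return 0;
    }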
@@ -5135,7 +5136,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, | |||
5135 | uint32_t i; | 5136 | uint32_t i; |
5136 | uint32_t num_rar_entry; | 5137 | uint32_t num_rar_entry; |
5137 | uint32_t num_mta_entry; | 5138 | uint32_t num_mta_entry; |
5138 | 5139 | ||
5139 | DEBUGFUNC("e1000_mc_addr_list_update"); | 5140 | DEBUGFUNC("e1000_mc_addr_list_update"); |
5140 | 5141 | ||
5141 | /* Set the new number of MC addresses that we are being requested to use. */ | 5142 | /* Set the new number of MC addresses that we are being requested to use. */ |
@@ -6240,7 +6241,7 @@ e1000_check_polarity(struct e1000_hw *hw, | |||
6240 | * 1 - Downshift ocured. | 6241 | * 1 - Downshift ocured. |
6241 | * | 6242 | * |
6242 | * returns: - E1000_ERR_XXX | 6243 | * returns: - E1000_ERR_XXX |
6243 | * E1000_SUCCESS | 6244 | * E1000_SUCCESS |
6244 | * | 6245 | * |
6245 | * For phy's older then IGP, this function reads the Downshift bit in the Phy | 6246 | * For phy's older then IGP, this function reads the Downshift bit in the Phy |
6246 | * Specific Status register. For IGP phy's, it reads the Downgrade bit in the | 6247 | * Specific Status register. For IGP phy's, it reads the Downgrade bit in the |
@@ -6255,7 +6256,7 @@ e1000_check_downshift(struct e1000_hw *hw) | |||
6255 | 6256 | ||
6256 | DEBUGFUNC("e1000_check_downshift"); | 6257 | DEBUGFUNC("e1000_check_downshift"); |
6257 | 6258 | ||
6258 | if(hw->phy_type == e1000_phy_igp || | 6259 | if(hw->phy_type == e1000_phy_igp || |
6259 | hw->phy_type == e1000_phy_igp_2) { | 6260 | hw->phy_type == e1000_phy_igp_2) { |
6260 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, | 6261 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, |
6261 | &phy_data); | 6262 | &phy_data); |
@@ -6684,8 +6685,8 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, | |||
6684 | 6685 | ||
6685 | 6686 | ||
6686 | } else { | 6687 | } else { |
6687 | 6688 | ||
6688 | phy_data |= IGP02E1000_PM_D0_LPLU; | 6689 | phy_data |= IGP02E1000_PM_D0_LPLU; |
6689 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | 6690 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); |
6690 | if (ret_val) | 6691 | if (ret_val) |
6691 | return ret_val; | 6692 | return ret_val; |
@@ -6777,7 +6778,7 @@ int32_t | |||
6777 | e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer) | 6778 | e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer) |
6778 | { | 6779 | { |
6779 | uint8_t i; | 6780 | uint8_t i; |
6780 | uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET; | 6781 | uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET; |
6781 | uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH; | 6782 | uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH; |
6782 | 6783 | ||
6783 | length = (length >> 2); | 6784 | length = (length >> 2); |
@@ -6796,7 +6797,7 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer) | |||
6796 | * and also checks whether the previous command is completed. | 6797 | * and also checks whether the previous command is completed. |
6797 | * It busy waits in case of previous command is not completed. | 6798 | * It busy waits in case of previous command is not completed. |
6798 | * | 6799 | * |
6799 | * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or | 6800 | * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or |
6800 | * timeout | 6801 | * timeout |
6801 | * - E1000_SUCCESS for success. | 6802 | * - E1000_SUCCESS for success. |
6802 | ****************************************************************************/ | 6803 | ****************************************************************************/ |
@@ -6820,7 +6821,7 @@ e1000_mng_enable_host_if(struct e1000_hw * hw) | |||
6820 | msec_delay_irq(1); | 6821 | msec_delay_irq(1); |
6821 | } | 6822 | } |
6822 | 6823 | ||
6823 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { | 6824 | if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { |
6824 | DEBUGOUT("Previous command timeout failed .\n"); | 6825 | DEBUGOUT("Previous command timeout failed .\n"); |
6825 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 6826 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
6826 | } | 6827 | } |
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 150e45e30f87..467c9ed944f8 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | Contact Information: | 23 | Contact Information: |
24 | Linux NICS <linux.nics@intel.com> | 24 | Linux NICS <linux.nics@intel.com> |
25 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | 26 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
26 | 27 | ||
27 | *******************************************************************************/ | 28 | *******************************************************************************/ |
@@ -374,7 +375,7 @@ struct e1000_host_mng_dhcp_cookie{ | |||
374 | }; | 375 | }; |
375 | #endif | 376 | #endif |
376 | 377 | ||
377 | int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, | 378 | int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, |
378 | uint16_t length); | 379 | uint16_t length); |
379 | boolean_t e1000_check_mng_mode(struct e1000_hw *hw); | 380 | boolean_t e1000_check_mng_mode(struct e1000_hw *hw); |
380 | boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); | 381 | boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); |
@@ -1801,7 +1802,7 @@ struct e1000_hw { | |||
1801 | * value2 = [0..64512], default=4096 | 1802 | * value2 = [0..64512], default=4096 |
1802 | * value3 = [0..64512], default=0 | 1803 | * value3 = [0..64512], default=0 |
1803 | */ | 1804 | */ |
1804 | 1805 | ||
1805 | #define E1000_PSRCTL_BSIZE0_MASK 0x0000007F | 1806 | #define E1000_PSRCTL_BSIZE0_MASK 0x0000007F |
1806 | #define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 | 1807 | #define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 |
1807 | #define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 | 1808 | #define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index ed15fcaedaf9..bd709e562778 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -22,51 +22,13 @@ | |||
22 | 22 | ||
23 | Contact Information: | 23 | Contact Information: |
24 | Linux NICS <linux.nics@intel.com> | 24 | Linux NICS <linux.nics@intel.com> |
25 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | 26 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
26 | 27 | ||
27 | *******************************************************************************/ | 28 | *******************************************************************************/ |
28 | 29 | ||
29 | #include "e1000.h" | 30 | #include "e1000.h" |
30 | 31 | ||
31 | /* Change Log | ||
32 | * 7.0.33 3-Feb-2006 | ||
33 | * o Added another fix for the pass false carrier bit | ||
34 | * 7.0.32 24-Jan-2006 | ||
35 | * o Need to rebuild with noew version number for the pass false carrier | ||
36 | * fix in e1000_hw.c | ||
37 | * 7.0.30 18-Jan-2006 | ||
38 | * o fixup for tso workaround to disable it for pci-x | ||
39 | * o fix mem leak on 82542 | ||
40 | * o fixes for 10 Mb/s connections and incorrect stats | ||
41 | * 7.0.28 01/06/2006 | ||
42 | * o hardware workaround to only set "speed mode" bit for 1G link. | ||
43 | * 7.0.26 12/23/2005 | ||
44 | * o wake on lan support modified for device ID 10B5 | ||
45 | * o fix dhcp + vlan issue not making it to the iAMT firmware | ||
46 | * 7.0.24 12/9/2005 | ||
47 | * o New hardware support for the Gigabit NIC embedded in the south bridge | ||
48 | * o Fixes to the recycling logic (skb->tail) from IBM LTC | ||
49 | * 6.3.9 12/16/2005 | ||
50 | * o incorporate fix for recycled skbs from IBM LTC | ||
51 | * 6.3.7 11/18/2005 | ||
52 | * o Honor eeprom setting for enabling/disabling Wake On Lan | ||
53 | * 6.3.5 11/17/2005 | ||
54 | * o Fix memory leak in rx ring handling for PCI Express adapters | ||
55 | * 6.3.4 11/8/05 | ||
56 | * o Patch from Jesper Juhl to remove redundant NULL checks for kfree | ||
57 | * 6.3.2 9/20/05 | ||
58 | * o Render logic that sets/resets DRV_LOAD as inline functions to | ||
59 | * avoid code replication. If f/w is AMT then set DRV_LOAD only when | ||
60 | * network interface is open. | ||
61 | * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs. | ||
62 | * o Adjust PBA partioning for Jumbo frames using MTU size and not | ||
63 | * rx_buffer_len | ||
64 | * 6.3.1 9/19/05 | ||
65 | * o Use adapter->tx_timeout_factor in Tx Hung Detect logic | ||
66 | * (e1000_clean_tx_irq) | ||
67 | * o Support for 8086:10B5 device (Quad Port) | ||
68 | */ | ||
69 | |||
70 | char e1000_driver_name[] = "e1000"; | 32 | char e1000_driver_name[] = "e1000"; |
71 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | 33 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; |
72 | #ifndef CONFIG_E1000_NAPI | 34 | #ifndef CONFIG_E1000_NAPI |
@@ -74,9 +36,9 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | |||
74 | #else | 36 | #else |
75 | #define DRIVERNAPI "-NAPI" | 37 | #define DRIVERNAPI "-NAPI" |
76 | #endif | 38 | #endif |
77 | #define DRV_VERSION "7.0.33-k2"DRIVERNAPI | 39 | #define DRV_VERSION "7.0.38-k4"DRIVERNAPI |
78 | char e1000_driver_version[] = DRV_VERSION; | 40 | char e1000_driver_version[] = DRV_VERSION; |
79 | static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; | 41 | static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; |
80 | 42 | ||
81 | /* e1000_pci_tbl - PCI Device ID Table | 43 | /* e1000_pci_tbl - PCI Device ID Table |
82 | * | 44 | * |
@@ -208,8 +170,8 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter); | |||
208 | static void e1000_tx_timeout(struct net_device *dev); | 170 | static void e1000_tx_timeout(struct net_device *dev); |
209 | static void e1000_reset_task(struct net_device *dev); | 171 | static void e1000_reset_task(struct net_device *dev); |
210 | static void e1000_smartspeed(struct e1000_adapter *adapter); | 172 | static void e1000_smartspeed(struct e1000_adapter *adapter); |
211 | static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, | 173 | static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, |
212 | struct sk_buff *skb); | 174 | struct sk_buff *skb); |
213 | 175 | ||
214 | static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); | 176 | static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); |
215 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); | 177 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); |
@@ -293,7 +255,7 @@ module_exit(e1000_exit_module); | |||
293 | * @adapter: board private structure | 255 | * @adapter: board private structure |
294 | **/ | 256 | **/ |
295 | 257 | ||
296 | static inline void | 258 | static void |
297 | e1000_irq_disable(struct e1000_adapter *adapter) | 259 | e1000_irq_disable(struct e1000_adapter *adapter) |
298 | { | 260 | { |
299 | atomic_inc(&adapter->irq_sem); | 261 | atomic_inc(&adapter->irq_sem); |
@@ -307,7 +269,7 @@ e1000_irq_disable(struct e1000_adapter *adapter) | |||
307 | * @adapter: board private structure | 269 | * @adapter: board private structure |
308 | **/ | 270 | **/ |
309 | 271 | ||
310 | static inline void | 272 | static void |
311 | e1000_irq_enable(struct e1000_adapter *adapter) | 273 | e1000_irq_enable(struct e1000_adapter *adapter) |
312 | { | 274 | { |
313 | if (likely(atomic_dec_and_test(&adapter->irq_sem))) { | 275 | if (likely(atomic_dec_and_test(&adapter->irq_sem))) { |
@@ -348,10 +310,10 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
348 | * For ASF and Pass Through versions of f/w this means that the | 310 | * For ASF and Pass Through versions of f/w this means that the |
349 | * driver is no longer loaded. For AMT version (only with 82573) i | 311 | * driver is no longer loaded. For AMT version (only with 82573) i |
350 | * of the f/w this means that the netowrk i/f is closed. | 312 | * of the f/w this means that the netowrk i/f is closed. |
351 | * | 313 | * |
352 | **/ | 314 | **/ |
353 | 315 | ||
354 | static inline void | 316 | static void |
355 | e1000_release_hw_control(struct e1000_adapter *adapter) | 317 | e1000_release_hw_control(struct e1000_adapter *adapter) |
356 | { | 318 | { |
357 | uint32_t ctrl_ext; | 319 | uint32_t ctrl_ext; |
@@ -361,6 +323,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter) | |||
361 | switch (adapter->hw.mac_type) { | 323 | switch (adapter->hw.mac_type) { |
362 | case e1000_82571: | 324 | case e1000_82571: |
363 | case e1000_82572: | 325 | case e1000_82572: |
326 | case e1000_80003es2lan: | ||
364 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 327 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); |
365 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | 328 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, |
366 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | 329 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); |
@@ -379,13 +342,13 @@ e1000_release_hw_control(struct e1000_adapter *adapter) | |||
379 | * @adapter: address of board private structure | 342 | * @adapter: address of board private structure |
380 | * | 343 | * |
381 | * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. | 344 | * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. |
382 | * For ASF and Pass Through versions of f/w this means that | 345 | * For ASF and Pass Through versions of f/w this means that |
383 | * the driver is loaded. For AMT version (only with 82573) | 346 | * the driver is loaded. For AMT version (only with 82573) |
384 | * of the f/w this means that the netowrk i/f is open. | 347 | * of the f/w this means that the netowrk i/f is open. |
385 | * | 348 | * |
386 | **/ | 349 | **/ |
387 | 350 | ||
388 | static inline void | 351 | static void |
389 | e1000_get_hw_control(struct e1000_adapter *adapter) | 352 | e1000_get_hw_control(struct e1000_adapter *adapter) |
390 | { | 353 | { |
391 | uint32_t ctrl_ext; | 354 | uint32_t ctrl_ext; |
@@ -394,6 +357,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter) | |||
394 | switch (adapter->hw.mac_type) { | 357 | switch (adapter->hw.mac_type) { |
395 | case e1000_82571: | 358 | case e1000_82571: |
396 | case e1000_82572: | 359 | case e1000_82572: |
360 | case e1000_80003es2lan: | ||
397 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 361 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); |
398 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | 362 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, |
399 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); | 363 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); |
@@ -421,7 +385,7 @@ e1000_up(struct e1000_adapter *adapter) | |||
421 | uint16_t mii_reg; | 385 | uint16_t mii_reg; |
422 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | 386 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); |
423 | if (mii_reg & MII_CR_POWER_DOWN) | 387 | if (mii_reg & MII_CR_POWER_DOWN) |
424 | e1000_phy_reset(&adapter->hw); | 388 | e1000_phy_hw_reset(&adapter->hw); |
425 | } | 389 | } |
426 | 390 | ||
427 | e1000_set_multi(netdev); | 391 | e1000_set_multi(netdev); |
@@ -711,8 +675,8 @@ e1000_probe(struct pci_dev *pdev, | |||
711 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); | 675 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); |
712 | 676 | ||
713 | /* if ksp3, indicate if it's port a being setup */ | 677 | /* if ksp3, indicate if it's port a being setup */ |
714 | if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 && | 678 | if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 && |
715 | e1000_ksp3_port_a == 0) | 679 | e1000_ksp3_port_a == 0) |
716 | adapter->ksp3_port_a = 1; | 680 | adapter->ksp3_port_a = 1; |
717 | e1000_ksp3_port_a++; | 681 | e1000_ksp3_port_a++; |
718 | /* Reset for multiple KP3 adapters */ | 682 | /* Reset for multiple KP3 adapters */ |
@@ -740,9 +704,9 @@ e1000_probe(struct pci_dev *pdev, | |||
740 | if (pci_using_dac) | 704 | if (pci_using_dac) |
741 | netdev->features |= NETIF_F_HIGHDMA; | 705 | netdev->features |= NETIF_F_HIGHDMA; |
742 | 706 | ||
743 | /* hard_start_xmit is safe against parallel locking */ | 707 | /* hard_start_xmit is safe against parallel locking */ |
744 | netdev->features |= NETIF_F_LLTX; | 708 | netdev->features |= NETIF_F_LLTX; |
745 | 709 | ||
746 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); | 710 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); |
747 | 711 | ||
748 | /* before reading the EEPROM, reset the controller to | 712 | /* before reading the EEPROM, reset the controller to |
@@ -972,8 +936,8 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
972 | 936 | ||
973 | pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); | 937 | pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); |
974 | 938 | ||
975 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; | 939 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE; |
976 | adapter->rx_ps_bsize0 = E1000_RXBUFFER_256; | 940 | adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; |
977 | hw->max_frame_size = netdev->mtu + | 941 | hw->max_frame_size = netdev->mtu + |
978 | ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | 942 | ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
979 | hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; | 943 | hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; |
@@ -1181,7 +1145,7 @@ e1000_close(struct net_device *netdev) | |||
1181 | * @start: address of beginning of memory | 1145 | * @start: address of beginning of memory |
1182 | * @len: length of memory | 1146 | * @len: length of memory |
1183 | **/ | 1147 | **/ |
1184 | static inline boolean_t | 1148 | static boolean_t |
1185 | e1000_check_64k_bound(struct e1000_adapter *adapter, | 1149 | e1000_check_64k_bound(struct e1000_adapter *adapter, |
1186 | void *start, unsigned long len) | 1150 | void *start, unsigned long len) |
1187 | { | 1151 | { |
@@ -1599,14 +1563,21 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1599 | rctl |= E1000_RCTL_LPE; | 1563 | rctl |= E1000_RCTL_LPE; |
1600 | 1564 | ||
1601 | /* Setup buffer sizes */ | 1565 | /* Setup buffer sizes */ |
1602 | if (adapter->hw.mac_type >= e1000_82571) { | 1566 | rctl &= ~E1000_RCTL_SZ_4096; |
1603 | /* We can now specify buffers in 1K increments. | 1567 | rctl |= E1000_RCTL_BSEX; |
1604 | * BSIZE and BSEX are ignored in this case. */ | 1568 | switch (adapter->rx_buffer_len) { |
1605 | rctl |= adapter->rx_buffer_len << 0x11; | 1569 | case E1000_RXBUFFER_256: |
1606 | } else { | 1570 | rctl |= E1000_RCTL_SZ_256; |
1607 | rctl &= ~E1000_RCTL_SZ_4096; | 1571 | rctl &= ~E1000_RCTL_BSEX; |
1608 | rctl |= E1000_RCTL_BSEX; | 1572 | break; |
1609 | switch (adapter->rx_buffer_len) { | 1573 | case E1000_RXBUFFER_512: |
1574 | rctl |= E1000_RCTL_SZ_512; | ||
1575 | rctl &= ~E1000_RCTL_BSEX; | ||
1576 | break; | ||
1577 | case E1000_RXBUFFER_1024: | ||
1578 | rctl |= E1000_RCTL_SZ_1024; | ||
1579 | rctl &= ~E1000_RCTL_BSEX; | ||
1580 | break; | ||
1610 | case E1000_RXBUFFER_2048: | 1581 | case E1000_RXBUFFER_2048: |
1611 | default: | 1582 | default: |
1612 | rctl |= E1000_RCTL_SZ_2048; | 1583 | rctl |= E1000_RCTL_SZ_2048; |
@@ -1621,7 +1592,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1621 | case E1000_RXBUFFER_16384: | 1592 | case E1000_RXBUFFER_16384: |
1622 | rctl |= E1000_RCTL_SZ_16384; | 1593 | rctl |= E1000_RCTL_SZ_16384; |
1623 | break; | 1594 | break; |
1624 | } | ||
1625 | } | 1595 | } |
1626 | 1596 | ||
1627 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT | 1597 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT |
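The rewritten e1000_setup_rctl() above drops the 82571+ special case and always encodes the receive buffer size through the RCTL size field plus the BSEX (buffer size extension) bit: the small sizes clear BSEX, while the 4K/8K/16K sizes in the following case labels keep it set. Below is a compilable sketch of that selection; the bit values are invented for illustration and only the shape of the switch mirrors the driver.

    /* Sketch of the RCTL buffer-size encoding; constants are assumptions. */
    #include <stdint.h>
    #include <stdio.h>

    #define RCTL_BSEX     (1u << 25)       /* assumed bit: extended buffer sizes */
    #define RCTL_SZ_MASK  (3u << 16)       /* assumed 2-bit size-code field      */
    #define RCTL_SZ_2048  (0u << 16)
    #define RCTL_SZ_1024  (1u << 16)
    #define RCTL_SZ_512   (2u << 16)
    #define RCTL_SZ_256   (3u << 16)

    static uint32_t rctl_for_buffer_len(uint32_t rctl, unsigned rx_buffer_len)
    {
        rctl &= ~RCTL_SZ_MASK;
        rctl |= RCTL_BSEX;                 /* start from the extended-size default */

        switch (rx_buffer_len) {
        case 256:  rctl |= RCTL_SZ_256;  rctl &= ~RCTL_BSEX; break;
        case 512:  rctl |= RCTL_SZ_512;  rctl &= ~RCTL_BSEX; break;
        case 1024: rctl |= RCTL_SZ_1024; rctl &= ~RCTL_BSEX; break;
        case 2048:
        default:   rctl |= RCTL_SZ_2048; rctl &= ~RCTL_BSEX; break;
        /* 4096/8192/16384 would keep BSEX set and reuse the same size codes */
        }
        return rctl;
    }

    int main(void)
    {
        printf("rctl for 1024-byte buffers: 0x%08x\n", rctl_for_buffer_len(0, 1024));
        return 0;
    }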
@@ -1715,7 +1685,7 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1715 | if (hw->mac_type >= e1000_82571) { | 1685 | if (hw->mac_type >= e1000_82571) { |
1716 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 1686 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); |
1717 | /* Reset delay timers after every interrupt */ | 1687 | /* Reset delay timers after every interrupt */ |
1718 | ctrl_ext |= E1000_CTRL_EXT_CANC; | 1688 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; |
1719 | #ifdef CONFIG_E1000_NAPI | 1689 | #ifdef CONFIG_E1000_NAPI |
1720 | /* Auto-Mask interrupts upon ICR read. */ | 1690 | /* Auto-Mask interrupts upon ICR read. */ |
1721 | ctrl_ext |= E1000_CTRL_EXT_IAME; | 1691 | ctrl_ext |= E1000_CTRL_EXT_IAME; |
@@ -1807,7 +1777,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter) | |||
1807 | e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); | 1777 | e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); |
1808 | } | 1778 | } |
1809 | 1779 | ||
1810 | static inline void | 1780 | static void |
1811 | e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, | 1781 | e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, |
1812 | struct e1000_buffer *buffer_info) | 1782 | struct e1000_buffer *buffer_info) |
1813 | { | 1783 | { |
@@ -2247,6 +2217,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2247 | 2217 | ||
2248 | if (link) { | 2218 | if (link) { |
2249 | if (!netif_carrier_ok(netdev)) { | 2219 | if (!netif_carrier_ok(netdev)) { |
2220 | boolean_t txb2b = 1; | ||
2250 | e1000_get_speed_and_duplex(&adapter->hw, | 2221 | e1000_get_speed_and_duplex(&adapter->hw, |
2251 | &adapter->link_speed, | 2222 | &adapter->link_speed, |
2252 | &adapter->link_duplex); | 2223 | &adapter->link_duplex); |
@@ -2260,23 +2231,22 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2260 | * and adjust the timeout factor */ | 2231 | * and adjust the timeout factor */ |
2261 | netdev->tx_queue_len = adapter->tx_queue_len; | 2232 | netdev->tx_queue_len = adapter->tx_queue_len; |
2262 | adapter->tx_timeout_factor = 1; | 2233 | adapter->tx_timeout_factor = 1; |
2263 | adapter->txb2b = 1; | ||
2264 | switch (adapter->link_speed) { | 2234 | switch (adapter->link_speed) { |
2265 | case SPEED_10: | 2235 | case SPEED_10: |
2266 | adapter->txb2b = 0; | 2236 | txb2b = 0; |
2267 | netdev->tx_queue_len = 10; | 2237 | netdev->tx_queue_len = 10; |
2268 | adapter->tx_timeout_factor = 8; | 2238 | adapter->tx_timeout_factor = 8; |
2269 | break; | 2239 | break; |
2270 | case SPEED_100: | 2240 | case SPEED_100: |
2271 | adapter->txb2b = 0; | 2241 | txb2b = 0; |
2272 | netdev->tx_queue_len = 100; | 2242 | netdev->tx_queue_len = 100; |
2273 | /* maybe add some timeout factor ? */ | 2243 | /* maybe add some timeout factor ? */ |
2274 | break; | 2244 | break; |
2275 | } | 2245 | } |
2276 | 2246 | ||
2277 | if ((adapter->hw.mac_type == e1000_82571 || | 2247 | if ((adapter->hw.mac_type == e1000_82571 || |
2278 | adapter->hw.mac_type == e1000_82572) && | 2248 | adapter->hw.mac_type == e1000_82572) && |
2279 | adapter->txb2b == 0) { | 2249 | txb2b == 0) { |
2280 | #define SPEED_MODE_BIT (1 << 21) | 2250 | #define SPEED_MODE_BIT (1 << 21) |
2281 | uint32_t tarc0; | 2251 | uint32_t tarc0; |
2282 | tarc0 = E1000_READ_REG(&adapter->hw, TARC0); | 2252 | tarc0 = E1000_READ_REG(&adapter->hw, TARC0); |
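The watchdog hunk above moves txb2b from the adapter structure to a local variable, but the tuning it feeds is unchanged: slower links get a shorter tx queue, 10 Mb/s also gets a larger Tx-hang timeout factor, and a cleared txb2b then gates the 82571/82572 speed-mode-bit workaround. A standalone sketch of the per-speed tuning, using a plain struct in place of the adapter and netdev fields.

    /* Sketch of the per-link-speed tuning applied when carrier comes up. */
    #include <stdio.h>

    struct link_tuning {
        unsigned tx_queue_len;
        unsigned tx_timeout_factor;
        int      txb2b;     /* transmit back-to-back: stays set only at gigabit */
    };

    static struct link_tuning tune_for_speed(unsigned speed, unsigned default_qlen)
    {
        struct link_tuning t = { default_qlen, 1, 1 };

        switch (speed) {
        case 10:
            t.txb2b = 0;
            t.tx_queue_len = 10;
            t.tx_timeout_factor = 8;
            break;
        case 100:
            t.txb2b = 0;
            t.tx_queue_len = 100;
            break;
        default:            /* 1000 Mb/s keeps the defaults */
            break;
        }
        return t;
    }

    int main(void)
    {
        struct link_tuning t = tune_for_speed(10, 1000);
        printf("qlen=%u factor=%u txb2b=%d\n",
               t.tx_queue_len, t.tx_timeout_factor, t.txb2b);
        return 0;
    }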
@@ -2400,7 +2370,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
2400 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 | 2370 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 |
2401 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 | 2371 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 |
2402 | 2372 | ||
2403 | static inline int | 2373 | static int |
2404 | e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | 2374 | e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, |
2405 | struct sk_buff *skb) | 2375 | struct sk_buff *skb) |
2406 | { | 2376 | { |
@@ -2422,7 +2392,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2422 | 2392 | ||
2423 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 2393 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); |
2424 | mss = skb_shinfo(skb)->tso_size; | 2394 | mss = skb_shinfo(skb)->tso_size; |
2425 | if (skb->protocol == ntohs(ETH_P_IP)) { | 2395 | if (skb->protocol == htons(ETH_P_IP)) { |
2426 | skb->nh.iph->tot_len = 0; | 2396 | skb->nh.iph->tot_len = 0; |
2427 | skb->nh.iph->check = 0; | 2397 | skb->nh.iph->check = 0; |
2428 | skb->h.th->check = | 2398 | skb->h.th->check = |
@@ -2480,7 +2450,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2480 | return FALSE; | 2450 | return FALSE; |
2481 | } | 2451 | } |
2482 | 2452 | ||
2483 | static inline boolean_t | 2453 | static boolean_t |
2484 | e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | 2454 | e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, |
2485 | struct sk_buff *skb) | 2455 | struct sk_buff *skb) |
2486 | { | 2456 | { |
@@ -2516,7 +2486,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2516 | #define E1000_MAX_TXD_PWR 12 | 2486 | #define E1000_MAX_TXD_PWR 12 |
2517 | #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) | 2487 | #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) |
2518 | 2488 | ||
2519 | static inline int | 2489 | static int |
2520 | e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | 2490 | e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, |
2521 | struct sk_buff *skb, unsigned int first, unsigned int max_per_txd, | 2491 | struct sk_buff *skb, unsigned int first, unsigned int max_per_txd, |
2522 | unsigned int nr_frags, unsigned int mss) | 2492 | unsigned int nr_frags, unsigned int mss) |
@@ -2625,7 +2595,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2625 | return count; | 2595 | return count; |
2626 | } | 2596 | } |
2627 | 2597 | ||
2628 | static inline void | 2598 | static void |
2629 | e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | 2599 | e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, |
2630 | int tx_flags, int count) | 2600 | int tx_flags, int count) |
2631 | { | 2601 | { |
@@ -2689,7 +2659,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2689 | #define E1000_FIFO_HDR 0x10 | 2659 | #define E1000_FIFO_HDR 0x10 |
2690 | #define E1000_82547_PAD_LEN 0x3E0 | 2660 | #define E1000_82547_PAD_LEN 0x3E0 |
2691 | 2661 | ||
2692 | static inline int | 2662 | static int |
2693 | e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) | 2663 | e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) |
2694 | { | 2664 | { |
2695 | uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; | 2665 | uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; |
@@ -2716,7 +2686,7 @@ no_fifo_stall_required: | |||
2716 | } | 2686 | } |
2717 | 2687 | ||
2718 | #define MINIMUM_DHCP_PACKET_SIZE 282 | 2688 | #define MINIMUM_DHCP_PACKET_SIZE 282 |
2719 | static inline int | 2689 | static int |
2720 | e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) | 2690 | e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) |
2721 | { | 2691 | { |
2722 | struct e1000_hw *hw = &adapter->hw; | 2692 | struct e1000_hw *hw = &adapter->hw; |
@@ -2764,7 +2734,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2764 | unsigned int nr_frags = 0; | 2734 | unsigned int nr_frags = 0; |
2765 | unsigned int mss = 0; | 2735 | unsigned int mss = 0; |
2766 | int count = 0; | 2736 | int count = 0; |
2767 | int tso; | 2737 | int tso; |
2768 | unsigned int f; | 2738 | unsigned int f; |
2769 | len -= skb->data_len; | 2739 | len -= skb->data_len; |
2770 | 2740 | ||
@@ -2777,7 +2747,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2777 | 2747 | ||
2778 | #ifdef NETIF_F_TSO | 2748 | #ifdef NETIF_F_TSO |
2779 | mss = skb_shinfo(skb)->tso_size; | 2749 | mss = skb_shinfo(skb)->tso_size; |
2780 | /* The controller does a simple calculation to | 2750 | /* The controller does a simple calculation to |
2781 | * make sure there is enough room in the FIFO before | 2751 | * make sure there is enough room in the FIFO before |
2782 | * initiating the DMA for each buffer. The calc is: | 2752 | * initiating the DMA for each buffer. The calc is: |
2783 | * 4 = ceil(buffer len/mss). To make sure we don't | 2753 | * 4 = ceil(buffer len/mss). To make sure we don't |
@@ -2800,7 +2770,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2800 | case e1000_82573: | 2770 | case e1000_82573: |
2801 | pull_size = min((unsigned int)4, skb->data_len); | 2771 | pull_size = min((unsigned int)4, skb->data_len); |
2802 | if (!__pskb_pull_tail(skb, pull_size)) { | 2772 | if (!__pskb_pull_tail(skb, pull_size)) { |
2803 | printk(KERN_ERR | 2773 | printk(KERN_ERR |
2804 | "__pskb_pull_tail failed.\n"); | 2774 | "__pskb_pull_tail failed.\n"); |
2805 | dev_kfree_skb_any(skb); | 2775 | dev_kfree_skb_any(skb); |
2806 | return NETDEV_TX_OK; | 2776 | return NETDEV_TX_OK; |
@@ -2901,7 +2871,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2901 | /* Old method was to assume IPv4 packet by default if TSO was enabled. | 2871 | /* Old method was to assume IPv4 packet by default if TSO was enabled. |
2902 | * 82571 hardware supports TSO capabilities for IPv6 as well... | 2872 | * 82571 hardware supports TSO capabilities for IPv6 as well... |
2903 | * no longer assume, we must. */ | 2873 | * no longer assume, we must. */ |
2904 | if (likely(skb->protocol == ntohs(ETH_P_IP))) | 2874 | if (likely(skb->protocol == htons(ETH_P_IP))) |
2905 | tx_flags |= E1000_TX_FLAGS_IPV4; | 2875 | tx_flags |= E1000_TX_FLAGS_IPV4; |
2906 | 2876 | ||
2907 | e1000_tx_queue(adapter, tx_ring, tx_flags, | 2877 | e1000_tx_queue(adapter, tx_ring, tx_flags, |
@@ -2982,8 +2952,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
2982 | 2952 | ||
2983 | /* Adapter-specific max frame size limits. */ | 2953 | /* Adapter-specific max frame size limits. */ |
2984 | switch (adapter->hw.mac_type) { | 2954 | switch (adapter->hw.mac_type) { |
2985 | case e1000_82542_rev2_0: | 2955 | case e1000_undefined ... e1000_82542_rev2_1: |
2986 | case e1000_82542_rev2_1: | ||
2987 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | 2956 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { |
2988 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); | 2957 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); |
2989 | return -EINVAL; | 2958 | return -EINVAL; |
@@ -3017,27 +2986,32 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3017 | break; | 2986 | break; |
3018 | } | 2987 | } |
3019 | 2988 | ||
3020 | 2989 | /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | |
3021 | if (adapter->hw.mac_type > e1000_82547_rev_2) { | 2990 | * means we reserve 2 more, this pushes us to allocate from the next |
3022 | adapter->rx_buffer_len = max_frame; | 2991 | * larger slab size |
3023 | E1000_ROUNDUP(adapter->rx_buffer_len, 1024); | 2992 | * i.e. RXBUFFER_2048 --> size-4096 slab */ |
3024 | } else { | 2993 | |
3025 | if(unlikely((adapter->hw.mac_type < e1000_82543) && | 2994 | if (max_frame <= E1000_RXBUFFER_256) |
3026 | (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { | 2995 | adapter->rx_buffer_len = E1000_RXBUFFER_256; |
3027 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported " | 2996 | else if (max_frame <= E1000_RXBUFFER_512) |
3028 | "on 82542\n"); | 2997 | adapter->rx_buffer_len = E1000_RXBUFFER_512; |
3029 | return -EINVAL; | 2998 | else if (max_frame <= E1000_RXBUFFER_1024) |
3030 | } else { | 2999 | adapter->rx_buffer_len = E1000_RXBUFFER_1024; |
3031 | if(max_frame <= E1000_RXBUFFER_2048) | 3000 | else if (max_frame <= E1000_RXBUFFER_2048) |
3032 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; | 3001 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; |
3033 | else if(max_frame <= E1000_RXBUFFER_4096) | 3002 | else if (max_frame <= E1000_RXBUFFER_4096) |
3034 | adapter->rx_buffer_len = E1000_RXBUFFER_4096; | 3003 | adapter->rx_buffer_len = E1000_RXBUFFER_4096; |
3035 | else if(max_frame <= E1000_RXBUFFER_8192) | 3004 | else if (max_frame <= E1000_RXBUFFER_8192) |
3036 | adapter->rx_buffer_len = E1000_RXBUFFER_8192; | 3005 | adapter->rx_buffer_len = E1000_RXBUFFER_8192; |
3037 | else if(max_frame <= E1000_RXBUFFER_16384) | 3006 | else if (max_frame <= E1000_RXBUFFER_16384) |
3038 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; | 3007 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; |
3039 | } | 3008 | |
3040 | } | 3009 | /* adjust allocation if LPE protects us, and we aren't using SBP */ |
3010 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 | ||
3011 | if (!adapter->hw.tbi_compatibility_on && | ||
3012 | ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || | ||
3013 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) | ||
3014 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; | ||
3041 | 3015 | ||
3042 | netdev->mtu = new_mtu; | 3016 | netdev->mtu = new_mtu; |
3043 | 3017 | ||
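The rewritten MTU path above replaces the old round-up-to-1K logic with a fixed ladder of supported buffer sizes, then shrinks the buffer to the 1522-byte VLAN frame size when long-packet handling makes that safe, so the skb allocation (plus the reserve mentioned in the NOTE) stays within a smaller slab. Here is a compilable sketch of that selection; 1522 appears in the hunk, while 1518 for the untagged maximum frame is an assumption.

    /* Sketch of the max_frame -> rx_buffer_len selection shown above. */
    #include <stdio.h>

    #define MAX_ETH_FRAME      1518   /* assumed untagged maximum frame size */
    #define MAX_ETH_VLAN_FRAME 1522   /* value defined in the hunk           */

    static unsigned rx_buffer_len_for(unsigned max_frame, int tbi_compat_on)
    {
        static const unsigned buckets[] = { 256, 512, 1024, 2048, 4096, 8192, 16384 };
        unsigned len = buckets[sizeof(buckets) / sizeof(buckets[0]) - 1];

        /* smallest supported buffer that still holds the whole frame */
        for (unsigned i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++) {
            if (max_frame <= buckets[i]) {
                len = buckets[i];
                break;
            }
        }

        /* standard (possibly VLAN-tagged) frames can use a tighter buffer when
         * long-packet protection applies and TBI workarounds are not in play */
        if (!tbi_compat_on &&
            (max_frame == MAX_ETH_FRAME || max_frame == MAX_ETH_VLAN_FRAME))
            len = MAX_ETH_VLAN_FRAME;

        return len;
    }

    int main(void)
    {
        printf("1518 -> %u, 9018 -> %u\n",
               rx_buffer_len_for(1518, 0), rx_buffer_len_for(9018, 0));
        return 0;
    }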
@@ -3165,7 +3139,6 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3165 | adapter->stats.crcerrs + adapter->stats.algnerrc + | 3139 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
3166 | adapter->stats.ruc + adapter->stats.roc + | 3140 | adapter->stats.ruc + adapter->stats.roc + |
3167 | adapter->stats.cexterr; | 3141 | adapter->stats.cexterr; |
3168 | adapter->net_stats.rx_dropped = 0; | ||
3169 | adapter->net_stats.rx_length_errors = adapter->stats.ruc + | 3142 | adapter->net_stats.rx_length_errors = adapter->stats.ruc + |
3170 | adapter->stats.roc; | 3143 | adapter->stats.roc; |
3171 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; | 3144 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; |
@@ -3391,13 +3364,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3391 | 3364 | ||
3392 | tx_ring->next_to_clean = i; | 3365 | tx_ring->next_to_clean = i; |
3393 | 3366 | ||
3394 | spin_lock(&tx_ring->tx_lock); | 3367 | #define TX_WAKE_THRESHOLD 32 |
3395 | |||
3396 | if (unlikely(cleaned && netif_queue_stopped(netdev) && | 3368 | if (unlikely(cleaned && netif_queue_stopped(netdev) && |
3397 | netif_carrier_ok(netdev))) | 3369 | netif_carrier_ok(netdev))) { |
3398 | netif_wake_queue(netdev); | 3370 | spin_lock(&tx_ring->tx_lock); |
3399 | 3371 | if (netif_queue_stopped(netdev) && | |
3400 | spin_unlock(&tx_ring->tx_lock); | 3372 | (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) |
3373 | netif_wake_queue(netdev); | ||
3374 | spin_unlock(&tx_ring->tx_lock); | ||
3375 | } | ||
3401 | 3376 | ||
3402 | if (adapter->detect_tx_hung) { | 3377 | if (adapter->detect_tx_hung) { |
3403 | /* Detect a transmit hang in hardware, this serializes the | 3378 | /* Detect a transmit hang in hardware, this serializes the |
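The clean_tx_irq change above stops taking tx_lock on every pass: the queue is only restarted when this pass actually reclaimed descriptors, the queue is stopped with carrier up, and, re-checked under the lock, at least TX_WAKE_THRESHOLD descriptors are free. A lock-free sketch of just that decision, with the locking reduced to a comment.

    /* Sketch of the wake-queue test introduced above. */
    #include <stdio.h>

    #define TX_WAKE_THRESHOLD 32   /* value from the hunk */

    struct tx_state {
        int cleaned;            /* did this pass reclaim any descriptors? */
        int queue_stopped;      /* netif_queue_stopped()                  */
        int carrier_ok;         /* netif_carrier_ok()                     */
        unsigned desc_unused;   /* E1000_DESC_UNUSED(tx_ring)             */
    };

    static int should_wake_queue(const struct tx_state *s)
    {
        if (!(s->cleaned && s->queue_stopped && s->carrier_ok))
            return 0;

        /* the driver takes spin_lock(&tx_ring->tx_lock) around this re-check */
        return s->queue_stopped && s->desc_unused >= TX_WAKE_THRESHOLD;
    }

    int main(void)
    {
        struct tx_state s = { 1, 1, 1, 40 };
        printf("wake=%d\n", should_wake_queue(&s));
        s.desc_unused = 8;
        printf("wake=%d\n", should_wake_queue(&s));
        return 0;
    }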
@@ -3445,7 +3420,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3445 | * @sk_buff: socket buffer with received data | 3420 | * @sk_buff: socket buffer with received data |
3446 | **/ | 3421 | **/ |
3447 | 3422 | ||
3448 | static inline void | 3423 | static void |
3449 | e1000_rx_checksum(struct e1000_adapter *adapter, | 3424 | e1000_rx_checksum(struct e1000_adapter *adapter, |
3450 | uint32_t status_err, uint32_t csum, | 3425 | uint32_t status_err, uint32_t csum, |
3451 | struct sk_buff *skb) | 3426 | struct sk_buff *skb) |
@@ -3569,7 +3544,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3569 | flags); | 3544 | flags); |
3570 | length--; | 3545 | length--; |
3571 | } else { | 3546 | } else { |
3572 | dev_kfree_skb_irq(skb); | 3547 | /* recycle */ |
3548 | buffer_info->skb = skb; | ||
3573 | goto next_desc; | 3549 | goto next_desc; |
3574 | } | 3550 | } |
3575 | } | 3551 | } |
@@ -3677,6 +3653,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3677 | i = rx_ring->next_to_clean; | 3653 | i = rx_ring->next_to_clean; |
3678 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 3654 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
3679 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); | 3655 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); |
3656 | buffer_info = &rx_ring->buffer_info[i]; | ||
3680 | 3657 | ||
3681 | while (staterr & E1000_RXD_STAT_DD) { | 3658 | while (staterr & E1000_RXD_STAT_DD) { |
3682 | buffer_info = &rx_ring->buffer_info[i]; | 3659 | buffer_info = &rx_ring->buffer_info[i]; |
@@ -3737,9 +3714,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3737 | 3714 | ||
3738 | /* page alloc/put takes too long and effects small packet | 3715 | /* page alloc/put takes too long and effects small packet |
3739 | * throughput, so unsplit small packets and save the alloc/put*/ | 3716 | * throughput, so unsplit small packets and save the alloc/put*/ |
3740 | if (l1 && ((length + l1) < E1000_CB_LENGTH)) { | 3717 | if (l1 && ((length + l1) <= adapter->rx_ps_bsize0)) { |
3741 | u8 *vaddr; | 3718 | u8 *vaddr; |
3742 | /* there is no documentation about how to call | 3719 | /* there is no documentation about how to call |
3743 | * kmap_atomic, so we can't hold the mapping | 3720 | * kmap_atomic, so we can't hold the mapping |
3744 | * very long */ | 3721 | * very long */ |
3745 | pci_dma_sync_single_for_cpu(pdev, | 3722 | pci_dma_sync_single_for_cpu(pdev, |
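The packet-split hunk above changes the copy threshold from a fixed constant to rx_ps_bsize0: when the data destined for the split page is small enough to fit alongside the header in buffer 0, the driver copies it into the skb and recycles the page rather than attaching the page as a fragment, since page alloc/put dominates small-packet cost. A minimal sketch of just that decision, using the 128-byte bsize0 set earlier in this patch.

    /* Sketch of the copy-vs-attach decision for packet-split receives. */
    #include <stdio.h>

    #define RX_PS_BSIZE0 128    /* header buffer size, per the e1000_sw_init hunk */

    /* returns 1 when the trailing fragment should be copied into the skb,
     * 0 when the page should be attached as a real fragment */
    static int copy_small_fragment(unsigned header_len, unsigned frag_len)
    {
        return frag_len != 0 && (header_len + frag_len) <= RX_PS_BSIZE0;
    }

    int main(void)
    {
        printf("64+32 -> %d, 64+1400 -> %d\n",
               copy_small_fragment(64, 32), copy_small_fragment(64, 1400));
        return 0;
    }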
@@ -4159,7 +4136,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4159 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4136 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4160 | return -EIO; | 4137 | return -EIO; |
4161 | } | 4138 | } |
4162 | if (adapter->hw.phy_type == e1000_media_type_copper) { | 4139 | if (adapter->hw.media_type == e1000_media_type_copper) { |
4163 | switch (data->reg_num) { | 4140 | switch (data->reg_num) { |
4164 | case PHY_CTRL: | 4141 | case PHY_CTRL: |
4165 | if (mii_reg & MII_CR_POWER_DOWN) | 4142 | if (mii_reg & MII_CR_POWER_DOWN) |
@@ -4518,21 +4495,13 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4518 | 4495 | ||
4519 | E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); | 4496 | E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); |
4520 | E1000_WRITE_REG(&adapter->hw, WUFC, wufc); | 4497 | E1000_WRITE_REG(&adapter->hw, WUFC, wufc); |
4521 | retval = pci_enable_wake(pdev, PCI_D3hot, 1); | 4498 | pci_enable_wake(pdev, PCI_D3hot, 1); |
4522 | if (retval) | 4499 | pci_enable_wake(pdev, PCI_D3cold, 1); |
4523 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | ||
4524 | retval = pci_enable_wake(pdev, PCI_D3cold, 1); | ||
4525 | if (retval) | ||
4526 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | ||
4527 | } else { | 4500 | } else { |
4528 | E1000_WRITE_REG(&adapter->hw, WUC, 0); | 4501 | E1000_WRITE_REG(&adapter->hw, WUC, 0); |
4529 | E1000_WRITE_REG(&adapter->hw, WUFC, 0); | 4502 | E1000_WRITE_REG(&adapter->hw, WUFC, 0); |
4530 | retval = pci_enable_wake(pdev, PCI_D3hot, 0); | 4503 | pci_enable_wake(pdev, PCI_D3hot, 0); |
4531 | if (retval) | 4504 | pci_enable_wake(pdev, PCI_D3cold, 0); |
4532 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | ||
4533 | retval = pci_enable_wake(pdev, PCI_D3cold, 0); | ||
4534 | if (retval) | ||
4535 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | ||
4536 | } | 4505 | } |
4537 | 4506 | ||
4538 | if (adapter->hw.mac_type >= e1000_82540 && | 4507 | if (adapter->hw.mac_type >= e1000_82540 && |
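The suspend hunk above drops the return-value checks around pci_enable_wake() but keeps the wake-on-LAN programming itself: a non-zero wake-up filter mask enables PME and D3hot/D3cold wake, a zero mask clears everything. A standalone sketch of that shape with stubbed register writes; the WUC bit value is an assumption.

    /* Sketch of the WoL programming performed on suspend. */
    #include <stdint.h>
    #include <stdio.h>

    #define WUC_PME_EN 0x2u   /* assumed PME-enable bit in the wake-up control reg */

    static void write_reg(const char *name, uint32_t val)
    {
        printf("%s <- 0x%08x\n", name, val);
    }

    static void pci_wake_enable(int enable)
    {
        /* stands in for pci_enable_wake(pdev, PCI_D3hot/D3cold, enable);
         * the patch drops the return-value checks around these calls */
        printf("wake from D3hot/D3cold: %s\n", enable ? "on" : "off");
    }

    static void program_wol(uint32_t wufc)
    {
        if (wufc) {
            write_reg("WUC", WUC_PME_EN);
            write_reg("WUFC", wufc);
            pci_wake_enable(1);
        } else {
            write_reg("WUC", 0);
            write_reg("WUFC", 0);
            pci_wake_enable(0);
        }
    }

    int main(void)
    {
        program_wol(0x1);   /* some non-zero wake filter mask */
        program_wol(0);
        return 0;
    }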
@@ -4541,13 +4510,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4541 | if (manc & E1000_MANC_SMBUS_EN) { | 4510 | if (manc & E1000_MANC_SMBUS_EN) { |
4542 | manc |= E1000_MANC_ARP_EN; | 4511 | manc |= E1000_MANC_ARP_EN; |
4543 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 4512 | E1000_WRITE_REG(&adapter->hw, MANC, manc); |
4544 | retval = pci_enable_wake(pdev, PCI_D3hot, 1); | 4513 | pci_enable_wake(pdev, PCI_D3hot, 1); |
4545 | if (retval) | 4514 | pci_enable_wake(pdev, PCI_D3cold, 1); |
4546 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | ||
4547 | retval = pci_enable_wake(pdev, PCI_D3cold, 1); | ||
4548 | if (retval) | ||
4549 | DPRINTK(PROBE, ERR, | ||
4550 | "Error enabling D3 cold wake\n"); | ||
4551 | } | 4515 | } |
4552 | } | 4516 | } |
4553 | 4517 | ||
@@ -4557,9 +4521,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4557 | 4521 | ||
4558 | pci_disable_device(pdev); | 4522 | pci_disable_device(pdev); |
4559 | 4523 | ||
4560 | retval = pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 4524 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
4561 | if (retval) | ||
4562 | DPRINTK(PROBE, ERR, "Error in setting power state\n"); | ||
4563 | 4525 | ||
4564 | return 0; | 4526 | return 0; |
4565 | } | 4527 | } |
@@ -4570,22 +4532,15 @@ e1000_resume(struct pci_dev *pdev) | |||
4570 | { | 4532 | { |
4571 | struct net_device *netdev = pci_get_drvdata(pdev); | 4533 | struct net_device *netdev = pci_get_drvdata(pdev); |
4572 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4534 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4573 | int retval; | ||
4574 | uint32_t manc, ret_val; | 4535 | uint32_t manc, ret_val; |
4575 | 4536 | ||
4576 | retval = pci_set_power_state(pdev, PCI_D0); | 4537 | pci_set_power_state(pdev, PCI_D0); |
4577 | if (retval) | ||
4578 | DPRINTK(PROBE, ERR, "Error in setting power state\n"); | ||
4579 | e1000_pci_restore_state(adapter); | 4538 | e1000_pci_restore_state(adapter); |
4580 | ret_val = pci_enable_device(pdev); | 4539 | ret_val = pci_enable_device(pdev); |
4581 | pci_set_master(pdev); | 4540 | pci_set_master(pdev); |
4582 | 4541 | ||
4583 | retval = pci_enable_wake(pdev, PCI_D3hot, 0); | 4542 | pci_enable_wake(pdev, PCI_D3hot, 0); |
4584 | if (retval) | 4543 | pci_enable_wake(pdev, PCI_D3cold, 0); |
4585 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | ||
4586 | retval = pci_enable_wake(pdev, PCI_D3cold, 0); | ||
4587 | if (retval) | ||
4588 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | ||
4589 | 4544 | ||
4590 | e1000_reset(adapter); | 4545 | e1000_reset(adapter); |
4591 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); | 4546 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); |
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index 9790db974dc1..048d052be29d 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | Contact Information: | 23 | Contact Information: |
24 | Linux NICS <linux.nics@intel.com> | 24 | Linux NICS <linux.nics@intel.com> |
25 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | 26 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
26 | 27 | ||
27 | *******************************************************************************/ | 28 | *******************************************************************************/ |
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index e0a4d37d1b85..e55f8969a0fb 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | Contact Information: | 23 | Contact Information: |
24 | Linux NICS <linux.nics@intel.com> | 24 | Linux NICS <linux.nics@intel.com> |
25 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | 26 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
26 | 27 | ||
27 | *******************************************************************************/ | 28 | *******************************************************************************/ |
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 2f7b86837fe8..8d680ce600d7 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c | |||
@@ -21,15 +21,15 @@ | |||
21 | http://www.scyld.com/network/epic100.html | 21 | http://www.scyld.com/network/epic100.html |
22 | 22 | ||
23 | --------------------------------------------------------------------- | 23 | --------------------------------------------------------------------- |
24 | 24 | ||
25 | Linux kernel-specific changes: | 25 | Linux kernel-specific changes: |
26 | 26 | ||
27 | LK1.1.2 (jgarzik): | 27 | LK1.1.2 (jgarzik): |
28 | * Merge becker version 1.09 (4/08/2000) | 28 | * Merge becker version 1.09 (4/08/2000) |
29 | 29 | ||
30 | LK1.1.3: | 30 | LK1.1.3: |
31 | * Major bugfix to 1.09 driver (Francis Romieu) | 31 | * Major bugfix to 1.09 driver (Francis Romieu) |
32 | 32 | ||
33 | LK1.1.4 (jgarzik): | 33 | LK1.1.4 (jgarzik): |
34 | * Merge becker test version 1.09 (5/29/2000) | 34 | * Merge becker test version 1.09 (5/29/2000) |
35 | 35 | ||
@@ -66,7 +66,7 @@ | |||
66 | LK1.1.14 (Kryzsztof Halasa): | 66 | LK1.1.14 (Kryzsztof Halasa): |
67 | * fix spurious bad initializations | 67 | * fix spurious bad initializations |
68 | * pound phy a la SMSC's app note on the subject | 68 | * pound phy a la SMSC's app note on the subject |
69 | 69 | ||
70 | AC1.1.14ac | 70 | AC1.1.14ac |
71 | * fix power up/down for ethtool that broke in 1.11 | 71 | * fix power up/down for ethtool that broke in 1.11 |
72 | 72 | ||
@@ -244,7 +244,7 @@ static struct pci_device_id epic_pci_tbl[] = { | |||
244 | }; | 244 | }; |
245 | MODULE_DEVICE_TABLE (pci, epic_pci_tbl); | 245 | MODULE_DEVICE_TABLE (pci, epic_pci_tbl); |
246 | 246 | ||
247 | 247 | ||
248 | #ifndef USE_IO_OPS | 248 | #ifndef USE_IO_OPS |
249 | #undef inb | 249 | #undef inb |
250 | #undef inw | 250 | #undef inw |
@@ -370,7 +370,7 @@ static int epic_close(struct net_device *dev); | |||
370 | static struct net_device_stats *epic_get_stats(struct net_device *dev); | 370 | static struct net_device_stats *epic_get_stats(struct net_device *dev); |
371 | static void set_rx_mode(struct net_device *dev); | 371 | static void set_rx_mode(struct net_device *dev); |
372 | 372 | ||
373 | 373 | ||
374 | 374 | ||
375 | static int __devinit epic_init_one (struct pci_dev *pdev, | 375 | static int __devinit epic_init_one (struct pci_dev *pdev, |
376 | const struct pci_device_id *ent) | 376 | const struct pci_device_id *ent) |
@@ -392,9 +392,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev, | |||
392 | printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s", | 392 | printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s", |
393 | version, version2, version3); | 393 | version, version2, version3); |
394 | #endif | 394 | #endif |
395 | 395 | ||
396 | card_idx++; | 396 | card_idx++; |
397 | 397 | ||
398 | ret = pci_enable_device(pdev); | 398 | ret = pci_enable_device(pdev); |
399 | if (ret) | 399 | if (ret) |
400 | goto out; | 400 | goto out; |
@@ -405,7 +405,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev, | |||
405 | ret = -ENODEV; | 405 | ret = -ENODEV; |
406 | goto err_out_disable; | 406 | goto err_out_disable; |
407 | } | 407 | } |
408 | 408 | ||
409 | pci_set_master(pdev); | 409 | pci_set_master(pdev); |
410 | 410 | ||
411 | ret = pci_request_regions(pdev, DRV_NAME); | 411 | ret = pci_request_regions(pdev, DRV_NAME); |
@@ -498,7 +498,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev, | |||
498 | ep->pci_dev = pdev; | 498 | ep->pci_dev = pdev; |
499 | ep->chip_id = chip_idx; | 499 | ep->chip_id = chip_idx; |
500 | ep->chip_flags = pci_id_tbl[chip_idx].drv_flags; | 500 | ep->chip_flags = pci_id_tbl[chip_idx].drv_flags; |
501 | ep->irq_mask = | 501 | ep->irq_mask = |
502 | (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) | 502 | (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) |
503 | | CntFull | TxUnderrun | EpicNapiEvent; | 503 | | CntFull | TxUnderrun | EpicNapiEvent; |
504 | 504 | ||
@@ -587,7 +587,7 @@ err_out_disable: | |||
587 | pci_disable_device(pdev); | 587 | pci_disable_device(pdev); |
588 | goto out; | 588 | goto out; |
589 | } | 589 | } |
590 | 590 | ||
591 | /* Serial EEPROM section. */ | 591 | /* Serial EEPROM section. */ |
592 | 592 | ||
593 | /* EEPROM_Ctrl bits. */ | 593 | /* EEPROM_Ctrl bits. */ |
@@ -709,7 +709,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) | |||
709 | 709 | ||
710 | outw(value, ioaddr + MIIData); | 710 | outw(value, ioaddr + MIIData); |
711 | outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl); | 711 | outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl); |
712 | for (i = 10000; i > 0; i--) { | 712 | for (i = 10000; i > 0; i--) { |
713 | barrier(); | 713 | barrier(); |
714 | if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0) | 714 | if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0) |
715 | break; | 715 | break; |
@@ -717,7 +717,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) | |||
717 | return; | 717 | return; |
718 | } | 718 | } |
719 | 719 | ||
720 | 720 | ||
721 | static int epic_open(struct net_device *dev) | 721 | static int epic_open(struct net_device *dev) |
722 | { | 722 | { |
723 | struct epic_private *ep = dev->priv; | 723 | struct epic_private *ep = dev->priv; |
@@ -760,7 +760,7 @@ static int epic_open(struct net_device *dev) | |||
760 | #endif | 760 | #endif |
761 | 761 | ||
762 | udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */ | 762 | udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */ |
763 | 763 | ||
764 | for (i = 0; i < 3; i++) | 764 | for (i = 0; i < 3; i++) |
765 | outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); | 765 | outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); |
766 | 766 | ||
@@ -803,7 +803,7 @@ static int epic_open(struct net_device *dev) | |||
803 | 803 | ||
804 | /* Enable interrupts by setting the interrupt mask. */ | 804 | /* Enable interrupts by setting the interrupt mask. */ |
805 | outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) | 805 | outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) |
806 | | CntFull | TxUnderrun | 806 | | CntFull | TxUnderrun |
807 | | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); | 807 | | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); |
808 | 808 | ||
809 | if (debug > 1) | 809 | if (debug > 1) |
@@ -831,7 +831,7 @@ static void epic_pause(struct net_device *dev) | |||
831 | struct epic_private *ep = dev->priv; | 831 | struct epic_private *ep = dev->priv; |
832 | 832 | ||
833 | netif_stop_queue (dev); | 833 | netif_stop_queue (dev); |
834 | 834 | ||
835 | /* Disable interrupts by clearing the interrupt mask. */ | 835 | /* Disable interrupts by clearing the interrupt mask. */ |
836 | outl(0x00000000, ioaddr + INTMASK); | 836 | outl(0x00000000, ioaddr + INTMASK); |
837 | /* Stop the chip's Tx and Rx DMA processes. */ | 837 | /* Stop the chip's Tx and Rx DMA processes. */ |
@@ -987,7 +987,7 @@ static void epic_init_ring(struct net_device *dev) | |||
987 | for (i = 0; i < RX_RING_SIZE; i++) { | 987 | for (i = 0; i < RX_RING_SIZE; i++) { |
988 | ep->rx_ring[i].rxstatus = 0; | 988 | ep->rx_ring[i].rxstatus = 0; |
989 | ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz); | 989 | ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz); |
990 | ep->rx_ring[i].next = ep->rx_ring_dma + | 990 | ep->rx_ring[i].next = ep->rx_ring_dma + |
991 | (i+1)*sizeof(struct epic_rx_desc); | 991 | (i+1)*sizeof(struct epic_rx_desc); |
992 | ep->rx_skbuff[i] = NULL; | 992 | ep->rx_skbuff[i] = NULL; |
993 | } | 993 | } |
@@ -1002,7 +1002,7 @@ static void epic_init_ring(struct net_device *dev) | |||
1002 | break; | 1002 | break; |
1003 | skb->dev = dev; /* Mark as being used by this device. */ | 1003 | skb->dev = dev; /* Mark as being used by this device. */ |
1004 | skb_reserve(skb, 2); /* 16 byte align the IP header. */ | 1004 | skb_reserve(skb, 2); /* 16 byte align the IP header. */ |
1005 | ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, | 1005 | ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, |
1006 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1006 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); |
1007 | ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn); | 1007 | ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn); |
1008 | } | 1008 | } |
@@ -1013,7 +1013,7 @@ static void epic_init_ring(struct net_device *dev) | |||
1013 | for (i = 0; i < TX_RING_SIZE; i++) { | 1013 | for (i = 0; i < TX_RING_SIZE; i++) { |
1014 | ep->tx_skbuff[i] = NULL; | 1014 | ep->tx_skbuff[i] = NULL; |
1015 | ep->tx_ring[i].txstatus = 0x0000; | 1015 | ep->tx_ring[i].txstatus = 0x0000; |
1016 | ep->tx_ring[i].next = ep->tx_ring_dma + | 1016 | ep->tx_ring[i].next = ep->tx_ring_dma + |
1017 | (i+1)*sizeof(struct epic_tx_desc); | 1017 | (i+1)*sizeof(struct epic_tx_desc); |
1018 | } | 1018 | } |
1019 | ep->tx_ring[i-1].next = ep->tx_ring_dma; | 1019 | ep->tx_ring[i-1].next = ep->tx_ring_dma; |
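Both init loops above link each descriptor's next field to the bus address of the following descriptor and close the loop by pointing the last entry back at the ring base. A minimal stand-alone C sketch of that chaining (the descriptor layout and function name are illustrative, not the driver's types):

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative descriptor layout; only the chaining field matters here. */
    struct ring_desc {
            uint32_t status;
            uint32_t buflength;
            uint32_t bufaddr;
            uint32_t next;          /* bus address of the following descriptor */
    };

    /* Link 'count' descriptors that live at bus address 'ring_dma' into a
     * closed ring: entry i points at entry i+1, the last entry wraps back. */
    static void chain_ring(struct ring_desc *ring, uint32_t ring_dma, int count)
    {
            int i;

            for (i = 0; i < count; i++)
                    ring[i].next = ring_dma +
                            (i + 1) * (uint32_t)sizeof(struct ring_desc);
            ring[count - 1].next = ring_dma;        /* close the ring */
    }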
@@ -1026,7 +1026,7 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1026 | int entry, free_count; | 1026 | int entry, free_count; |
1027 | u32 ctrl_word; | 1027 | u32 ctrl_word; |
1028 | unsigned long flags; | 1028 | unsigned long flags; |
1029 | 1029 | ||
1030 | if (skb->len < ETH_ZLEN) { | 1030 | if (skb->len < ETH_ZLEN) { |
1031 | skb = skb_padto(skb, ETH_ZLEN); | 1031 | skb = skb_padto(skb, ETH_ZLEN); |
1032 | if (skb == NULL) | 1032 | if (skb == NULL) |
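The hunk above has epic_start_xmit() use skb_padto(), which pads runt frames up to ETH_ZLEN (60 bytes, FCS excluded) and returns NULL if the buffer cannot be expanded. A rough userspace sketch of the same padding rule (buffer handling is simplified and the helper name is made up):

    #include <stddef.h>
    #include <string.h>

    #define ETH_ZLEN 60     /* minimum Ethernet frame length, FCS excluded */

    /* Pad a frame of 'len' bytes in 'buf' (capacity 'cap') up to ETH_ZLEN.
     * Returns the length to program into the TX descriptor, or 0 when the
     * buffer is too small and the frame has to be dropped. */
    static size_t pad_to_min_frame(unsigned char *buf, size_t len, size_t cap)
    {
            if (len >= ETH_ZLEN)
                    return len;
            if (cap < ETH_ZLEN)
                    return 0;
            memset(buf + len, 0, ETH_ZLEN - len);
            return ETH_ZLEN;
    }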
@@ -1042,7 +1042,7 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1042 | entry = ep->cur_tx % TX_RING_SIZE; | 1042 | entry = ep->cur_tx % TX_RING_SIZE; |
1043 | 1043 | ||
1044 | ep->tx_skbuff[entry] = skb; | 1044 | ep->tx_skbuff[entry] = skb; |
1045 | ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data, | 1045 | ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data, |
1046 | skb->len, PCI_DMA_TODEVICE); | 1046 | skb->len, PCI_DMA_TODEVICE); |
1047 | if (free_count < TX_QUEUE_LEN/2) {/* Typical path */ | 1047 | if (free_count < TX_QUEUE_LEN/2) {/* Typical path */ |
1048 | ctrl_word = cpu_to_le32(0x100000); /* No interrupt */ | 1048 | ctrl_word = cpu_to_le32(0x100000); /* No interrupt */ |
@@ -1126,7 +1126,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep) | |||
1126 | 1126 | ||
1127 | /* Free the original skb. */ | 1127 | /* Free the original skb. */ |
1128 | skb = ep->tx_skbuff[entry]; | 1128 | skb = ep->tx_skbuff[entry]; |
1129 | pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, | 1129 | pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, |
1130 | skb->len, PCI_DMA_TODEVICE); | 1130 | skb->len, PCI_DMA_TODEVICE); |
1131 | dev_kfree_skb_irq(skb); | 1131 | dev_kfree_skb_irq(skb); |
1132 | ep->tx_skbuff[entry] = NULL; | 1132 | ep->tx_skbuff[entry] = NULL; |
@@ -1281,8 +1281,8 @@ static int epic_rx(struct net_device *dev, int budget) | |||
1281 | ep->rx_buf_sz, | 1281 | ep->rx_buf_sz, |
1282 | PCI_DMA_FROMDEVICE); | 1282 | PCI_DMA_FROMDEVICE); |
1283 | } else { | 1283 | } else { |
1284 | pci_unmap_single(ep->pci_dev, | 1284 | pci_unmap_single(ep->pci_dev, |
1285 | ep->rx_ring[entry].bufaddr, | 1285 | ep->rx_ring[entry].bufaddr, |
1286 | ep->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1286 | ep->rx_buf_sz, PCI_DMA_FROMDEVICE); |
1287 | skb_put(skb = ep->rx_skbuff[entry], pkt_len); | 1287 | skb_put(skb = ep->rx_skbuff[entry], pkt_len); |
1288 | ep->rx_skbuff[entry] = NULL; | 1288 | ep->rx_skbuff[entry] = NULL; |
@@ -1307,7 +1307,7 @@ static int epic_rx(struct net_device *dev, int budget) | |||
1307 | break; | 1307 | break; |
1308 | skb->dev = dev; /* Mark as being used by this device. */ | 1308 | skb->dev = dev; /* Mark as being used by this device. */ |
1309 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 1309 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
1310 | ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, | 1310 | ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, |
1311 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1311 | skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); |
1312 | work_done++; | 1312 | work_done++; |
1313 | } | 1313 | } |
@@ -1403,7 +1403,7 @@ static int epic_close(struct net_device *dev) | |||
1403 | ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ | 1403 | ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ |
1404 | ep->rx_ring[i].buflength = 0; | 1404 | ep->rx_ring[i].buflength = 0; |
1405 | if (skb) { | 1405 | if (skb) { |
1406 | pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr, | 1406 | pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr, |
1407 | ep->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1407 | ep->rx_buf_sz, PCI_DMA_FROMDEVICE); |
1408 | dev_kfree_skb(skb); | 1408 | dev_kfree_skb(skb); |
1409 | } | 1409 | } |
@@ -1414,7 +1414,7 @@ static int epic_close(struct net_device *dev) | |||
1414 | ep->tx_skbuff[i] = NULL; | 1414 | ep->tx_skbuff[i] = NULL; |
1415 | if (!skb) | 1415 | if (!skb) |
1416 | continue; | 1416 | continue; |
1417 | pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr, | 1417 | pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr, |
1418 | skb->len, PCI_DMA_TODEVICE); | 1418 | skb->len, PCI_DMA_TODEVICE); |
1419 | dev_kfree_skb(skb); | 1419 | dev_kfree_skb(skb); |
1420 | } | 1420 | } |
@@ -1607,7 +1607,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev) | |||
1607 | { | 1607 | { |
1608 | struct net_device *dev = pci_get_drvdata(pdev); | 1608 | struct net_device *dev = pci_get_drvdata(pdev); |
1609 | struct epic_private *ep = dev->priv; | 1609 | struct epic_private *ep = dev->priv; |
1610 | 1610 | ||
1611 | pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); | 1611 | pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); |
1612 | pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); | 1612 | pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); |
1613 | unregister_netdev(dev); | 1613 | unregister_netdev(dev); |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index feb5b223cd60..5669b95162b3 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -107,6 +107,7 @@ | |||
107 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. | 107 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. |
108 | * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. | 108 | * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. |
109 | * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. | 109 | * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. |
110 | * 0.55: 22 Mar 2006: Add flow control (pause frame). | ||
110 | * | 111 | * |
111 | * Known bugs: | 112 | * Known bugs: |
112 | * We suspect that on some hardware no TX done interrupts are generated. | 113 | * We suspect that on some hardware no TX done interrupts are generated. |
@@ -118,7 +119,7 @@ | |||
118 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few | 119 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few |
119 | * superfluous timer interrupts from the nic. | 120 | * superfluous timer interrupts from the nic. |
120 | */ | 121 | */ |
121 | #define FORCEDETH_VERSION "0.54" | 122 | #define FORCEDETH_VERSION "0.55" |
122 | #define DRV_NAME "forcedeth" | 123 | #define DRV_NAME "forcedeth" |
123 | 124 | ||
124 | #include <linux/module.h> | 125 | #include <linux/module.h> |
@@ -163,6 +164,7 @@ | |||
163 | #define DEV_HAS_MSI 0x0040 /* device supports MSI */ | 164 | #define DEV_HAS_MSI 0x0040 /* device supports MSI */ |
164 | #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ | 165 | #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ |
165 | #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ | 166 | #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ |
167 | #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ | ||
166 | 168 | ||
167 | enum { | 169 | enum { |
168 | NvRegIrqStatus = 0x000, | 170 | NvRegIrqStatus = 0x000, |
@@ -203,6 +205,7 @@ enum { | |||
203 | NvRegMSIIrqMask = 0x030, | 205 | NvRegMSIIrqMask = 0x030, |
204 | #define NVREG_MSI_VECTOR_0_ENABLED 0x01 | 206 | #define NVREG_MSI_VECTOR_0_ENABLED 0x01 |
205 | NvRegMisc1 = 0x080, | 207 | NvRegMisc1 = 0x080, |
208 | #define NVREG_MISC1_PAUSE_TX 0x01 | ||
206 | #define NVREG_MISC1_HD 0x02 | 209 | #define NVREG_MISC1_HD 0x02 |
207 | #define NVREG_MISC1_FORCE 0x3b0f3c | 210 | #define NVREG_MISC1_FORCE 0x3b0f3c |
208 | 211 | ||
@@ -214,7 +217,8 @@ enum { | |||
214 | #define NVREG_XMITSTAT_BUSY 0x01 | 217 | #define NVREG_XMITSTAT_BUSY 0x01 |
215 | 218 | ||
216 | NvRegPacketFilterFlags = 0x8c, | 219 | NvRegPacketFilterFlags = 0x8c, |
217 | #define NVREG_PFF_ALWAYS 0x7F0008 | 220 | #define NVREG_PFF_PAUSE_RX 0x08 |
221 | #define NVREG_PFF_ALWAYS 0x7F0000 | ||
218 | #define NVREG_PFF_PROMISC 0x80 | 222 | #define NVREG_PFF_PROMISC 0x80 |
219 | #define NVREG_PFF_MYADDR 0x20 | 223 | #define NVREG_PFF_MYADDR 0x20 |
220 | 224 | ||
@@ -277,6 +281,9 @@ enum { | |||
277 | #define NVREG_TXRXCTL_VLANINS 0x00080 | 281 | #define NVREG_TXRXCTL_VLANINS 0x00080 |
278 | NvRegTxRingPhysAddrHigh = 0x148, | 282 | NvRegTxRingPhysAddrHigh = 0x148, |
279 | NvRegRxRingPhysAddrHigh = 0x14C, | 283 | NvRegRxRingPhysAddrHigh = 0x14C, |
284 | NvRegTxPauseFrame = 0x170, | ||
285 | #define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080 | ||
286 | #define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030 | ||
280 | NvRegMIIStatus = 0x180, | 287 | NvRegMIIStatus = 0x180, |
281 | #define NVREG_MIISTAT_ERROR 0x0001 | 288 | #define NVREG_MIISTAT_ERROR 0x0001 |
282 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 | 289 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 |
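The new register bits separate the two flow-control directions: receive pause is a bit in the packet filter register (NVREG_PFF_PAUSE_RX, now carved out of NVREG_PFF_ALWAYS), while transmit pause is programmed through the dedicated NvRegTxPauseFrame register added here. A small sketch of the read-modify-write toggle on the RX side (the MMIO helpers stand in for readl()/writel() and are not the kernel API):

    #include <stdint.h>
    #include <stdbool.h>

    #define NVREG_PFF_PAUSE_RX 0x08

    /* Illustrative MMIO helpers; the driver uses readl()/writel() instead. */
    static uint32_t mmio_read32(volatile uint32_t *reg) { return *reg; }
    static void mmio_write32(volatile uint32_t *reg, uint32_t val) { *reg = val; }

    /* Enable or disable receive flow control with a read-modify-write,
     * leaving the other packet-filter bits untouched. */
    static void set_rx_pause(volatile uint32_t *pff_reg, bool enable)
    {
            uint32_t pff = mmio_read32(pff_reg) & ~(uint32_t)NVREG_PFF_PAUSE_RX;

            if (enable)
                    pff |= NVREG_PFF_PAUSE_RX;
            mmio_write32(pff_reg, pff);
    }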
@@ -451,7 +458,7 @@ typedef union _ring_type { | |||
451 | 458 | ||
452 | #define RX_RING 128 | 459 | #define RX_RING 128 |
453 | #define TX_RING 256 | 460 | #define TX_RING 256 |
454 | /* | 461 | /* |
455 | * If your nic mysteriously hangs then try to reduce the limits | 462 | * If your nic mysteriously hangs then try to reduce the limits |
456 | * to 1/0: It might be required to set NV_TX_LASTPACKET in the | 463 | * to 1/0: It might be required to set NV_TX_LASTPACKET in the |
457 | * last valid ring entry. But this would be impossible to | 464 | * last valid ring entry. But this would be impossible to |
@@ -473,7 +480,7 @@ typedef union _ring_type { | |||
473 | #define POLL_WAIT (1+HZ/100) | 480 | #define POLL_WAIT (1+HZ/100) |
474 | #define LINK_TIMEOUT (3*HZ) | 481 | #define LINK_TIMEOUT (3*HZ) |
475 | 482 | ||
476 | /* | 483 | /* |
477 | * desc_ver values: | 484 | * desc_ver values: |
478 | * The nic supports three different descriptor types: | 485 | * The nic supports three different descriptor types: |
479 | * - DESC_VER_1: Original | 486 | * - DESC_VER_1: Original |
@@ -506,13 +513,10 @@ typedef union _ring_type { | |||
506 | #define PHY_1000 0x2 | 513 | #define PHY_1000 0x2 |
507 | #define PHY_HALF 0x100 | 514 | #define PHY_HALF 0x100 |
508 | 515 | ||
509 | /* FIXME: MII defines that should be added to <linux/mii.h> */ | 516 | #define NV_PAUSEFRAME_RX_CAPABLE 0x0001 |
510 | #define MII_1000BT_CR 0x09 | 517 | #define NV_PAUSEFRAME_TX_CAPABLE 0x0002 |
511 | #define MII_1000BT_SR 0x0a | 518 | #define NV_PAUSEFRAME_RX_ENABLE 0x0004 |
512 | #define ADVERTISE_1000FULL 0x0200 | 519 | #define NV_PAUSEFRAME_TX_ENABLE 0x0008 |
513 | #define ADVERTISE_1000HALF 0x0100 | ||
514 | #define LPA_1000FULL 0x0800 | ||
515 | #define LPA_1000HALF 0x0400 | ||
516 | 520 | ||
517 | /* MSI/MSI-X defines */ | 521 | /* MSI/MSI-X defines */ |
518 | #define NV_MSI_X_MAX_VECTORS 8 | 522 | #define NV_MSI_X_MAX_VECTORS 8 |
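This hunk drops the driver-local MII_1000BT_CR/MII_1000BT_SR defines in favour of the standard MII_CTRL1000/MII_STAT1000 names from <linux/mii.h>, and introduces the NV_PAUSEFRAME_* bits used later for flow-control bookkeeping. A stand-alone sketch of touching the 1000BASE-T control register through the standard names (the MDIO accessor types are hypothetical stand-ins for mii_rw()):

    #include <stdint.h>

    /* Standard register/bit names from <linux/mii.h> that replace the
     * driver-local MII_1000BT_* defines removed above. */
    #define MII_CTRL1000       0x09    /* 1000BASE-T control */
    #define MII_STAT1000       0x0a    /* 1000BASE-T status  */
    #define ADVERTISE_1000FULL 0x0200
    #define ADVERTISE_1000HALF 0x0100

    /* Hypothetical MDIO accessors standing in for mii_rw(). */
    typedef uint16_t (*mdio_read_fn)(int phyaddr, int reg);
    typedef void (*mdio_write_fn)(int phyaddr, int reg, uint16_t val);

    /* Advertise 1000BASE-T full duplex only, as the RGMII branch of
     * phy_init() does, and never half duplex. */
    static void advertise_1000full_only(mdio_read_fn rd, mdio_write_fn wr,
                                        int phyaddr)
    {
            uint16_t ctrl = rd(phyaddr, MII_CTRL1000);

            ctrl &= (uint16_t)~ADVERTISE_1000HALF;
            ctrl |= ADVERTISE_1000FULL;
            wr(phyaddr, MII_CTRL1000, ctrl);
    }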
@@ -602,6 +606,9 @@ struct fe_priv { | |||
602 | /* msi/msi-x fields */ | 606 | /* msi/msi-x fields */ |
603 | u32 msi_flags; | 607 | u32 msi_flags; |
604 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; | 608 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; |
609 | |||
610 | /* flow control */ | ||
611 | u32 pause_flags; | ||
605 | }; | 612 | }; |
606 | 613 | ||
607 | /* | 614 | /* |
@@ -612,7 +619,7 @@ static int max_interrupt_work = 5; | |||
612 | 619 | ||
613 | /* | 620 | /* |
614 | * Optimization can be either throuput mode or cpu mode | 621 | * Optimization can be either throuput mode or cpu mode |
615 | * | 622 | * |
616 | * Throughput Mode: Every tx and rx packet will generate an interrupt. | 623 | * Throughput Mode: Every tx and rx packet will generate an interrupt. |
617 | * CPU Mode: Interrupts are controlled by a timer. | 624 | * CPU Mode: Interrupts are controlled by a timer. |
618 | */ | 625 | */ |
@@ -860,7 +867,7 @@ static int phy_init(struct net_device *dev) | |||
860 | 867 | ||
861 | /* set advertise register */ | 868 | /* set advertise register */ |
862 | reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | 869 | reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
863 | reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400); | 870 | reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); |
864 | if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { | 871 | if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { |
865 | printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); | 872 | printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); |
866 | return PHY_ERROR; | 873 | return PHY_ERROR; |
@@ -873,14 +880,14 @@ static int phy_init(struct net_device *dev) | |||
873 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | 880 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); |
874 | if (mii_status & PHY_GIGABIT) { | 881 | if (mii_status & PHY_GIGABIT) { |
875 | np->gigabit = PHY_GIGABIT; | 882 | np->gigabit = PHY_GIGABIT; |
876 | mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); | 883 | mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
877 | mii_control_1000 &= ~ADVERTISE_1000HALF; | 884 | mii_control_1000 &= ~ADVERTISE_1000HALF; |
878 | if (phyinterface & PHY_RGMII) | 885 | if (phyinterface & PHY_RGMII) |
879 | mii_control_1000 |= ADVERTISE_1000FULL; | 886 | mii_control_1000 |= ADVERTISE_1000FULL; |
880 | else | 887 | else |
881 | mii_control_1000 &= ~ADVERTISE_1000FULL; | 888 | mii_control_1000 &= ~ADVERTISE_1000FULL; |
882 | 889 | ||
883 | if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) { | 890 | if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { |
884 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | 891 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); |
885 | return PHY_ERROR; | 892 | return PHY_ERROR; |
886 | } | 893 | } |
@@ -918,6 +925,8 @@ static int phy_init(struct net_device *dev) | |||
918 | return PHY_ERROR; | 925 | return PHY_ERROR; |
919 | } | 926 | } |
920 | } | 927 | } |
928 | /* some phys clear out pause advertisement on reset, set it back */ | ||
929 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); | ||
921 | 930 | ||
922 | /* restart auto negotiation */ | 931 | /* restart auto negotiation */ |
923 | mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | 932 | mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
@@ -1110,7 +1119,7 @@ static void nv_do_rx_refill(unsigned long data) | |||
1110 | } | 1119 | } |
1111 | } | 1120 | } |
1112 | 1121 | ||
1113 | static void nv_init_rx(struct net_device *dev) | 1122 | static void nv_init_rx(struct net_device *dev) |
1114 | { | 1123 | { |
1115 | struct fe_priv *np = netdev_priv(dev); | 1124 | struct fe_priv *np = netdev_priv(dev); |
1116 | int i; | 1125 | int i; |
@@ -1174,7 +1183,7 @@ static void nv_drain_tx(struct net_device *dev) | |||
1174 | { | 1183 | { |
1175 | struct fe_priv *np = netdev_priv(dev); | 1184 | struct fe_priv *np = netdev_priv(dev); |
1176 | unsigned int i; | 1185 | unsigned int i; |
1177 | 1186 | ||
1178 | for (i = 0; i < TX_RING; i++) { | 1187 | for (i = 0; i < TX_RING; i++) { |
1179 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1188 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1180 | np->tx_ring.orig[i].FlagLen = 0; | 1189 | np->tx_ring.orig[i].FlagLen = 0; |
@@ -1320,7 +1329,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1320 | } else { | 1329 | } else { |
1321 | np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); | 1330 | np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); |
1322 | np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); | 1331 | np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); |
1323 | } | 1332 | } |
1324 | 1333 | ||
1325 | dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", | 1334 | dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", |
1326 | dev->name, np->next_tx, entries, tx_flags_extra); | 1335 | dev->name, np->next_tx, entries, tx_flags_extra); |
@@ -1395,7 +1404,7 @@ static void nv_tx_done(struct net_device *dev) | |||
1395 | } else { | 1404 | } else { |
1396 | np->stats.tx_packets++; | 1405 | np->stats.tx_packets++; |
1397 | np->stats.tx_bytes += skb->len; | 1406 | np->stats.tx_bytes += skb->len; |
1398 | } | 1407 | } |
1399 | } | 1408 | } |
1400 | } | 1409 | } |
1401 | nv_release_txskb(dev, i); | 1410 | nv_release_txskb(dev, i); |
@@ -1441,7 +1450,7 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1441 | for (i=0;i<TX_RING;i+= 4) { | 1450 | for (i=0;i<TX_RING;i+= 4) { |
1442 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1451 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1443 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", | 1452 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", |
1444 | i, | 1453 | i, |
1445 | le32_to_cpu(np->tx_ring.orig[i].PacketBuffer), | 1454 | le32_to_cpu(np->tx_ring.orig[i].PacketBuffer), |
1446 | le32_to_cpu(np->tx_ring.orig[i].FlagLen), | 1455 | le32_to_cpu(np->tx_ring.orig[i].FlagLen), |
1447 | le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer), | 1456 | le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer), |
@@ -1452,7 +1461,7 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1452 | le32_to_cpu(np->tx_ring.orig[i+3].FlagLen)); | 1461 | le32_to_cpu(np->tx_ring.orig[i+3].FlagLen)); |
1453 | } else { | 1462 | } else { |
1454 | printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", | 1463 | printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", |
1455 | i, | 1464 | i, |
1456 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh), | 1465 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh), |
1457 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow), | 1466 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow), |
1458 | le32_to_cpu(np->tx_ring.ex[i].FlagLen), | 1467 | le32_to_cpu(np->tx_ring.ex[i].FlagLen), |
@@ -1550,7 +1559,6 @@ static void nv_rx_process(struct net_device *dev) | |||
1550 | u32 Flags; | 1559 | u32 Flags; |
1551 | u32 vlanflags = 0; | 1560 | u32 vlanflags = 0; |
1552 | 1561 | ||
1553 | |||
1554 | for (;;) { | 1562 | for (;;) { |
1555 | struct sk_buff *skb; | 1563 | struct sk_buff *skb; |
1556 | int len; | 1564 | int len; |
@@ -1901,7 +1909,9 @@ static int nv_update_linkspeed(struct net_device *dev) | |||
1901 | { | 1909 | { |
1902 | struct fe_priv *np = netdev_priv(dev); | 1910 | struct fe_priv *np = netdev_priv(dev); |
1903 | u8 __iomem *base = get_hwbase(dev); | 1911 | u8 __iomem *base = get_hwbase(dev); |
1904 | int adv, lpa; | 1912 | int adv = 0; |
1913 | int lpa = 0; | ||
1914 | int adv_lpa, adv_pause, lpa_pause; | ||
1905 | int newls = np->linkspeed; | 1915 | int newls = np->linkspeed; |
1906 | int newdup = np->duplex; | 1916 | int newdup = np->duplex; |
1907 | int mii_status; | 1917 | int mii_status; |
@@ -1954,8 +1964,8 @@ static int nv_update_linkspeed(struct net_device *dev) | |||
1954 | 1964 | ||
1955 | retval = 1; | 1965 | retval = 1; |
1956 | if (np->gigabit == PHY_GIGABIT) { | 1966 | if (np->gigabit == PHY_GIGABIT) { |
1957 | control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); | 1967 | control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1958 | status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ); | 1968 | status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); |
1959 | 1969 | ||
1960 | if ((control_1000 & ADVERTISE_1000FULL) && | 1970 | if ((control_1000 & ADVERTISE_1000FULL) && |
1961 | (status_1000 & LPA_1000FULL)) { | 1971 | (status_1000 & LPA_1000FULL)) { |
@@ -1973,21 +1983,21 @@ static int nv_update_linkspeed(struct net_device *dev) | |||
1973 | dev->name, adv, lpa); | 1983 | dev->name, adv, lpa); |
1974 | 1984 | ||
1975 | /* FIXME: handle parallel detection properly */ | 1985 | /* FIXME: handle parallel detection properly */ |
1976 | lpa = lpa & adv; | 1986 | adv_lpa = lpa & adv; |
1977 | if (lpa & LPA_100FULL) { | 1987 | if (adv_lpa & LPA_100FULL) { |
1978 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; | 1988 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
1979 | newdup = 1; | 1989 | newdup = 1; |
1980 | } else if (lpa & LPA_100HALF) { | 1990 | } else if (adv_lpa & LPA_100HALF) { |
1981 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; | 1991 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
1982 | newdup = 0; | 1992 | newdup = 0; |
1983 | } else if (lpa & LPA_10FULL) { | 1993 | } else if (adv_lpa & LPA_10FULL) { |
1984 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | 1994 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
1985 | newdup = 1; | 1995 | newdup = 1; |
1986 | } else if (lpa & LPA_10HALF) { | 1996 | } else if (adv_lpa & LPA_10HALF) { |
1987 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | 1997 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
1988 | newdup = 0; | 1998 | newdup = 0; |
1989 | } else { | 1999 | } else { |
1990 | dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa); | 2000 | dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); |
1991 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | 2001 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
1992 | newdup = 0; | 2002 | newdup = 0; |
1993 | } | 2003 | } |
@@ -2030,6 +2040,56 @@ set_speed: | |||
2030 | writel(np->linkspeed, base + NvRegLinkSpeed); | 2040 | writel(np->linkspeed, base + NvRegLinkSpeed); |
2031 | pci_push(base); | 2041 | pci_push(base); |
2032 | 2042 | ||
2043 | /* setup pause frame based on advertisement and link partner */ | ||
2044 | np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); | ||
2045 | |||
2046 | if (np->duplex != 0) { | ||
2047 | adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); | ||
2048 | lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); | ||
2049 | |||
2050 | switch (adv_pause) { | ||
2051 | case (ADVERTISE_PAUSE_CAP): | ||
2052 | if (lpa_pause & LPA_PAUSE_CAP) { | ||
2053 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE; | ||
2054 | } | ||
2055 | break; | ||
2056 | case (ADVERTISE_PAUSE_ASYM): | ||
2057 | if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) | ||
2058 | { | ||
2059 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | ||
2060 | } | ||
2061 | break; | ||
2062 | case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM): | ||
2063 | if (lpa_pause & LPA_PAUSE_CAP) | ||
2064 | { | ||
2065 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE; | ||
2066 | } | ||
2067 | if (lpa_pause == LPA_PAUSE_ASYM) | ||
2068 | { | ||
2069 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | ||
2070 | } | ||
2071 | break; | ||
2072 | } | ||
2073 | } | ||
2074 | |||
2075 | if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { | ||
2076 | u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; | ||
2077 | if (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) | ||
2078 | writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); | ||
2079 | else | ||
2080 | writel(pff, base + NvRegPacketFilterFlags); | ||
2081 | } | ||
2082 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { | ||
2083 | u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; | ||
2084 | if (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) { | ||
2085 | writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); | ||
2086 | writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); | ||
2087 | } else { | ||
2088 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | ||
2089 | writel(regmisc, base + NvRegMisc1); | ||
2090 | } | ||
2091 | } | ||
2092 | |||
2033 | return retval; | 2093 | return retval; |
2034 | } | 2094 | } |
2035 | 2095 | ||
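The switch added above implements the usual IEEE 802.3 pause resolution: the negotiated TX/RX pause mode is derived from the local advertisement and the link partner's ability word, and only applies on full-duplex links. A compact stand-alone version of the same truth table (the MII bit values match <linux/mii.h>; PAUSE_RX/PAUSE_TX and the function name are illustrative):

    #include <stdint.h>

    #define ADVERTISE_PAUSE_CAP  0x0400
    #define ADVERTISE_PAUSE_ASYM 0x0800
    #define LPA_PAUSE_CAP        0x0400
    #define LPA_PAUSE_ASYM       0x0800

    #define PAUSE_RX 0x1
    #define PAUSE_TX 0x2

    /* Resolve pause mode from our advertisement (adv) and the partner's
     * ability word (lpa); only meaningful on full-duplex links. */
    static unsigned int resolve_pause(uint16_t adv, uint16_t lpa)
    {
            unsigned int mode = 0;

            switch (adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) {
            case ADVERTISE_PAUSE_CAP:               /* symmetric only */
                    if (lpa & LPA_PAUSE_CAP)
                            mode = PAUSE_RX | PAUSE_TX;
                    break;
            case ADVERTISE_PAUSE_ASYM:              /* asymmetric, TX only */
                    if ((lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM)) ==
                        (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
                            mode = PAUSE_TX;
                    break;
            case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
                    if (lpa & LPA_PAUSE_CAP)
                            mode = PAUSE_RX | PAUSE_TX;
                    else if ((lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM)) ==
                             LPA_PAUSE_ASYM)
                            mode = PAUSE_RX;
                    break;
            }
            return mode;
    }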
@@ -2090,7 +2150,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
2090 | spin_lock(&np->lock); | 2150 | spin_lock(&np->lock); |
2091 | nv_tx_done(dev); | 2151 | nv_tx_done(dev); |
2092 | spin_unlock(&np->lock); | 2152 | spin_unlock(&np->lock); |
2093 | 2153 | ||
2094 | nv_rx_process(dev); | 2154 | nv_rx_process(dev); |
2095 | if (nv_alloc_rx(dev)) { | 2155 | if (nv_alloc_rx(dev)) { |
2096 | spin_lock(&np->lock); | 2156 | spin_lock(&np->lock); |
@@ -2098,7 +2158,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
2098 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 2158 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
2099 | spin_unlock(&np->lock); | 2159 | spin_unlock(&np->lock); |
2100 | } | 2160 | } |
2101 | 2161 | ||
2102 | if (events & NVREG_IRQ_LINK) { | 2162 | if (events & NVREG_IRQ_LINK) { |
2103 | spin_lock(&np->lock); | 2163 | spin_lock(&np->lock); |
2104 | nv_link_irq(dev); | 2164 | nv_link_irq(dev); |
@@ -2163,7 +2223,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) | |||
2163 | spin_lock_irq(&np->lock); | 2223 | spin_lock_irq(&np->lock); |
2164 | nv_tx_done(dev); | 2224 | nv_tx_done(dev); |
2165 | spin_unlock_irq(&np->lock); | 2225 | spin_unlock_irq(&np->lock); |
2166 | 2226 | ||
2167 | if (events & (NVREG_IRQ_TX_ERR)) { | 2227 | if (events & (NVREG_IRQ_TX_ERR)) { |
2168 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | 2228 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", |
2169 | dev->name, events); | 2229 | dev->name, events); |
@@ -2206,7 +2266,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | |||
2206 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); | 2266 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); |
2207 | if (!(events & np->irqmask)) | 2267 | if (!(events & np->irqmask)) |
2208 | break; | 2268 | break; |
2209 | 2269 | ||
2210 | nv_rx_process(dev); | 2270 | nv_rx_process(dev); |
2211 | if (nv_alloc_rx(dev)) { | 2271 | if (nv_alloc_rx(dev)) { |
2212 | spin_lock_irq(&np->lock); | 2272 | spin_lock_irq(&np->lock); |
@@ -2214,7 +2274,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | |||
2214 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 2274 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
2215 | spin_unlock_irq(&np->lock); | 2275 | spin_unlock_irq(&np->lock); |
2216 | } | 2276 | } |
2217 | 2277 | ||
2218 | if (i > max_interrupt_work) { | 2278 | if (i > max_interrupt_work) { |
2219 | spin_lock_irq(&np->lock); | 2279 | spin_lock_irq(&np->lock); |
2220 | /* disable interrupts on the nic */ | 2280 | /* disable interrupts on the nic */ |
@@ -2253,7 +2313,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | |||
2253 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | 2313 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); |
2254 | if (!(events & np->irqmask)) | 2314 | if (!(events & np->irqmask)) |
2255 | break; | 2315 | break; |
2256 | 2316 | ||
2257 | if (events & NVREG_IRQ_LINK) { | 2317 | if (events & NVREG_IRQ_LINK) { |
2258 | spin_lock_irq(&np->lock); | 2318 | spin_lock_irq(&np->lock); |
2259 | nv_link_irq(dev); | 2319 | nv_link_irq(dev); |
@@ -2326,7 +2386,7 @@ static void nv_do_nic_poll(unsigned long data) | |||
2326 | np->nic_poll_irq = 0; | 2386 | np->nic_poll_irq = 0; |
2327 | 2387 | ||
2328 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ | 2388 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ |
2329 | 2389 | ||
2330 | writel(mask, base + NvRegIrqMask); | 2390 | writel(mask, base + NvRegIrqMask); |
2331 | pci_push(base); | 2391 | pci_push(base); |
2332 | 2392 | ||
@@ -2441,7 +2501,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2441 | if (adv & ADVERTISE_100FULL) | 2501 | if (adv & ADVERTISE_100FULL) |
2442 | ecmd->advertising |= ADVERTISED_100baseT_Full; | 2502 | ecmd->advertising |= ADVERTISED_100baseT_Full; |
2443 | if (np->autoneg && np->gigabit == PHY_GIGABIT) { | 2503 | if (np->autoneg && np->gigabit == PHY_GIGABIT) { |
2444 | adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); | 2504 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
2445 | if (adv & ADVERTISE_1000FULL) | 2505 | if (adv & ADVERTISE_1000FULL) |
2446 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | 2506 | ecmd->advertising |= ADVERTISED_1000baseT_Full; |
2447 | } | 2507 | } |
@@ -2505,23 +2565,23 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2505 | 2565 | ||
2506 | /* advertise only what has been requested */ | 2566 | /* advertise only what has been requested */ |
2507 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | 2567 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
2508 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); | 2568 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
2509 | if (ecmd->advertising & ADVERTISED_10baseT_Half) | 2569 | if (ecmd->advertising & ADVERTISED_10baseT_Half) |
2510 | adv |= ADVERTISE_10HALF; | 2570 | adv |= ADVERTISE_10HALF; |
2511 | if (ecmd->advertising & ADVERTISED_10baseT_Full) | 2571 | if (ecmd->advertising & ADVERTISED_10baseT_Full) |
2512 | adv |= ADVERTISE_10FULL; | 2572 | adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; |
2513 | if (ecmd->advertising & ADVERTISED_100baseT_Half) | 2573 | if (ecmd->advertising & ADVERTISED_100baseT_Half) |
2514 | adv |= ADVERTISE_100HALF; | 2574 | adv |= ADVERTISE_100HALF; |
2515 | if (ecmd->advertising & ADVERTISED_100baseT_Full) | 2575 | if (ecmd->advertising & ADVERTISED_100baseT_Full) |
2516 | adv |= ADVERTISE_100FULL; | 2576 | adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; |
2517 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); | 2577 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); |
2518 | 2578 | ||
2519 | if (np->gigabit == PHY_GIGABIT) { | 2579 | if (np->gigabit == PHY_GIGABIT) { |
2520 | adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); | 2580 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
2521 | adv &= ~ADVERTISE_1000FULL; | 2581 | adv &= ~ADVERTISE_1000FULL; |
2522 | if (ecmd->advertising & ADVERTISED_1000baseT_Full) | 2582 | if (ecmd->advertising & ADVERTISED_1000baseT_Full) |
2523 | adv |= ADVERTISE_1000FULL; | 2583 | adv |= ADVERTISE_1000FULL; |
2524 | mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv); | 2584 | mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); |
2525 | } | 2585 | } |
2526 | 2586 | ||
2527 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | 2587 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
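In nv_set_settings() the pause bits are now cleared together with the speed/duplex advertisement and re-added only for full-duplex modes, so flow control is never advertised on a half-duplex request. A small sketch of that mapping from ethtool modes to MII advertisement bits (bit values as in <linux/ethtool.h> and <linux/mii.h>; the helper itself is illustrative):

    #include <stdint.h>

    #define ADVERTISE_10HALF     0x0020
    #define ADVERTISE_10FULL     0x0040
    #define ADVERTISE_100HALF    0x0080
    #define ADVERTISE_100FULL    0x0100
    #define ADVERTISE_PAUSE_CAP  0x0400
    #define ADVERTISE_PAUSE_ASYM 0x0800

    #define ADVERTISED_10baseT_Half  (1 << 0)
    #define ADVERTISED_10baseT_Full  (1 << 1)
    #define ADVERTISED_100baseT_Half (1 << 2)
    #define ADVERTISED_100baseT_Full (1 << 3)

    /* Translate requested ethtool modes into MII advertisement bits; pause
     * capability is advertised only alongside a full-duplex mode. */
    static uint16_t build_advertise(uint32_t advertising)
    {
            uint16_t adv = 0;

            if (advertising & ADVERTISED_10baseT_Half)
                    adv |= ADVERTISE_10HALF;
            if (advertising & ADVERTISED_10baseT_Full)
                    adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP |
                           ADVERTISE_PAUSE_ASYM;
            if (advertising & ADVERTISED_100baseT_Half)
                    adv |= ADVERTISE_100HALF;
            if (advertising & ADVERTISED_100baseT_Full)
                    adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP |
                           ADVERTISE_PAUSE_ASYM;
            return adv;
    }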
@@ -2534,22 +2594,22 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
2534 | np->autoneg = 0; | 2594 | np->autoneg = 0; |
2535 | 2595 | ||
2536 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | 2596 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
2537 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); | 2597 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
2538 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) | 2598 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) |
2539 | adv |= ADVERTISE_10HALF; | 2599 | adv |= ADVERTISE_10HALF; |
2540 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) | 2600 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) |
2541 | adv |= ADVERTISE_10FULL; | 2601 | adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; |
2542 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) | 2602 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) |
2543 | adv |= ADVERTISE_100HALF; | 2603 | adv |= ADVERTISE_100HALF; |
2544 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) | 2604 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) |
2545 | adv |= ADVERTISE_100FULL; | 2605 | adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; |
2546 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); | 2606 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); |
2547 | np->fixed_mode = adv; | 2607 | np->fixed_mode = adv; |
2548 | 2608 | ||
2549 | if (np->gigabit == PHY_GIGABIT) { | 2609 | if (np->gigabit == PHY_GIGABIT) { |
2550 | adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); | 2610 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
2551 | adv &= ~ADVERTISE_1000FULL; | 2611 | adv &= ~ADVERTISE_1000FULL; |
2552 | mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv); | 2612 | mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); |
2553 | } | 2613 | } |
2554 | 2614 | ||
2555 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | 2615 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
@@ -2829,6 +2889,9 @@ static int nv_open(struct net_device *dev) | |||
2829 | 2889 | ||
2830 | writel(0, base + NvRegAdapterControl); | 2890 | writel(0, base + NvRegAdapterControl); |
2831 | 2891 | ||
2892 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) | ||
2893 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | ||
2894 | |||
2832 | /* 2) initialize descriptor rings */ | 2895 | /* 2) initialize descriptor rings */ |
2833 | set_bufsize(dev); | 2896 | set_bufsize(dev); |
2834 | oom = nv_init_ring(dev); | 2897 | oom = nv_init_ring(dev); |
@@ -3114,6 +3177,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
3114 | np->msi_flags |= NV_MSI_X_CAPABLE; | 3177 | np->msi_flags |= NV_MSI_X_CAPABLE; |
3115 | } | 3178 | } |
3116 | 3179 | ||
3180 | np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE; | ||
3181 | if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { | ||
3182 | np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE; | ||
3183 | } | ||
3184 | |||
3185 | |||
3117 | err = -ENOMEM; | 3186 | err = -ENOMEM; |
3118 | np->base = ioremap(addr, np->register_size); | 3187 | np->base = ioremap(addr, np->register_size); |
3119 | if (!np->base) | 3188 | if (!np->base) |
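Probe now derives flow-control capability from the PCI table's driver_data: every supported chip is treated as RX-pause capable, and TX pause is flagged only for devices marked DEV_HAS_PAUSEFRAME_TX (the MCP55 entries updated below). A tiny sketch of that derivation (constants copied from the hunks above; the wrapper function is made up):

    #include <stdint.h>

    #define DEV_HAS_PAUSEFRAME_TX    0x0200

    #define NV_PAUSEFRAME_RX_CAPABLE 0x0001
    #define NV_PAUSEFRAME_TX_CAPABLE 0x0002

    /* Derive flow-control capabilities from the PCI table's driver_data. */
    static uint32_t pause_caps_from_driver_data(uint32_t driver_data)
    {
            uint32_t caps = NV_PAUSEFRAME_RX_CAPABLE;  /* assumed for all chips */

            if (driver_data & DEV_HAS_PAUSEFRAME_TX)
                    caps |= NV_PAUSEFRAME_TX_CAPABLE;
            return caps;
    }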
@@ -3260,7 +3329,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
3260 | pci_name(pci_dev)); | 3329 | pci_name(pci_dev)); |
3261 | goto out_freering; | 3330 | goto out_freering; |
3262 | } | 3331 | } |
3263 | 3332 | ||
3264 | /* reset it */ | 3333 | /* reset it */ |
3265 | phy_init(dev); | 3334 | phy_init(dev); |
3266 | 3335 | ||
@@ -3374,11 +3443,11 @@ static struct pci_device_id pci_tbl[] = { | |||
3374 | }, | 3443 | }, |
3375 | { /* MCP55 Ethernet Controller */ | 3444 | { /* MCP55 Ethernet Controller */ |
3376 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), | 3445 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), |
3377 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL, | 3446 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX, |
3378 | }, | 3447 | }, |
3379 | { /* MCP55 Ethernet Controller */ | 3448 | { /* MCP55 Ethernet Controller */ |
3380 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), | 3449 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), |
3381 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL, | 3450 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX, |
3382 | }, | 3451 | }, |
3383 | {0,}, | 3452 | {0,}, |
3384 | }; | 3453 | }; |
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index 01ad904215a1..51fd51609ea9 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | net-3-driver for the IBM LAN Adapter/A | 2 | net-3-driver for the IBM LAN Adapter/A |
3 | 3 | ||
4 | This is an extension to the Linux operating system, and is covered by the | 4 | This is an extension to the Linux operating system, and is covered by the |
@@ -11,9 +11,9 @@ This driver is based both on the SK_MCA driver, which is itself based on the | |||
11 | SK_G16 and 3C523 driver. | 11 | SK_G16 and 3C523 driver. |
12 | 12 | ||
13 | paper sources: | 13 | paper sources: |
14 | 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by | 14 | 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by |
15 | Hans-Peter Messmer for the basic Microchannel stuff | 15 | Hans-Peter Messmer for the basic Microchannel stuff |
16 | 16 | ||
17 | 'Linux Geraetetreiber' by Allesandro Rubini, Kalle Dalheimer | 17 | 'Linux Geraetetreiber' by Allesandro Rubini, Kalle Dalheimer |
18 | for help on Ethernet driver programming | 18 | for help on Ethernet driver programming |
19 | 19 | ||
@@ -27,14 +27,14 @@ paper sources: | |||
27 | 27 | ||
28 | special acknowledgements to: | 28 | special acknowledgements to: |
29 | - Bob Eager for helping me out with documentation from IBM | 29 | - Bob Eager for helping me out with documentation from IBM |
30 | - Jim Shorney for his endless patience with me while I was using | 30 | - Jim Shorney for his endless patience with me while I was using |
31 | him as a beta tester to trace down the address filter bug ;-) | 31 | him as a beta tester to trace down the address filter bug ;-) |
32 | 32 | ||
33 | Missing things: | 33 | Missing things: |
34 | 34 | ||
35 | -> set debug level via ioctl instead of compile-time switches | 35 | -> set debug level via ioctl instead of compile-time switches |
36 | -> I didn't follow the development of the 2.1.x kernels, so my | 36 | -> I didn't follow the development of the 2.1.x kernels, so my |
37 | assumptions about which things changed with which kernel version | 37 | assumptions about which things changed with which kernel version |
38 | are probably nonsense | 38 | are probably nonsense |
39 | 39 | ||
40 | History: | 40 | History: |
@@ -275,7 +275,7 @@ static void InitDscrs(struct net_device *dev) | |||
275 | priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE); | 275 | priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE); |
276 | priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t)); | 276 | priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t)); |
277 | priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t)); | 277 | priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t)); |
278 | 278 | ||
279 | for (z = 0; z < priv->rxbufcnt; z++) { | 279 | for (z = 0; z < priv->rxbufcnt; z++) { |
280 | rra.startlo = baddr; | 280 | rra.startlo = baddr; |
281 | rra.starthi = 0; | 281 | rra.starthi = 0; |
@@ -570,7 +570,7 @@ static void irqrx_handler(struct net_device *dev) | |||
570 | lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t)); | 570 | lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t)); |
571 | memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t)); | 571 | memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t)); |
572 | 572 | ||
573 | /* iron out upper word halves of fields we use - SONIC will duplicate | 573 | /* iron out upper word halves of fields we use - SONIC will duplicate |
574 | bits 0..15 to 16..31 */ | 574 | bits 0..15 to 16..31 */ |
575 | 575 | ||
576 | rda.status &= 0xffff; | 576 | rda.status &= 0xffff; |
@@ -836,9 +836,9 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev) | |||
836 | baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE); | 836 | baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE); |
837 | memcpy_toio(priv->base + baddr, skb->data, skb->len); | 837 | memcpy_toio(priv->base + baddr, skb->data, skb->len); |
838 | 838 | ||
839 | /* copy filler into RAM - in case we're filling up... | 839 | /* copy filler into RAM - in case we're filling up... |
840 | we're filling a bit more than necessary, but that doesn't harm | 840 | we're filling a bit more than necessary, but that doesn't harm |
841 | since the buffer is far larger... | 841 | since the buffer is far larger... |
842 | Sorry Linus for the filler string but I couldn't resist ;-) */ | 842 | Sorry Linus for the filler string but I couldn't resist ;-) */ |
843 | 843 | ||
844 | if (tmplen > skb->len) { | 844 | if (tmplen > skb->len) { |
@@ -952,7 +952,7 @@ static int ibmlana_probe(struct net_device *dev) | |||
952 | priv->realirq = irq; | 952 | priv->realirq = irq; |
953 | priv->medium = medium; | 953 | priv->medium = medium; |
954 | spin_lock_init(&priv->lock); | 954 | spin_lock_init(&priv->lock); |
955 | 955 | ||
956 | 956 | ||
957 | /* set base + irq for this device (irq not allocated so far) */ | 957 | /* set base + irq for this device (irq not allocated so far) */ |
958 | 958 | ||
diff --git a/drivers/net/ibmlana.h b/drivers/net/ibmlana.h index 458ee226e537..6b58bab9e308 100644 --- a/drivers/net/ibmlana.h +++ b/drivers/net/ibmlana.h | |||
@@ -17,7 +17,7 @@ | |||
17 | /* media enumeration - defined in a way that it fits onto the LAN/A's | 17 | /* media enumeration - defined in a way that it fits onto the LAN/A's |
18 | POS registers... */ | 18 | POS registers... */ |
19 | 19 | ||
20 | typedef enum { | 20 | typedef enum { |
21 | Media_10BaseT, Media_10Base5, | 21 | Media_10BaseT, Media_10Base5, |
22 | Media_Unknown, Media_10Base2, Media_Count | 22 | Media_Unknown, Media_10Base2, Media_Count |
23 | } ibmlana_medium; | 23 | } ibmlana_medium; |
@@ -27,7 +27,7 @@ typedef enum { | |||
27 | typedef struct { | 27 | typedef struct { |
28 | unsigned int slot; /* MCA-Slot-# */ | 28 | unsigned int slot; /* MCA-Slot-# */ |
29 | struct net_device_stats stat; /* packet statistics */ | 29 | struct net_device_stats stat; /* packet statistics */ |
30 | int realirq; /* memorizes actual IRQ, even when | 30 | int realirq; /* memorizes actual IRQ, even when |
31 | currently not allocated */ | 31 | currently not allocated */ |
32 | ibmlana_medium medium; /* physical connector */ | 32 | ibmlana_medium medium; /* physical connector */ |
33 | u32 tdastart, txbufstart, /* addresses */ | 33 | u32 tdastart, txbufstart, /* addresses */ |
@@ -41,7 +41,7 @@ typedef struct { | |||
41 | spinlock_t lock; | 41 | spinlock_t lock; |
42 | } ibmlana_priv; | 42 | } ibmlana_priv; |
43 | 43 | ||
44 | /* this card uses quite a lot of I/O ports...luckily the MCA bus decodes | 44 | /* this card uses quite a lot of I/O ports...luckily the MCA bus decodes |
45 | a full 64K I/O range... */ | 45 | a full 64K I/O range... */ |
46 | 46 | ||
47 | #define IBM_LANA_IORANGE 0xa0 | 47 | #define IBM_LANA_IORANGE 0xa0 |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 52d01027d9e7..666346f6469e 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -24,7 +24,7 @@ | |||
24 | /* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */ | 24 | /* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */ |
25 | /* option of the RS/6000 Platform Architecture to interface with virtual */ | 25 | /* option of the RS/6000 Platform Architecture to interface with virtual */ |
26 | /* ethernet NICs that are presented to the partition by the hypervisor. */ | 26 | /* ethernet NICs that are presented to the partition by the hypervisor. */ |
27 | /* */ | 27 | /* */ |
28 | /**************************************************************************/ | 28 | /**************************************************************************/ |
29 | /* | 29 | /* |
30 | TODO: | 30 | TODO: |
@@ -79,7 +79,7 @@ | |||
79 | #else | 79 | #else |
80 | #define ibmveth_debug_printk_no_adapter(fmt, args...) | 80 | #define ibmveth_debug_printk_no_adapter(fmt, args...) |
81 | #define ibmveth_debug_printk(fmt, args...) | 81 | #define ibmveth_debug_printk(fmt, args...) |
82 | #define ibmveth_assert(expr) | 82 | #define ibmveth_assert(expr) |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | static int ibmveth_open(struct net_device *dev); | 85 | static int ibmveth_open(struct net_device *dev); |
@@ -96,6 +96,7 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter); | |||
96 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); | 96 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); |
97 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs); | 97 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs); |
98 | static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); | 98 | static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); |
99 | static struct kobj_type ktype_veth_pool; | ||
99 | 100 | ||
100 | #ifdef CONFIG_PROC_FS | 101 | #ifdef CONFIG_PROC_FS |
101 | #define IBMVETH_PROC_DIR "net/ibmveth" | 102 | #define IBMVETH_PROC_DIR "net/ibmveth" |
@@ -133,12 +134,13 @@ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) | |||
133 | } | 134 | } |
134 | 135 | ||
135 | /* setup the initial settings for a buffer pool */ | 136 | /* setup the initial settings for a buffer pool */ |
136 | static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size) | 137 | static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active) |
137 | { | 138 | { |
138 | pool->size = pool_size; | 139 | pool->size = pool_size; |
139 | pool->index = pool_index; | 140 | pool->index = pool_index; |
140 | pool->buff_size = buff_size; | 141 | pool->buff_size = buff_size; |
141 | pool->threshold = pool_size / 2; | 142 | pool->threshold = pool_size / 2; |
143 | pool->active = pool_active; | ||
142 | } | 144 | } |
143 | 145 | ||
144 | /* allocate and setup an buffer pool - called during open */ | 146 | /* allocate and setup an buffer pool - called during open */ |
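ibmveth_init_buffer_pool() now takes a pool_active argument, so a pool can be described at probe time but left unbacked by memory until it is activated (and ibmveth_alloc_buffer_pool() no longer forces active back to 0). A trimmed-down sketch of the same idea (struct and function names are stand-ins, not the driver's):

    #include <stdint.h>

    /* Trimmed-down stand-in for struct ibmveth_buff_pool. */
    struct buff_pool {
            uint32_t index;
            uint32_t size;       /* number of buffers */
            uint32_t buff_size;  /* bytes per buffer */
            uint32_t threshold;  /* refill when available drops below this */
            uint32_t active;     /* 0 = configured but not backed by memory */
    };

    static void init_buffer_pool(struct buff_pool *pool, uint32_t index,
                                 uint32_t size, uint32_t buff_size,
                                 uint32_t active)
    {
            pool->index = index;
            pool->size = size;
            pool->buff_size = buff_size;
            pool->threshold = size / 2;
            pool->active = active;  /* memory is allocated only for active pools */
    }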
@@ -146,13 +148,13 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) | |||
146 | { | 148 | { |
147 | int i; | 149 | int i; |
148 | 150 | ||
149 | pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); | 151 | pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); |
150 | 152 | ||
151 | if(!pool->free_map) { | 153 | if(!pool->free_map) { |
152 | return -1; | 154 | return -1; |
153 | } | 155 | } |
154 | 156 | ||
155 | pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); | 157 | pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); |
156 | if(!pool->dma_addr) { | 158 | if(!pool->dma_addr) { |
157 | kfree(pool->free_map); | 159 | kfree(pool->free_map); |
158 | pool->free_map = NULL; | 160 | pool->free_map = NULL; |
@@ -180,7 +182,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) | |||
180 | atomic_set(&pool->available, 0); | 182 | atomic_set(&pool->available, 0); |
181 | pool->producer_index = 0; | 183 | pool->producer_index = 0; |
182 | pool->consumer_index = 0; | 184 | pool->consumer_index = 0; |
183 | pool->active = 0; | ||
184 | 185 | ||
185 | return 0; | 186 | return 0; |
186 | } | 187 | } |
@@ -214,7 +215,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
214 | 215 | ||
215 | free_index = pool->consumer_index++ % pool->size; | 216 | free_index = pool->consumer_index++ % pool->size; |
216 | index = pool->free_map[free_index]; | 217 | index = pool->free_map[free_index]; |
217 | 218 | ||
218 | ibmveth_assert(index != IBM_VETH_INVALID_MAP); | 219 | ibmveth_assert(index != IBM_VETH_INVALID_MAP); |
219 | ibmveth_assert(pool->skbuff[index] == NULL); | 220 | ibmveth_assert(pool->skbuff[index] == NULL); |
220 | 221 | ||
@@ -231,10 +232,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
231 | desc.desc = 0; | 232 | desc.desc = 0; |
232 | desc.fields.valid = 1; | 233 | desc.fields.valid = 1; |
233 | desc.fields.length = pool->buff_size; | 234 | desc.fields.length = pool->buff_size; |
234 | desc.fields.address = dma_addr; | 235 | desc.fields.address = dma_addr; |
235 | 236 | ||
236 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); | 237 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); |
237 | 238 | ||
238 | if(lpar_rc != H_SUCCESS) { | 239 | if(lpar_rc != H_SUCCESS) { |
239 | pool->free_map[free_index] = index; | 240 | pool->free_map[free_index] = index; |
240 | pool->skbuff[index] = NULL; | 241 | pool->skbuff[index] = NULL; |
@@ -250,13 +251,13 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
250 | adapter->replenish_add_buff_success++; | 251 | adapter->replenish_add_buff_success++; |
251 | } | 252 | } |
252 | } | 253 | } |
253 | 254 | ||
254 | mb(); | 255 | mb(); |
255 | atomic_add(buffers_added, &(pool->available)); | 256 | atomic_add(buffers_added, &(pool->available)); |
256 | } | 257 | } |
257 | 258 | ||
258 | /* replenish routine */ | 259 | /* replenish routine */ |
259 | static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) | 260 | static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) |
260 | { | 261 | { |
261 | int i; | 262 | int i; |
262 | 263 | ||
@@ -264,7 +265,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) | |||
264 | 265 | ||
265 | for(i = 0; i < IbmVethNumBufferPools; i++) | 266 | for(i = 0; i < IbmVethNumBufferPools; i++) |
266 | if(adapter->rx_buff_pool[i].active) | 267 | if(adapter->rx_buff_pool[i].active) |
267 | ibmveth_replenish_buffer_pool(adapter, | 268 | ibmveth_replenish_buffer_pool(adapter, |
268 | &adapter->rx_buff_pool[i]); | 269 | &adapter->rx_buff_pool[i]); |
269 | 270 | ||
270 | adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); | 271 | adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); |
@@ -301,7 +302,6 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm | |||
301 | kfree(pool->skbuff); | 302 | kfree(pool->skbuff); |
302 | pool->skbuff = NULL; | 303 | pool->skbuff = NULL; |
303 | } | 304 | } |
304 | pool->active = 0; | ||
305 | } | 305 | } |
306 | 306 | ||
307 | /* remove a buffer from a pool */ | 307 | /* remove a buffer from a pool */ |
@@ -372,7 +372,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |||
372 | desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; | 372 | desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; |
373 | 373 | ||
374 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); | 374 | lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); |
375 | 375 | ||
376 | if(lpar_rc != H_SUCCESS) { | 376 | if(lpar_rc != H_SUCCESS) { |
377 | ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc); | 377 | ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc); |
378 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); | 378 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); |
@@ -407,7 +407,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
407 | } | 407 | } |
408 | free_page((unsigned long)adapter->buffer_list_addr); | 408 | free_page((unsigned long)adapter->buffer_list_addr); |
409 | adapter->buffer_list_addr = NULL; | 409 | adapter->buffer_list_addr = NULL; |
410 | } | 410 | } |
411 | 411 | ||
412 | if(adapter->filter_list_addr != NULL) { | 412 | if(adapter->filter_list_addr != NULL) { |
413 | if(!dma_mapping_error(adapter->filter_list_dma)) { | 413 | if(!dma_mapping_error(adapter->filter_list_dma)) { |
@@ -433,7 +433,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
433 | } | 433 | } |
434 | 434 | ||
435 | for(i = 0; i<IbmVethNumBufferPools; i++) | 435 | for(i = 0; i<IbmVethNumBufferPools; i++) |
436 | ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]); | 436 | if (adapter->rx_buff_pool[i].active) |
437 | ibmveth_free_buffer_pool(adapter, | ||
438 | &adapter->rx_buff_pool[i]); | ||
437 | } | 439 | } |
438 | 440 | ||
439 | static int ibmveth_open(struct net_device *netdev) | 441 | static int ibmveth_open(struct net_device *netdev) |
@@ -450,10 +452,10 @@ static int ibmveth_open(struct net_device *netdev) | |||
450 | 452 | ||
451 | for(i = 0; i<IbmVethNumBufferPools; i++) | 453 | for(i = 0; i<IbmVethNumBufferPools; i++) |
452 | rxq_entries += adapter->rx_buff_pool[i].size; | 454 | rxq_entries += adapter->rx_buff_pool[i].size; |
453 | 455 | ||
454 | adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); | 456 | adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); |
455 | adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); | 457 | adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); |
456 | 458 | ||
457 | if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { | 459 | if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { |
458 | ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); | 460 | ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); |
459 | ibmveth_cleanup(adapter); | 461 | ibmveth_cleanup(adapter); |
@@ -489,9 +491,6 @@ static int ibmveth_open(struct net_device *netdev) | |||
489 | adapter->rx_queue.num_slots = rxq_entries; | 491 | adapter->rx_queue.num_slots = rxq_entries; |
490 | adapter->rx_queue.toggle = 1; | 492 | adapter->rx_queue.toggle = 1; |
491 | 493 | ||
492 | /* call change_mtu to init the buffer pools based in initial mtu */ | ||
493 | ibmveth_change_mtu(netdev, netdev->mtu); | ||
494 | |||
495 | memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); | 494 | memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); |
496 | mac_address = mac_address >> 16; | 495 | mac_address = mac_address >> 16; |
497 | 496 | ||
@@ -504,7 +503,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
504 | ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr); | 503 | ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr); |
505 | ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr); | 504 | ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr); |
506 | 505 | ||
507 | 506 | ||
508 | lpar_rc = h_register_logical_lan(adapter->vdev->unit_address, | 507 | lpar_rc = h_register_logical_lan(adapter->vdev->unit_address, |
509 | adapter->buffer_list_dma, | 508 | adapter->buffer_list_dma, |
510 | rxq_desc.desc, | 509 | rxq_desc.desc, |
@@ -519,7 +518,18 @@ static int ibmveth_open(struct net_device *netdev) | |||
519 | rxq_desc.desc, | 518 | rxq_desc.desc, |
520 | mac_address); | 519 | mac_address); |
521 | ibmveth_cleanup(adapter); | 520 | ibmveth_cleanup(adapter); |
522 | return -ENONET; | 521 | return -ENONET; |
522 | } | ||
523 | |||
524 | for(i = 0; i<IbmVethNumBufferPools; i++) { | ||
525 | if(!adapter->rx_buff_pool[i].active) | ||
526 | continue; | ||
527 | if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { | ||
528 | ibmveth_error_printk("unable to alloc pool\n"); | ||
529 | adapter->rx_buff_pool[i].active = 0; | ||
530 | ibmveth_cleanup(adapter); | ||
531 | return -ENOMEM ; | ||
532 | } | ||
523 | } | 533 | } |
524 | 534 | ||
525 | ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq); | 535 | ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq); |
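The hunk above moves buffer-pool setup into ibmveth_open(): once h_register_logical_lan() has succeeded, each pool flagged active gets its buffers allocated, and a single failure tears the whole adapter back down rather than limping along. A minimal userspace sketch of that allocate-or-unwind loop follows; the pool sizes and the malloc() stand-in are illustrative, not driver code.

/*
 * Standalone sketch, not driver code: allocate only the pools flagged
 * active and unwind everything on the first failure, as ibmveth_open()
 * now does after h_register_logical_lan() succeeds. Pool sizes and the
 * malloc() stand-in for the skbuff/DMA setup are made up.
 */
#include <stdio.h>
#include <stdlib.h>

#define NUM_POOLS 5

struct pool {
	int active;
	size_t buff_size;
	void *mem;		/* stands in for the skbuff and dma_addr arrays */
};

static void free_pools(struct pool *p, int n)
{
	while (n--) {
		free(p[n].mem);	/* free(NULL) is a no-op, so partial setups are fine */
		p[n].mem = NULL;
	}
}

int main(void)
{
	struct pool pools[NUM_POOLS] = {
		{ 1, 512 }, { 1, 2048 }, { 0, 16384 }, { 0, 32768 }, { 0, 65536 },
	};
	int i;

	for (i = 0; i < NUM_POOLS; i++) {
		if (!pools[i].active)
			continue;
		pools[i].mem = malloc(pools[i].buff_size);
		if (!pools[i].mem) {
			fprintf(stderr, "unable to alloc pool %d\n", i);
			free_pools(pools, NUM_POOLS);	/* mirrors ibmveth_cleanup() */
			return 1;
		}
	}
	printf("active pools allocated\n");
	free_pools(pools, NUM_POOLS);
	return 0;
}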
@@ -547,10 +557,11 @@ static int ibmveth_close(struct net_device *netdev) | |||
547 | { | 557 | { |
548 | struct ibmveth_adapter *adapter = netdev->priv; | 558 | struct ibmveth_adapter *adapter = netdev->priv; |
549 | long lpar_rc; | 559 | long lpar_rc; |
550 | 560 | ||
551 | ibmveth_debug_printk("close starting\n"); | 561 | ibmveth_debug_printk("close starting\n"); |
552 | 562 | ||
553 | netif_stop_queue(netdev); | 563 | if (!adapter->pool_config) |
564 | netif_stop_queue(netdev); | ||
554 | 565 | ||
555 | free_irq(netdev->irq, netdev); | 566 | free_irq(netdev->irq, netdev); |
556 | 567 | ||
@@ -694,7 +705,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
694 | desc[5].desc, | 705 | desc[5].desc, |
695 | correlator); | 706 | correlator); |
696 | } while ((lpar_rc == H_BUSY) && (retry_count--)); | 707 | } while ((lpar_rc == H_BUSY) && (retry_count--)); |
697 | 708 | ||
698 | if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) { | 709 | if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) { |
699 | int i; | 710 | int i; |
700 | ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc); | 711 | ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc); |
@@ -780,7 +791,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget) | |||
780 | /* more work to do - return that we are not done yet */ | 791 | /* more work to do - return that we are not done yet */ |
781 | netdev->quota -= frames_processed; | 792 | netdev->quota -= frames_processed; |
782 | *budget -= frames_processed; | 793 | *budget -= frames_processed; |
783 | return 1; | 794 | return 1; |
784 | } | 795 | } |
785 | 796 | ||
786 | /* we think we are done - reenable interrupts, then check once more to make sure we are done */ | 797 | /* we think we are done - reenable interrupts, then check once more to make sure we are done */ |
@@ -806,7 +817,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget) | |||
806 | } | 817 | } |
807 | 818 | ||
808 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs) | 819 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs) |
809 | { | 820 | { |
810 | struct net_device *netdev = dev_instance; | 821 | struct net_device *netdev = dev_instance; |
811 | struct ibmveth_adapter *adapter = netdev->priv; | 822 | struct ibmveth_adapter *adapter = netdev->priv; |
812 | unsigned long lpar_rc; | 823 | unsigned long lpar_rc; |
@@ -862,7 +873,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) | |||
862 | ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc); | 873 | ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc); |
863 | } | 874 | } |
864 | } | 875 | } |
865 | 876 | ||
866 | /* re-enable filtering */ | 877 | /* re-enable filtering */ |
867 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | 878 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, |
868 | IbmVethMcastEnableFiltering, | 879 | IbmVethMcastEnableFiltering, |
@@ -876,46 +887,22 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) | |||
876 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | 887 | static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) |
877 | { | 888 | { |
878 | struct ibmveth_adapter *adapter = dev->priv; | 889 | struct ibmveth_adapter *adapter = dev->priv; |
890 | int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; | ||
879 | int i; | 891 | int i; |
880 | int prev_smaller = 1; | ||
881 | 892 | ||
882 | if ((new_mtu < 68) || | 893 | if (new_mtu < IBMVETH_MAX_MTU) |
883 | (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH)) | ||
884 | return -EINVAL; | 894 | return -EINVAL; |
885 | 895 | ||
896 | /* Look for an active buffer pool that can hold the new MTU */ | ||
886 | for(i = 0; i<IbmVethNumBufferPools; i++) { | 897 | for(i = 0; i<IbmVethNumBufferPools; i++) { |
887 | int activate = 0; | 898 | if (!adapter->rx_buff_pool[i].active) |
888 | if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) { | 899 | continue; |
889 | activate = 1; | 900 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { |
890 | prev_smaller= 1; | 901 | dev->mtu = new_mtu; |
891 | } else { | 902 | return 0; |
892 | if (prev_smaller) | ||
893 | activate = 1; | ||
894 | prev_smaller= 0; | ||
895 | } | 903 | } |
896 | |||
897 | if (activate && !adapter->rx_buff_pool[i].active) { | ||
898 | struct ibmveth_buff_pool *pool = | ||
899 | &adapter->rx_buff_pool[i]; | ||
900 | if(ibmveth_alloc_buffer_pool(pool)) { | ||
901 | ibmveth_error_printk("unable to alloc pool\n"); | ||
902 | return -ENOMEM; | ||
903 | } | ||
904 | adapter->rx_buff_pool[i].active = 1; | ||
905 | } else if (!activate && adapter->rx_buff_pool[i].active) { | ||
906 | adapter->rx_buff_pool[i].active = 0; | ||
907 | h_free_logical_lan_buffer(adapter->vdev->unit_address, | ||
908 | (u64)pool_size[i]); | ||
909 | } | ||
910 | |||
911 | } | 904 | } |
912 | 905 | return -EINVAL; | |
913 | /* kick the interrupt handler so that the new buffer pools get | ||
914 | replenished or deallocated */ | ||
915 | ibmveth_interrupt(dev->irq, dev, NULL); | ||
916 | |||
917 | dev->mtu = new_mtu; | ||
918 | return 0; | ||
919 | } | 906 | } |
920 | 907 | ||
921 | static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) | 908 | static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) |
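The rewritten ibmveth_change_mtu() above no longer activates or frees pools itself; it only checks that the requested MTU clears the 68-byte floor and that at least one currently active pool has buffers big enough for the MTU plus the 22-byte overhead. A small userspace sketch of that check, with an illustrative pool table:

/*
 * Userspace sketch, not driver code: the new ibmveth_change_mtu() accepts
 * an MTU only if some active pool's buffers can hold new_mtu plus the
 * 22-byte overhead (14-byte Ethernet header + 8-byte opaque handle).
 * The pool table and test values below are illustrative.
 */
#include <stdio.h>

#define BUFF_OH   22
#define MIN_MTU   68			/* IBMVETH_MAX_MTU in the patch */
#define NUM_POOLS 5

struct pool { int active; int buff_size; };

static int change_mtu(const struct pool *pools, int new_mtu)
{
	int needed = new_mtu + BUFF_OH;
	int i;

	if (new_mtu < MIN_MTU)
		return -1;			/* -EINVAL in the driver */
	for (i = 0; i < NUM_POOLS; i++) {
		if (!pools[i].active)
			continue;
		if (needed < pools[i].buff_size)
			return 0;		/* fits: dev->mtu = new_mtu */
	}
	return -1;				/* no active pool is large enough */
}

int main(void)
{
	struct pool pools[NUM_POOLS] = {
		{ 1, 512 }, { 1, 2048 }, { 0, 16384 }, { 0, 32768 }, { 0, 65536 },
	};

	printf("mtu 1500 -> %d\n", change_mtu(pools, 1500)); /* fits the 2048 pool */
	printf("mtu 9000 -> %d\n", change_mtu(pools, 9000)); /* larger pools inactive */
	return 0;
}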
@@ -928,7 +915,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
928 | unsigned int *mcastFilterSize_p; | 915 | unsigned int *mcastFilterSize_p; |
929 | 916 | ||
930 | 917 | ||
931 | ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n", | 918 | ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n", |
932 | dev->unit_address); | 919 | dev->unit_address); |
933 | 920 | ||
934 | mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0); | 921 | mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0); |
@@ -937,7 +924,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
937 | "attribute\n", __FILE__, __LINE__); | 924 | "attribute\n", __FILE__, __LINE__); |
938 | return 0; | 925 | return 0; |
939 | } | 926 | } |
940 | 927 | ||
941 | mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0); | 928 | mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0); |
942 | if(!mcastFilterSize_p) { | 929 | if(!mcastFilterSize_p) { |
943 | printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find " | 930 | printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find " |
@@ -945,7 +932,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
945 | __FILE__, __LINE__); | 932 | __FILE__, __LINE__); |
946 | return 0; | 933 | return 0; |
947 | } | 934 | } |
948 | 935 | ||
949 | netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); | 936 | netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); |
950 | 937 | ||
951 | if(!netdev) | 938 | if(!netdev) |
@@ -960,13 +947,14 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
960 | adapter->vdev = dev; | 947 | adapter->vdev = dev; |
961 | adapter->netdev = netdev; | 948 | adapter->netdev = netdev; |
962 | adapter->mcastFilterSize= *mcastFilterSize_p; | 949 | adapter->mcastFilterSize= *mcastFilterSize_p; |
963 | 950 | adapter->pool_config = 0; | |
951 | |||
964 | /* Some older boxes running PHYP non-natively have an OF that | 952 | /* Some older boxes running PHYP non-natively have an OF that |
965 | returns a 8-byte local-mac-address field (and the first | 953 | returns a 8-byte local-mac-address field (and the first |
966 | 2 bytes have to be ignored) while newer boxes' OF return | 954 | 2 bytes have to be ignored) while newer boxes' OF return |
967 | a 6-byte field. Note that IEEE 1275 specifies that | 955 | a 6-byte field. Note that IEEE 1275 specifies that |
968 | local-mac-address must be a 6-byte field. | 956 | local-mac-address must be a 6-byte field. |
969 | The RPA doc specifies that the first byte must be 10b, so | 957 | The RPA doc specifies that the first byte must be 10b, so |
970 | we'll just look for it to solve this 8 vs. 6 byte field issue */ | 958 | we'll just look for it to solve this 8 vs. 6 byte field issue */ |
971 | 959 | ||
972 | if ((*mac_addr_p & 0x3) != 0x02) | 960 | if ((*mac_addr_p & 0x3) != 0x02) |
@@ -976,7 +964,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
976 | memcpy(&adapter->mac_addr, mac_addr_p, 6); | 964 | memcpy(&adapter->mac_addr, mac_addr_p, 6); |
977 | 965 | ||
978 | adapter->liobn = dev->iommu_table->it_index; | 966 | adapter->liobn = dev->iommu_table->it_index; |
979 | 967 | ||
980 | netdev->irq = dev->irq; | 968 | netdev->irq = dev->irq; |
981 | netdev->open = ibmveth_open; | 969 | netdev->open = ibmveth_open; |
982 | netdev->poll = ibmveth_poll; | 970 | netdev->poll = ibmveth_poll; |
@@ -989,14 +977,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
989 | netdev->ethtool_ops = &netdev_ethtool_ops; | 977 | netdev->ethtool_ops = &netdev_ethtool_ops; |
990 | netdev->change_mtu = ibmveth_change_mtu; | 978 | netdev->change_mtu = ibmveth_change_mtu; |
991 | SET_NETDEV_DEV(netdev, &dev->dev); | 979 | SET_NETDEV_DEV(netdev, &dev->dev); |
992 | netdev->features |= NETIF_F_LLTX; | 980 | netdev->features |= NETIF_F_LLTX; |
993 | spin_lock_init(&adapter->stats_lock); | 981 | spin_lock_init(&adapter->stats_lock); |
994 | 982 | ||
995 | memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); | 983 | memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); |
996 | 984 | ||
997 | for(i = 0; i<IbmVethNumBufferPools; i++) | 985 | for(i = 0; i<IbmVethNumBufferPools; i++) { |
998 | ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, | 986 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; |
999 | pool_count[i], pool_size[i]); | 987 | ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, |
988 | pool_count[i], pool_size[i], | ||
989 | pool_active[i]); | ||
990 | kobj->parent = &dev->dev.kobj; | ||
991 | sprintf(kobj->name, "pool%d", i); | ||
992 | kobj->ktype = &ktype_veth_pool; | ||
993 | kobject_register(kobj); | ||
994 | } | ||
1000 | 995 | ||
1001 | ibmveth_debug_printk("adapter @ 0x%p\n", adapter); | 996 | ibmveth_debug_printk("adapter @ 0x%p\n", adapter); |
1002 | 997 | ||
@@ -1025,6 +1020,10 @@ static int __devexit ibmveth_remove(struct vio_dev *dev) | |||
1025 | { | 1020 | { |
1026 | struct net_device *netdev = dev->dev.driver_data; | 1021 | struct net_device *netdev = dev->dev.driver_data; |
1027 | struct ibmveth_adapter *adapter = netdev->priv; | 1022 | struct ibmveth_adapter *adapter = netdev->priv; |
1023 | int i; | ||
1024 | |||
1025 | for(i = 0; i<IbmVethNumBufferPools; i++) | ||
1026 | kobject_unregister(&adapter->rx_buff_pool[i].kobj); | ||
1028 | 1027 | ||
1029 | unregister_netdev(netdev); | 1028 | unregister_netdev(netdev); |
1030 | 1029 | ||
@@ -1048,7 +1047,7 @@ static void ibmveth_proc_unregister_driver(void) | |||
1048 | remove_proc_entry(IBMVETH_PROC_DIR, NULL); | 1047 | remove_proc_entry(IBMVETH_PROC_DIR, NULL); |
1049 | } | 1048 | } |
1050 | 1049 | ||
1051 | static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) | 1050 | static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) |
1052 | { | 1051 | { |
1053 | if (*pos == 0) { | 1052 | if (*pos == 0) { |
1054 | return (void *)1; | 1053 | return (void *)1; |
@@ -1063,18 +1062,18 @@ static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1063 | return NULL; | 1062 | return NULL; |
1064 | } | 1063 | } |
1065 | 1064 | ||
1066 | static void ibmveth_seq_stop(struct seq_file *seq, void *v) | 1065 | static void ibmveth_seq_stop(struct seq_file *seq, void *v) |
1067 | { | 1066 | { |
1068 | } | 1067 | } |
1069 | 1068 | ||
1070 | static int ibmveth_seq_show(struct seq_file *seq, void *v) | 1069 | static int ibmveth_seq_show(struct seq_file *seq, void *v) |
1071 | { | 1070 | { |
1072 | struct ibmveth_adapter *adapter = seq->private; | 1071 | struct ibmveth_adapter *adapter = seq->private; |
1073 | char *current_mac = ((char*) &adapter->netdev->dev_addr); | 1072 | char *current_mac = ((char*) &adapter->netdev->dev_addr); |
1074 | char *firmware_mac = ((char*) &adapter->mac_addr) ; | 1073 | char *firmware_mac = ((char*) &adapter->mac_addr) ; |
1075 | 1074 | ||
1076 | seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version); | 1075 | seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version); |
1077 | 1076 | ||
1078 | seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address); | 1077 | seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address); |
1079 | seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn); | 1078 | seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn); |
1080 | seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", | 1079 | seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", |
@@ -1083,7 +1082,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v) | |||
1083 | seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", | 1082 | seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", |
1084 | firmware_mac[0], firmware_mac[1], firmware_mac[2], | 1083 | firmware_mac[0], firmware_mac[1], firmware_mac[2], |
1085 | firmware_mac[3], firmware_mac[4], firmware_mac[5]); | 1084 | firmware_mac[3], firmware_mac[4], firmware_mac[5]); |
1086 | 1085 | ||
1087 | seq_printf(seq, "\nAdapter Statistics:\n"); | 1086 | seq_printf(seq, "\nAdapter Statistics:\n"); |
1088 | seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized); | 1087 | seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized); |
1089 | seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send); | 1088 | seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send); |
@@ -1095,7 +1094,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v) | |||
1095 | seq_printf(seq, " add buffer failures: %ld\n", adapter->replenish_add_buff_failure); | 1094 | seq_printf(seq, " add buffer failures: %ld\n", adapter->replenish_add_buff_failure); |
1096 | seq_printf(seq, " invalid buffers: %ld\n", adapter->rx_invalid_buffer); | 1095 | seq_printf(seq, " invalid buffers: %ld\n", adapter->rx_invalid_buffer); |
1097 | seq_printf(seq, " no buffers: %ld\n", adapter->rx_no_buffer); | 1096 | seq_printf(seq, " no buffers: %ld\n", adapter->rx_no_buffer); |
1098 | 1097 | ||
1099 | return 0; | 1098 | return 0; |
1100 | } | 1099 | } |
1101 | static struct seq_operations ibmveth_seq_ops = { | 1100 | static struct seq_operations ibmveth_seq_ops = { |
@@ -1153,11 +1152,11 @@ static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter) | |||
1153 | } | 1152 | } |
1154 | 1153 | ||
1155 | #else /* CONFIG_PROC_FS */ | 1154 | #else /* CONFIG_PROC_FS */ |
1156 | static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) | 1155 | static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) |
1157 | { | 1156 | { |
1158 | } | 1157 | } |
1159 | 1158 | ||
1160 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter) | 1159 | static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter) |
1161 | { | 1160 | { |
1162 | } | 1161 | } |
1163 | static void ibmveth_proc_register_driver(void) | 1162 | static void ibmveth_proc_register_driver(void) |
@@ -1169,6 +1168,132 @@ static void ibmveth_proc_unregister_driver(void) | |||
1169 | } | 1168 | } |
1170 | #endif /* CONFIG_PROC_FS */ | 1169 | #endif /* CONFIG_PROC_FS */ |
1171 | 1170 | ||
1171 | static struct attribute veth_active_attr; | ||
1172 | static struct attribute veth_num_attr; | ||
1173 | static struct attribute veth_size_attr; | ||
1174 | |||
1175 | static ssize_t veth_pool_show(struct kobject * kobj, | ||
1176 | struct attribute * attr, char * buf) | ||
1177 | { | ||
1178 | struct ibmveth_buff_pool *pool = container_of(kobj, | ||
1179 | struct ibmveth_buff_pool, | ||
1180 | kobj); | ||
1181 | |||
1182 | if (attr == &veth_active_attr) | ||
1183 | return sprintf(buf, "%d\n", pool->active); | ||
1184 | else if (attr == &veth_num_attr) | ||
1185 | return sprintf(buf, "%d\n", pool->size); | ||
1186 | else if (attr == &veth_size_attr) | ||
1187 | return sprintf(buf, "%d\n", pool->buff_size); | ||
1188 | return 0; | ||
1189 | } | ||
1190 | |||
1191 | static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr, | ||
1192 | const char * buf, size_t count) | ||
1193 | { | ||
1194 | struct ibmveth_buff_pool *pool = container_of(kobj, | ||
1195 | struct ibmveth_buff_pool, | ||
1196 | kobj); | ||
1197 | struct net_device *netdev = | ||
1198 | container_of(kobj->parent, struct device, kobj)->driver_data; | ||
1199 | struct ibmveth_adapter *adapter = netdev->priv; | ||
1200 | long value = simple_strtol(buf, NULL, 10); | ||
1201 | long rc; | ||
1202 | |||
1203 | if (attr == &veth_active_attr) { | ||
1204 | if (value && !pool->active) { | ||
1205 | if(ibmveth_alloc_buffer_pool(pool)) { | ||
1206 | ibmveth_error_printk("unable to alloc pool\n"); | ||
1207 | return -ENOMEM; | ||
1208 | } | ||
1209 | pool->active = 1; | ||
1210 | adapter->pool_config = 1; | ||
1211 | ibmveth_close(netdev); | ||
1212 | adapter->pool_config = 0; | ||
1213 | if ((rc = ibmveth_open(netdev))) | ||
1214 | return rc; | ||
1215 | } else if (!value && pool->active) { | ||
1216 | int mtu = netdev->mtu + IBMVETH_BUFF_OH; | ||
1217 | int i; | ||
1218 | /* Make sure there is a buffer pool with buffers that | ||
1219 | can hold a packet of the size of the MTU */ | ||
1220 | for(i = 0; i<IbmVethNumBufferPools; i++) { | ||
1221 | if (pool == &adapter->rx_buff_pool[i]) | ||
1222 | continue; | ||
1223 | if (!adapter->rx_buff_pool[i].active) | ||
1224 | continue; | ||
1225 | if (mtu < adapter->rx_buff_pool[i].buff_size) { | ||
1226 | pool->active = 0; | ||
1227 | h_free_logical_lan_buffer(adapter-> | ||
1228 | vdev-> | ||
1229 | unit_address, | ||
1230 | pool-> | ||
1231 | buff_size); | ||
1232 | } | ||
1233 | } | ||
1234 | if (pool->active) { | ||
1235 | ibmveth_error_printk("no active pool >= MTU\n"); | ||
1236 | return -EPERM; | ||
1237 | } | ||
1238 | } | ||
1239 | } else if (attr == &veth_num_attr) { | ||
1240 | if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) | ||
1241 | return -EINVAL; | ||
1242 | else { | ||
1243 | adapter->pool_config = 1; | ||
1244 | ibmveth_close(netdev); | ||
1245 | adapter->pool_config = 0; | ||
1246 | pool->size = value; | ||
1247 | if ((rc = ibmveth_open(netdev))) | ||
1248 | return rc; | ||
1249 | } | ||
1250 | } else if (attr == &veth_size_attr) { | ||
1251 | if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) | ||
1252 | return -EINVAL; | ||
1253 | else { | ||
1254 | adapter->pool_config = 1; | ||
1255 | ibmveth_close(netdev); | ||
1256 | adapter->pool_config = 0; | ||
1257 | pool->buff_size = value; | ||
1258 | if ((rc = ibmveth_open(netdev))) | ||
1259 | return rc; | ||
1260 | } | ||
1261 | } | ||
1262 | |||
1263 | /* kick the interrupt handler to allocate/deallocate pools */ | ||
1264 | ibmveth_interrupt(netdev->irq, netdev, NULL); | ||
1265 | return count; | ||
1266 | } | ||
1267 | |||
1268 | |||
1269 | #define ATTR(_name, _mode) \ | ||
1270 | struct attribute veth_##_name##_attr = { \ | ||
1271 | .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \ | ||
1272 | }; | ||
1273 | |||
1274 | static ATTR(active, 0644); | ||
1275 | static ATTR(num, 0644); | ||
1276 | static ATTR(size, 0644); | ||
1277 | |||
1278 | static struct attribute * veth_pool_attrs[] = { | ||
1279 | &veth_active_attr, | ||
1280 | &veth_num_attr, | ||
1281 | &veth_size_attr, | ||
1282 | NULL, | ||
1283 | }; | ||
1284 | |||
1285 | static struct sysfs_ops veth_pool_ops = { | ||
1286 | .show = veth_pool_show, | ||
1287 | .store = veth_pool_store, | ||
1288 | }; | ||
1289 | |||
1290 | static struct kobj_type ktype_veth_pool = { | ||
1291 | .release = NULL, | ||
1292 | .sysfs_ops = &veth_pool_ops, | ||
1293 | .default_attrs = veth_pool_attrs, | ||
1294 | }; | ||
1295 | |||
1296 | |||
1172 | static struct vio_device_id ibmveth_device_table[] __devinitdata= { | 1297 | static struct vio_device_id ibmveth_device_table[] __devinitdata= { |
1173 | { "network", "IBM,l-lan"}, | 1298 | { "network", "IBM,l-lan"}, |
1174 | { "", "" } | 1299 | { "", "" } |
@@ -1198,7 +1323,7 @@ static void __exit ibmveth_module_exit(void) | |||
1198 | { | 1323 | { |
1199 | vio_unregister_driver(&ibmveth_driver); | 1324 | vio_unregister_driver(&ibmveth_driver); |
1200 | ibmveth_proc_unregister_driver(); | 1325 | ibmveth_proc_unregister_driver(); |
1201 | } | 1326 | } |
1202 | 1327 | ||
1203 | module_init(ibmveth_module_init); | 1328 | module_init(ibmveth_module_init); |
1204 | module_exit(ibmveth_module_exit); | 1329 | module_exit(ibmveth_module_exit); |
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h index 46919a814fca..8385bf836507 100644 --- a/drivers/net/ibmveth.h +++ b/drivers/net/ibmveth.h | |||
@@ -75,10 +75,13 @@ | |||
75 | 75 | ||
76 | #define IbmVethNumBufferPools 5 | 76 | #define IbmVethNumBufferPools 5 |
77 | #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ | 77 | #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ |
78 | #define IBMVETH_MAX_MTU 68 | ||
79 | #define IBMVETH_MAX_POOL_COUNT 4096 | ||
80 | #define IBMVETH_MAX_BUF_SIZE (1024 * 128) | ||
78 | 81 | ||
79 | /* pool_size should be sorted */ | ||
80 | static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; | 82 | static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; |
81 | static int pool_count[] = { 256, 768, 256, 256, 256 }; | 83 | static int pool_count[] = { 256, 768, 256, 256, 256 }; |
84 | static int pool_active[] = { 1, 1, 0, 0, 0}; | ||
82 | 85 | ||
83 | #define IBM_VETH_INVALID_MAP ((u16)0xffff) | 86 | #define IBM_VETH_INVALID_MAP ((u16)0xffff) |
84 | 87 | ||
@@ -94,6 +97,7 @@ struct ibmveth_buff_pool { | |||
94 | dma_addr_t *dma_addr; | 97 | dma_addr_t *dma_addr; |
95 | struct sk_buff **skbuff; | 98 | struct sk_buff **skbuff; |
96 | int active; | 99 | int active; |
100 | struct kobject kobj; | ||
97 | }; | 101 | }; |
98 | 102 | ||
99 | struct ibmveth_rx_q { | 103 | struct ibmveth_rx_q { |
@@ -118,6 +122,7 @@ struct ibmveth_adapter { | |||
118 | dma_addr_t filter_list_dma; | 122 | dma_addr_t filter_list_dma; |
119 | struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools]; | 123 | struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools]; |
120 | struct ibmveth_rx_q rx_queue; | 124 | struct ibmveth_rx_q rx_queue; |
125 | int pool_config; | ||
121 | 126 | ||
122 | /* adapter specific stats */ | 127 | /* adapter specific stats */ |
123 | u64 replenish_task_cycles; | 128 | u64 replenish_task_cycles; |
@@ -134,7 +139,7 @@ struct ibmveth_adapter { | |||
134 | spinlock_t stats_lock; | 139 | spinlock_t stats_lock; |
135 | }; | 140 | }; |
136 | 141 | ||
137 | struct ibmveth_buf_desc_fields { | 142 | struct ibmveth_buf_desc_fields { |
138 | u32 valid : 1; | 143 | u32 valid : 1; |
139 | u32 toggle : 1; | 144 | u32 toggle : 1; |
140 | u32 reserved : 6; | 145 | u32 reserved : 6; |
@@ -143,7 +148,7 @@ struct ibmveth_buf_desc_fields { | |||
143 | }; | 148 | }; |
144 | 149 | ||
145 | union ibmveth_buf_desc { | 150 | union ibmveth_buf_desc { |
146 | u64 desc; | 151 | u64 desc; |
147 | struct ibmveth_buf_desc_fields fields; | 152 | struct ibmveth_buf_desc_fields fields; |
148 | }; | 153 | }; |
149 | 154 | ||
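The descriptor handed to the hypervisor stays a single 64-bit value: union ibmveth_buf_desc overlays a u64 with valid(1), toggle(1), reserved(6), length(24) and address(32) bitfields. The sketch below packs the same fields with explicit shifts instead, since bitfield layout is compiler- and endian-dependent; the bit positions assume the big-endian ppc64 layout the driver runs on, and the test values are arbitrary.

/*
 * Sketch, not driver code: pack the ibmveth_buf_desc fields (valid:1,
 * toggle:1, reserved:6, length:24, address:32) into one 64-bit descriptor
 * with explicit shifts. The driver uses a bitfield union instead; the bit
 * positions below assume big-endian first-field-in-MSB placement.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t pack_desc(unsigned int valid, uint32_t length, uint32_t address)
{
	uint64_t d = 0;

	d |= (uint64_t)(valid & 1) << 63;		/* bit 63: valid          */
							/* bit 62: toggle, 61-56: reserved */
	d |= (uint64_t)(length & 0xffffff) << 32;	/* bits 55-32: length     */
	d |= address;					/* bits 31-0: DMA address */
	return d;
}

int main(void)
{
	printf("desc = 0x%016llx\n",
	       (unsigned long long)pack_desc(1, 2048, 0xdeadbeef));
	return 0;
}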
diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile index 7c7aff1ea7d5..a8a2d3d03567 100644 --- a/drivers/net/ixgb/Makefile +++ b/drivers/net/ixgb/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | ################################################################################ | 1 | ################################################################################ |
2 | # | 2 | # |
3 | # | 3 | # |
4 | # Copyright(c) 1999 - 2002 Intel Corporation. All rights reserved. | 4 | # Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | # | 5 | # |
6 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
7 | # under the terms of the GNU General Public License as published by the Free | 7 | # under the terms of the GNU General Public License as published by the Free |
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index c83271b38621..a83ef28dadb0 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -84,7 +84,12 @@ struct ixgb_adapter; | |||
84 | #define IXGB_DBG(args...) | 84 | #define IXGB_DBG(args...) |
85 | #endif | 85 | #endif |
86 | 86 | ||
87 | #define IXGB_ERR(args...) printk(KERN_ERR "ixgb: " args) | 87 | #define PFX "ixgb: " |
88 | #define DPRINTK(nlevel, klevel, fmt, args...) \ | ||
89 | (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ | ||
90 | printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ | ||
91 | __FUNCTION__ , ## args)) | ||
92 | |||
88 | 93 | ||
89 | /* TX/RX descriptor defines */ | 94 | /* TX/RX descriptor defines */ |
90 | #define DEFAULT_TXD 256 | 95 | #define DEFAULT_TXD 256 |
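The DPRINTK() macro added above formats and prints a message only when its NETIF_MSG_* class bit is set in adapter->msg_enable, so logging can be tuned per class at runtime. A userspace sketch of the same gating; the struct and names are stand-ins, and the class values merely follow the NETIF_MSG_* style:

/*
 * Userspace sketch of the DPRINTK() idea, not the kernel macro: a message
 * is only formatted and printed when its class bit is set in the adapter's
 * msg_enable mask. The ## pasting is a GNU extension, as in the kernel macro.
 */
#include <stdio.h>

#define MSG_PROBE  0x0002
#define MSG_LINK   0x0004
#define MSG_TX_ERR 0x0400

struct adapter { const char *name; unsigned int msg_enable; };

#define DPRINTK_SKETCH(a, class, fmt, ...)				\
	do {								\
		if ((a)->msg_enable & (class))				\
			printf("ixgb: %s: %s: " fmt, (a)->name,		\
			       __func__, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	struct adapter a = { "eth0", MSG_PROBE | MSG_LINK };

	DPRINTK_SKETCH(&a, MSG_PROBE, "EEPROM checksum valid\n");  /* printed  */
	DPRINTK_SKETCH(&a, MSG_TX_ERR, "tx hang detected\n");      /* filtered */
	return 0;
}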
@@ -175,6 +180,7 @@ struct ixgb_adapter { | |||
175 | uint64_t hw_csum_tx_good; | 180 | uint64_t hw_csum_tx_good; |
176 | uint64_t hw_csum_tx_error; | 181 | uint64_t hw_csum_tx_error; |
177 | uint32_t tx_int_delay; | 182 | uint32_t tx_int_delay; |
183 | uint32_t tx_timeout_count; | ||
178 | boolean_t tx_int_delay_enable; | 184 | boolean_t tx_int_delay_enable; |
179 | boolean_t detect_tx_hung; | 185 | boolean_t detect_tx_hung; |
180 | 186 | ||
@@ -192,7 +198,9 @@ struct ixgb_adapter { | |||
192 | 198 | ||
193 | /* structs defined in ixgb_hw.h */ | 199 | /* structs defined in ixgb_hw.h */ |
194 | struct ixgb_hw hw; | 200 | struct ixgb_hw hw; |
201 | u16 msg_enable; | ||
195 | struct ixgb_hw_stats stats; | 202 | struct ixgb_hw_stats stats; |
203 | uint32_t alloc_rx_buff_failed; | ||
196 | #ifdef CONFIG_PCI_MSI | 204 | #ifdef CONFIG_PCI_MSI |
197 | boolean_t have_msi; | 205 | boolean_t have_msi; |
198 | #endif | 206 | #endif |
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index 661a46b95a61..8357c5590bfb 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h index 5190aa8761a2..bf6fa220f38e 100644 --- a/drivers/net/ixgb/ixgb_ee.h +++ b/drivers/net/ixgb/ixgb_ee.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index d38ade5f2f4e..cf19b898ba9b 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -44,6 +44,8 @@ extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter); | |||
44 | extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter); | 44 | extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter); |
45 | extern void ixgb_update_stats(struct ixgb_adapter *adapter); | 45 | extern void ixgb_update_stats(struct ixgb_adapter *adapter); |
46 | 46 | ||
47 | #define IXGB_ALL_RAR_ENTRIES 16 | ||
48 | |||
47 | struct ixgb_stats { | 49 | struct ixgb_stats { |
48 | char stat_string[ETH_GSTRING_LEN]; | 50 | char stat_string[ETH_GSTRING_LEN]; |
49 | int sizeof_stat; | 51 | int sizeof_stat; |
@@ -76,6 +78,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = { | |||
76 | {"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)}, | 78 | {"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)}, |
77 | {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)}, | 79 | {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)}, |
78 | {"tx_deferred_ok", IXGB_STAT(stats.dc)}, | 80 | {"tx_deferred_ok", IXGB_STAT(stats.dc)}, |
81 | {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, | ||
79 | {"rx_long_length_errors", IXGB_STAT(stats.roc)}, | 82 | {"rx_long_length_errors", IXGB_STAT(stats.roc)}, |
80 | {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, | 83 | {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, |
81 | #ifdef NETIF_F_TSO | 84 | #ifdef NETIF_F_TSO |
@@ -117,6 +120,16 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
117 | return 0; | 120 | return 0; |
118 | } | 121 | } |
119 | 122 | ||
123 | static void ixgb_set_speed_duplex(struct net_device *netdev) | ||
124 | { | ||
125 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
126 | /* be optimistic about our link, since we were up before */ | ||
127 | adapter->link_speed = 10000; | ||
128 | adapter->link_duplex = FULL_DUPLEX; | ||
129 | netif_carrier_on(netdev); | ||
130 | netif_wake_queue(netdev); | ||
131 | } | ||
132 | |||
120 | static int | 133 | static int |
121 | ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | 134 | ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) |
122 | { | 135 | { |
@@ -130,12 +143,7 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
130 | ixgb_down(adapter, TRUE); | 143 | ixgb_down(adapter, TRUE); |
131 | ixgb_reset(adapter); | 144 | ixgb_reset(adapter); |
132 | ixgb_up(adapter); | 145 | ixgb_up(adapter); |
133 | /* be optimistic about our link, since we were up before */ | 146 | ixgb_set_speed_duplex(netdev); |
134 | adapter->link_speed = 10000; | ||
135 | adapter->link_duplex = FULL_DUPLEX; | ||
136 | netif_carrier_on(netdev); | ||
137 | netif_wake_queue(netdev); | ||
138 | |||
139 | } else | 147 | } else |
140 | ixgb_reset(adapter); | 148 | ixgb_reset(adapter); |
141 | 149 | ||
@@ -183,11 +191,7 @@ ixgb_set_pauseparam(struct net_device *netdev, | |||
183 | if(netif_running(adapter->netdev)) { | 191 | if(netif_running(adapter->netdev)) { |
184 | ixgb_down(adapter, TRUE); | 192 | ixgb_down(adapter, TRUE); |
185 | ixgb_up(adapter); | 193 | ixgb_up(adapter); |
186 | /* be optimistic about our link, since we were up before */ | 194 | ixgb_set_speed_duplex(netdev); |
187 | adapter->link_speed = 10000; | ||
188 | adapter->link_duplex = FULL_DUPLEX; | ||
189 | netif_carrier_on(netdev); | ||
190 | netif_wake_queue(netdev); | ||
191 | } else | 195 | } else |
192 | ixgb_reset(adapter); | 196 | ixgb_reset(adapter); |
193 | 197 | ||
@@ -212,11 +216,7 @@ ixgb_set_rx_csum(struct net_device *netdev, uint32_t data) | |||
212 | if(netif_running(netdev)) { | 216 | if(netif_running(netdev)) { |
213 | ixgb_down(adapter,TRUE); | 217 | ixgb_down(adapter,TRUE); |
214 | ixgb_up(adapter); | 218 | ixgb_up(adapter); |
215 | /* be optimistic about our link, since we were up before */ | 219 | ixgb_set_speed_duplex(netdev); |
216 | adapter->link_speed = 10000; | ||
217 | adapter->link_duplex = FULL_DUPLEX; | ||
218 | netif_carrier_on(netdev); | ||
219 | netif_wake_queue(netdev); | ||
220 | } else | 220 | } else |
221 | ixgb_reset(adapter); | 221 | ixgb_reset(adapter); |
222 | return 0; | 222 | return 0; |
@@ -251,6 +251,19 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data) | |||
251 | } | 251 | } |
252 | #endif /* NETIF_F_TSO */ | 252 | #endif /* NETIF_F_TSO */ |
253 | 253 | ||
254 | static uint32_t | ||
255 | ixgb_get_msglevel(struct net_device *netdev) | ||
256 | { | ||
257 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
258 | return adapter->msg_enable; | ||
259 | } | ||
260 | |||
261 | static void | ||
262 | ixgb_set_msglevel(struct net_device *netdev, uint32_t data) | ||
263 | { | ||
264 | struct ixgb_adapter *adapter = netdev_priv(netdev); | ||
265 | adapter->msg_enable = data; | ||
266 | } | ||
254 | #define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_ | 267 | #define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_ |
255 | 268 | ||
256 | static int | 269 | static int |
@@ -303,7 +316,7 @@ ixgb_get_regs(struct net_device *netdev, | |||
303 | *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ | 316 | *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ |
304 | 317 | ||
305 | /* there are 16 RAR entries in hardware, we only use 3 */ | 318 | /* there are 16 RAR entries in hardware, we only use 3 */ |
306 | for(i = 0; i < 16; i++) { | 319 | for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) { |
307 | *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ | 320 | *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ |
308 | *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ | 321 | *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ |
309 | } | 322 | } |
@@ -593,11 +606,7 @@ ixgb_set_ringparam(struct net_device *netdev, | |||
593 | adapter->tx_ring = tx_new; | 606 | adapter->tx_ring = tx_new; |
594 | if((err = ixgb_up(adapter))) | 607 | if((err = ixgb_up(adapter))) |
595 | return err; | 608 | return err; |
596 | /* be optimistic about our link, since we were up before */ | 609 | ixgb_set_speed_duplex(netdev); |
597 | adapter->link_speed = 10000; | ||
598 | adapter->link_duplex = FULL_DUPLEX; | ||
599 | netif_carrier_on(netdev); | ||
600 | netif_wake_queue(netdev); | ||
601 | } | 610 | } |
602 | 611 | ||
603 | return 0; | 612 | return 0; |
@@ -714,6 +723,8 @@ static struct ethtool_ops ixgb_ethtool_ops = { | |||
714 | .set_tx_csum = ixgb_set_tx_csum, | 723 | .set_tx_csum = ixgb_set_tx_csum, |
715 | .get_sg = ethtool_op_get_sg, | 724 | .get_sg = ethtool_op_get_sg, |
716 | .set_sg = ethtool_op_set_sg, | 725 | .set_sg = ethtool_op_set_sg, |
726 | .get_msglevel = ixgb_get_msglevel, | ||
727 | .set_msglevel = ixgb_set_msglevel, | ||
717 | #ifdef NETIF_F_TSO | 728 | #ifdef NETIF_F_TSO |
718 | .get_tso = ethtool_op_get_tso, | 729 | .get_tso = ethtool_op_get_tso, |
719 | .set_tso = ixgb_set_tso, | 730 | .set_tso = ixgb_set_tso, |
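get_msglevel and set_msglevel slot into the same ethtool_ops table as the other hooks: the ethtool core only ever calls through the function-pointer struct, and the driver's two hooks simply read and write adapter->msg_enable. A stripped-down sketch of that ops-table pattern, with invented type and field names:

/*
 * Sketch of the ops-table pattern, not kernel code: the core calls through
 * a struct of function pointers, and the driver's get/set hooks just read
 * and write the adapter's msg_enable field.
 */
#include <stdio.h>
#include <stdint.h>

struct dev { uint32_t msg_enable; };

struct msg_ops {
	uint32_t (*get_msglevel)(struct dev *d);
	void     (*set_msglevel)(struct dev *d, uint32_t level);
};

static uint32_t drv_get_msglevel(struct dev *d)             { return d->msg_enable; }
static void     drv_set_msglevel(struct dev *d, uint32_t v) { d->msg_enable = v; }

static const struct msg_ops drv_ops = {
	.get_msglevel = drv_get_msglevel,
	.set_msglevel = drv_set_msglevel,
};

int main(void)
{
	struct dev d = { .msg_enable = 0x0007 };

	drv_ops.set_msglevel(&d, 0xffff);	/* like `ethtool -s ethX msglvl 0xffff` */
	printf("msglvl = 0x%x\n", drv_ops.get_msglevel(&d));
	return 0;
}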
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c index 620cad48bdea..f7fa10e47fa2 100644 --- a/drivers/net/ixgb/ixgb_hw.c +++ b/drivers/net/ixgb/ixgb_hw.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h index 382c6300ccc2..cb4568915ada 100644 --- a/drivers/net/ixgb/ixgb_hw.h +++ b/drivers/net/ixgb/ixgb_hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -57,6 +57,7 @@ typedef enum { | |||
57 | typedef enum { | 57 | typedef enum { |
58 | ixgb_media_type_unknown = 0, | 58 | ixgb_media_type_unknown = 0, |
59 | ixgb_media_type_fiber = 1, | 59 | ixgb_media_type_fiber = 1, |
60 | ixgb_media_type_copper = 2, | ||
60 | ixgb_num_media_types | 61 | ixgb_num_media_types |
61 | } ixgb_media_type; | 62 | } ixgb_media_type; |
62 | 63 | ||
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h index aee207eaa287..40a085f94c7b 100644 --- a/drivers/net/ixgb/ixgb_ids.h +++ b/drivers/net/ixgb/ixgb_ids.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -43,6 +43,8 @@ | |||
43 | #define IXGB_SUBDEVICE_ID_A11F 0xA11F | 43 | #define IXGB_SUBDEVICE_ID_A11F 0xA11F |
44 | #define IXGB_SUBDEVICE_ID_A01F 0xA01F | 44 | #define IXGB_SUBDEVICE_ID_A01F 0xA01F |
45 | 45 | ||
46 | #endif /* #ifndef _IXGB_IDS_H_ */ | 46 | #define IXGB_DEVICE_ID_82597EX_CX4 0x109E |
47 | #define IXGB_SUBDEVICE_ID_A00C 0xA00C | ||
47 | 48 | ||
49 | #endif /* #ifndef _IXGB_IDS_H_ */ | ||
48 | /* End of File */ | 50 | /* End of File */ |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index cfd67d812f0d..57006fb8840e 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -28,22 +28,6 @@ | |||
28 | 28 | ||
29 | #include "ixgb.h" | 29 | #include "ixgb.h" |
30 | 30 | ||
31 | /* Change Log | ||
32 | * 1.0.96 04/19/05 | ||
33 | * - Make needlessly global code static -- bunk@stusta.de | ||
34 | * - ethtool cleanup -- shemminger@osdl.org | ||
35 | * - Support for MODULE_VERSION -- linville@tuxdriver.com | ||
36 | * - add skb_header_cloned check to the tso path -- herbert@apana.org.au | ||
37 | * 1.0.88 01/05/05 | ||
38 | * - include fix to the condition that determines when to quit NAPI - Robert Olsson | ||
39 | * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down | ||
40 | * 1.0.84 10/26/04 | ||
41 | * - reset buffer_info->dma in Tx resource cleanup logic | ||
42 | * 1.0.83 10/12/04 | ||
43 | * - sparse cleanup - shemminger@osdl.org | ||
44 | * - fix tx resource cleanup logic | ||
45 | */ | ||
46 | |||
47 | char ixgb_driver_name[] = "ixgb"; | 31 | char ixgb_driver_name[] = "ixgb"; |
48 | static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; | 32 | static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; |
49 | 33 | ||
@@ -52,9 +36,9 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; | |||
52 | #else | 36 | #else |
53 | #define DRIVERNAPI "-NAPI" | 37 | #define DRIVERNAPI "-NAPI" |
54 | #endif | 38 | #endif |
55 | #define DRV_VERSION "1.0.100-k2"DRIVERNAPI | 39 | #define DRV_VERSION "1.0.109-k2"DRIVERNAPI |
56 | char ixgb_driver_version[] = DRV_VERSION; | 40 | char ixgb_driver_version[] = DRV_VERSION; |
57 | static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; | 41 | static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; |
58 | 42 | ||
59 | /* ixgb_pci_tbl - PCI Device ID Table | 43 | /* ixgb_pci_tbl - PCI Device ID Table |
60 | * | 44 | * |
@@ -67,6 +51,8 @@ static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; | |||
67 | static struct pci_device_id ixgb_pci_tbl[] = { | 51 | static struct pci_device_id ixgb_pci_tbl[] = { |
68 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, | 52 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, |
69 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 53 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, |
54 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4, | ||
55 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | ||
70 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, | 56 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, |
71 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 57 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, |
72 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR, | 58 | {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR, |
@@ -148,6 +134,11 @@ MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver"); | |||
148 | MODULE_LICENSE("GPL"); | 134 | MODULE_LICENSE("GPL"); |
149 | MODULE_VERSION(DRV_VERSION); | 135 | MODULE_VERSION(DRV_VERSION); |
150 | 136 | ||
137 | #define DEFAULT_DEBUG_LEVEL_SHIFT 3 | ||
138 | static int debug = DEFAULT_DEBUG_LEVEL_SHIFT; | ||
139 | module_param(debug, int, 0); | ||
140 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | ||
141 | |||
151 | /* some defines for controlling descriptor fetches in h/w */ | 142 | /* some defines for controlling descriptor fetches in h/w */ |
152 | #define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */ | 143 | #define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */ |
153 | #define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below | 144 | #define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below |
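The new debug module parameter is a level, not a mask: netif_msg_init() turns level N into a bitmask with the N lowest message-class bits set (the default shift of 3 enables the first three classes), and that mask is what DPRINTK() tests. A sketch of the conversion; the helper name is invented and the out-of-range fallback is an assumption about the generic helper rather than something this patch defines:

/*
 * Sketch, not kernel code: turn the module's debug *level* into the
 * msg_enable *bitmask* that DPRINTK() tests, with level N setting the N
 * lowest message-class bits.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t msg_init_sketch(int debug_level, uint32_t default_bits)
{
	if (debug_level < 0 || debug_level >= 32)
		return default_bits;		/* out of range: keep the default */
	if (debug_level == 0)
		return 0;			/* silence */
	return (1u << debug_level) - 1;		/* level N: low N bits set */
}

int main(void)
{
	printf("debug=3  -> 0x%04x\n", msg_init_sketch(3, 0x0007));  /* 0x0007 */
	printf("debug=16 -> 0x%04x\n", msg_init_sketch(16, 0x0007)); /* 0xffff */
	printf("debug=-1 -> 0x%04x\n", msg_init_sketch(-1, 0x0007)); /* default */
	return 0;
}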
@@ -196,7 +187,7 @@ module_exit(ixgb_exit_module); | |||
196 | * @adapter: board private structure | 187 | * @adapter: board private structure |
197 | **/ | 188 | **/ |
198 | 189 | ||
199 | static inline void | 190 | static void |
200 | ixgb_irq_disable(struct ixgb_adapter *adapter) | 191 | ixgb_irq_disable(struct ixgb_adapter *adapter) |
201 | { | 192 | { |
202 | atomic_inc(&adapter->irq_sem); | 193 | atomic_inc(&adapter->irq_sem); |
@@ -210,7 +201,7 @@ ixgb_irq_disable(struct ixgb_adapter *adapter) | |||
210 | * @adapter: board private structure | 201 | * @adapter: board private structure |
211 | **/ | 202 | **/ |
212 | 203 | ||
213 | static inline void | 204 | static void |
214 | ixgb_irq_enable(struct ixgb_adapter *adapter) | 205 | ixgb_irq_enable(struct ixgb_adapter *adapter) |
215 | { | 206 | { |
216 | if(atomic_dec_and_test(&adapter->irq_sem)) { | 207 | if(atomic_dec_and_test(&adapter->irq_sem)) { |
@@ -231,6 +222,7 @@ ixgb_up(struct ixgb_adapter *adapter) | |||
231 | 222 | ||
232 | /* hardware has been reset, we need to reload some things */ | 223 | /* hardware has been reset, we need to reload some things */ |
233 | 224 | ||
225 | ixgb_rar_set(hw, netdev->dev_addr, 0); | ||
234 | ixgb_set_multi(netdev); | 226 | ixgb_set_multi(netdev); |
235 | 227 | ||
236 | ixgb_restore_vlan(adapter); | 228 | ixgb_restore_vlan(adapter); |
@@ -240,6 +232,9 @@ ixgb_up(struct ixgb_adapter *adapter) | |||
240 | ixgb_configure_rx(adapter); | 232 | ixgb_configure_rx(adapter); |
241 | ixgb_alloc_rx_buffers(adapter); | 233 | ixgb_alloc_rx_buffers(adapter); |
242 | 234 | ||
235 | /* disable interrupts and get the hardware into a known state */ | ||
236 | IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff); | ||
237 | |||
243 | #ifdef CONFIG_PCI_MSI | 238 | #ifdef CONFIG_PCI_MSI |
244 | { | 239 | { |
245 | boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) & | 240 | boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) & |
@@ -249,7 +244,7 @@ ixgb_up(struct ixgb_adapter *adapter) | |||
249 | if (!pcix) | 244 | if (!pcix) |
250 | adapter->have_msi = FALSE; | 245 | adapter->have_msi = FALSE; |
251 | else if((err = pci_enable_msi(adapter->pdev))) { | 246 | else if((err = pci_enable_msi(adapter->pdev))) { |
252 | printk (KERN_ERR | 247 | DPRINTK(PROBE, ERR, |
253 | "Unable to allocate MSI interrupt Error: %d\n", err); | 248 | "Unable to allocate MSI interrupt Error: %d\n", err); |
254 | adapter->have_msi = FALSE; | 249 | adapter->have_msi = FALSE; |
255 | /* proceed to try to request regular interrupt */ | 250 | /* proceed to try to request regular interrupt */ |
@@ -259,11 +254,11 @@ ixgb_up(struct ixgb_adapter *adapter) | |||
259 | #endif | 254 | #endif |
260 | if((err = request_irq(adapter->pdev->irq, &ixgb_intr, | 255 | if((err = request_irq(adapter->pdev->irq, &ixgb_intr, |
261 | SA_SHIRQ | SA_SAMPLE_RANDOM, | 256 | SA_SHIRQ | SA_SAMPLE_RANDOM, |
262 | netdev->name, netdev))) | 257 | netdev->name, netdev))) { |
258 | DPRINTK(PROBE, ERR, | ||
259 | "Unable to allocate interrupt Error: %d\n", err); | ||
263 | return err; | 260 | return err; |
264 | 261 | } | |
265 | /* disable interrupts and get the hardware into a known state */ | ||
266 | IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff); | ||
267 | 262 | ||
268 | if((hw->max_frame_size != max_frame) || | 263 | if((hw->max_frame_size != max_frame) || |
269 | (hw->max_frame_size != | 264 | (hw->max_frame_size != |
@@ -285,11 +280,12 @@ ixgb_up(struct ixgb_adapter *adapter) | |||
285 | } | 280 | } |
286 | 281 | ||
287 | mod_timer(&adapter->watchdog_timer, jiffies); | 282 | mod_timer(&adapter->watchdog_timer, jiffies); |
288 | ixgb_irq_enable(adapter); | ||
289 | 283 | ||
290 | #ifdef CONFIG_IXGB_NAPI | 284 | #ifdef CONFIG_IXGB_NAPI |
291 | netif_poll_enable(netdev); | 285 | netif_poll_enable(netdev); |
292 | #endif | 286 | #endif |
287 | ixgb_irq_enable(adapter); | ||
288 | |||
293 | return 0; | 289 | return 0; |
294 | } | 290 | } |
295 | 291 | ||
@@ -326,7 +322,7 @@ ixgb_reset(struct ixgb_adapter *adapter) | |||
326 | 322 | ||
327 | ixgb_adapter_stop(&adapter->hw); | 323 | ixgb_adapter_stop(&adapter->hw); |
328 | if(!ixgb_init_hw(&adapter->hw)) | 324 | if(!ixgb_init_hw(&adapter->hw)) |
329 | IXGB_DBG("ixgb_init_hw failed.\n"); | 325 | DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n"); |
330 | } | 326 | } |
331 | 327 | ||
332 | /** | 328 | /** |
@@ -363,7 +359,8 @@ ixgb_probe(struct pci_dev *pdev, | |||
363 | } else { | 359 | } else { |
364 | if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || | 360 | if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || |
365 | (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { | 361 | (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { |
366 | IXGB_ERR("No usable DMA configuration, aborting\n"); | 362 | printk(KERN_ERR |
363 | "ixgb: No usable DMA configuration, aborting\n"); | ||
367 | goto err_dma_mask; | 364 | goto err_dma_mask; |
368 | } | 365 | } |
369 | pci_using_dac = 0; | 366 | pci_using_dac = 0; |
@@ -388,6 +385,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
388 | adapter->netdev = netdev; | 385 | adapter->netdev = netdev; |
389 | adapter->pdev = pdev; | 386 | adapter->pdev = pdev; |
390 | adapter->hw.back = adapter; | 387 | adapter->hw.back = adapter; |
388 | adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT); | ||
391 | 389 | ||
392 | mmio_start = pci_resource_start(pdev, BAR_0); | 390 | mmio_start = pci_resource_start(pdev, BAR_0); |
393 | mmio_len = pci_resource_len(pdev, BAR_0); | 391 | mmio_len = pci_resource_len(pdev, BAR_0); |
@@ -416,7 +414,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
416 | netdev->change_mtu = &ixgb_change_mtu; | 414 | netdev->change_mtu = &ixgb_change_mtu; |
417 | ixgb_set_ethtool_ops(netdev); | 415 | ixgb_set_ethtool_ops(netdev); |
418 | netdev->tx_timeout = &ixgb_tx_timeout; | 416 | netdev->tx_timeout = &ixgb_tx_timeout; |
419 | netdev->watchdog_timeo = HZ; | 417 | netdev->watchdog_timeo = 5 * HZ; |
420 | #ifdef CONFIG_IXGB_NAPI | 418 | #ifdef CONFIG_IXGB_NAPI |
421 | netdev->poll = &ixgb_clean; | 419 | netdev->poll = &ixgb_clean; |
422 | netdev->weight = 64; | 420 | netdev->weight = 64; |
@@ -428,6 +426,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
428 | netdev->poll_controller = ixgb_netpoll; | 426 | netdev->poll_controller = ixgb_netpoll; |
429 | #endif | 427 | #endif |
430 | 428 | ||
429 | strcpy(netdev->name, pci_name(pdev)); | ||
431 | netdev->mem_start = mmio_start; | 430 | netdev->mem_start = mmio_start; |
432 | netdev->mem_end = mmio_start + mmio_len; | 431 | netdev->mem_end = mmio_start + mmio_len; |
433 | netdev->base_addr = adapter->hw.io_base; | 432 | netdev->base_addr = adapter->hw.io_base; |
@@ -449,6 +448,9 @@ ixgb_probe(struct pci_dev *pdev, | |||
449 | #ifdef NETIF_F_TSO | 448 | #ifdef NETIF_F_TSO |
450 | netdev->features |= NETIF_F_TSO; | 449 | netdev->features |= NETIF_F_TSO; |
451 | #endif | 450 | #endif |
451 | #ifdef NETIF_F_LLTX | ||
452 | netdev->features |= NETIF_F_LLTX; | ||
453 | #endif | ||
452 | 454 | ||
453 | if(pci_using_dac) | 455 | if(pci_using_dac) |
454 | netdev->features |= NETIF_F_HIGHDMA; | 456 | netdev->features |= NETIF_F_HIGHDMA; |
@@ -456,7 +458,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
456 | /* make sure the EEPROM is good */ | 458 | /* make sure the EEPROM is good */ |
457 | 459 | ||
458 | if(!ixgb_validate_eeprom_checksum(&adapter->hw)) { | 460 | if(!ixgb_validate_eeprom_checksum(&adapter->hw)) { |
459 | printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n"); | 461 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); |
460 | err = -EIO; | 462 | err = -EIO; |
461 | goto err_eeprom; | 463 | goto err_eeprom; |
462 | } | 464 | } |
@@ -465,6 +467,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
465 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); | 467 | memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); |
466 | 468 | ||
467 | if(!is_valid_ether_addr(netdev->perm_addr)) { | 469 | if(!is_valid_ether_addr(netdev->perm_addr)) { |
470 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); | ||
468 | err = -EIO; | 471 | err = -EIO; |
469 | goto err_eeprom; | 472 | goto err_eeprom; |
470 | } | 473 | } |
@@ -478,6 +481,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
478 | INIT_WORK(&adapter->tx_timeout_task, | 481 | INIT_WORK(&adapter->tx_timeout_task, |
479 | (void (*)(void *))ixgb_tx_timeout_task, netdev); | 482 | (void (*)(void *))ixgb_tx_timeout_task, netdev); |
480 | 483 | ||
484 | strcpy(netdev->name, "eth%d"); | ||
481 | if((err = register_netdev(netdev))) | 485 | if((err = register_netdev(netdev))) |
482 | goto err_register; | 486 | goto err_register; |
483 | 487 | ||
@@ -486,8 +490,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
486 | netif_carrier_off(netdev); | 490 | netif_carrier_off(netdev); |
487 | netif_stop_queue(netdev); | 491 | netif_stop_queue(netdev); |
488 | 492 | ||
489 | printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n", | 493 | DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n"); |
490 | netdev->name); | ||
491 | ixgb_check_options(adapter); | 494 | ixgb_check_options(adapter); |
492 | /* reset the hardware with the new settings */ | 495 | /* reset the hardware with the new settings */ |
493 | 496 | ||
@@ -557,17 +560,17 @@ ixgb_sw_init(struct ixgb_adapter *adapter) | |||
557 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | 560 | hw->subsystem_vendor_id = pdev->subsystem_vendor; |
558 | hw->subsystem_id = pdev->subsystem_device; | 561 | hw->subsystem_id = pdev->subsystem_device; |
559 | 562 | ||
560 | adapter->rx_buffer_len = IXGB_RXBUFFER_2048; | ||
561 | |||
562 | hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; | 563 | hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; |
564 | adapter->rx_buffer_len = hw->max_frame_size; | ||
563 | 565 | ||
564 | if((hw->device_id == IXGB_DEVICE_ID_82597EX) | 566 | if((hw->device_id == IXGB_DEVICE_ID_82597EX) |
565 | ||(hw->device_id == IXGB_DEVICE_ID_82597EX_LR) | 567 | || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) |
566 | ||(hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) | 568 | || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) |
569 | || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) | ||
567 | hw->mac_type = ixgb_82597; | 570 | hw->mac_type = ixgb_82597; |
568 | else { | 571 | else { |
569 | /* should never have loaded on this device */ | 572 | /* should never have loaded on this device */ |
570 | printk(KERN_ERR "ixgb: unsupported device id\n"); | 573 | DPRINTK(PROBE, ERR, "unsupported device id\n"); |
571 | } | 574 | } |
572 | 575 | ||
573 | /* enable flow control to be programmed */ | 576 | /* enable flow control to be programmed */ |
@@ -665,6 +668,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) | |||
665 | size = sizeof(struct ixgb_buffer) * txdr->count; | 668 | size = sizeof(struct ixgb_buffer) * txdr->count; |
666 | txdr->buffer_info = vmalloc(size); | 669 | txdr->buffer_info = vmalloc(size); |
667 | if(!txdr->buffer_info) { | 670 | if(!txdr->buffer_info) { |
671 | DPRINTK(PROBE, ERR, | ||
672 | "Unable to allocate transmit descriptor ring memory\n"); | ||
668 | return -ENOMEM; | 673 | return -ENOMEM; |
669 | } | 674 | } |
670 | memset(txdr->buffer_info, 0, size); | 675 | memset(txdr->buffer_info, 0, size); |
@@ -677,6 +682,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) | |||
677 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); | 682 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); |
678 | if(!txdr->desc) { | 683 | if(!txdr->desc) { |
679 | vfree(txdr->buffer_info); | 684 | vfree(txdr->buffer_info); |
685 | DPRINTK(PROBE, ERR, | ||
686 | "Unable to allocate transmit descriptor memory\n"); | ||
680 | return -ENOMEM; | 687 | return -ENOMEM; |
681 | } | 688 | } |
682 | memset(txdr->desc, 0, txdr->size); | 689 | memset(txdr->desc, 0, txdr->size); |
@@ -750,6 +757,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | |||
750 | size = sizeof(struct ixgb_buffer) * rxdr->count; | 757 | size = sizeof(struct ixgb_buffer) * rxdr->count; |
751 | rxdr->buffer_info = vmalloc(size); | 758 | rxdr->buffer_info = vmalloc(size); |
752 | if(!rxdr->buffer_info) { | 759 | if(!rxdr->buffer_info) { |
760 | DPRINTK(PROBE, ERR, | ||
761 | "Unable to allocate receive descriptor ring\n"); | ||
753 | return -ENOMEM; | 762 | return -ENOMEM; |
754 | } | 763 | } |
755 | memset(rxdr->buffer_info, 0, size); | 764 | memset(rxdr->buffer_info, 0, size); |
@@ -763,6 +772,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | |||
763 | 772 | ||
764 | if(!rxdr->desc) { | 773 | if(!rxdr->desc) { |
765 | vfree(rxdr->buffer_info); | 774 | vfree(rxdr->buffer_info); |
775 | DPRINTK(PROBE, ERR, | ||
776 | "Unable to allocate receive descriptors\n"); | ||
766 | return -ENOMEM; | 777 | return -ENOMEM; |
767 | } | 778 | } |
768 | memset(rxdr->desc, 0, rxdr->size); | 779 | memset(rxdr->desc, 0, rxdr->size); |
@@ -794,21 +805,14 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter) | |||
794 | 805 | ||
795 | rctl |= IXGB_RCTL_SECRC; | 806 | rctl |= IXGB_RCTL_SECRC; |
796 | 807 | ||
797 | switch (adapter->rx_buffer_len) { | 808 | if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048) |
798 | case IXGB_RXBUFFER_2048: | ||
799 | default: | ||
800 | rctl |= IXGB_RCTL_BSIZE_2048; | 809 | rctl |= IXGB_RCTL_BSIZE_2048; |
801 | break; | 810 | else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096) |
802 | case IXGB_RXBUFFER_4096: | ||
803 | rctl |= IXGB_RCTL_BSIZE_4096; | 811 | rctl |= IXGB_RCTL_BSIZE_4096; |
804 | break; | 812 | else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192) |
805 | case IXGB_RXBUFFER_8192: | ||
806 | rctl |= IXGB_RCTL_BSIZE_8192; | 813 | rctl |= IXGB_RCTL_BSIZE_8192; |
807 | break; | 814 | else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384) |
808 | case IXGB_RXBUFFER_16384: | ||
809 | rctl |= IXGB_RCTL_BSIZE_16384; | 815 | rctl |= IXGB_RCTL_BSIZE_16384; |
810 | break; | ||
811 | } | ||
812 | 816 | ||
813 | IXGB_WRITE_REG(&adapter->hw, RCTL, rctl); | 817 | IXGB_WRITE_REG(&adapter->hw, RCTL, rctl); |
814 | } | 818 | } |
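The hunk above replaces the exact-size switch with a threshold chain, so any rx_buffer_len up to the next hardware bucket selects that bucket's BSIZE bits. A minimal userspace sketch of the same selection logic; the bucket sizes match the driver's IXGB_RXBUFFER_* values, but the BSIZE codes here are stand-ins rather than the real RCTL bit values:

#include <stdio.h>

/* Stand-ins for the driver's receive buffer buckets and RCTL size bits. */
enum { RXBUF_2048 = 2048, RXBUF_4096 = 4096, RXBUF_8192 = 8192, RXBUF_16384 = 16384 };
enum { BSIZE_2048, BSIZE_4096, BSIZE_8192, BSIZE_16384 };

/* Pick the smallest hardware bucket that can hold rx_buffer_len,
 * mirroring the threshold chain the patch introduces. */
static int rctl_bsize(int rx_buffer_len)
{
	if (rx_buffer_len <= RXBUF_2048)
		return BSIZE_2048;
	else if (rx_buffer_len <= RXBUF_4096)
		return BSIZE_4096;
	else if (rx_buffer_len <= RXBUF_8192)
		return BSIZE_8192;
	return BSIZE_16384;
}

int main(void)
{
	/* A 1522-byte frame fits the 2048 bucket; a 9014-byte jumbo needs 16384. */
	printf("%d %d\n", rctl_bsize(1522), rctl_bsize(9014));
	return 0;
}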
@@ -898,22 +902,25 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter) | |||
898 | adapter->tx_ring.desc = NULL; | 902 | adapter->tx_ring.desc = NULL; |
899 | } | 903 | } |
900 | 904 | ||
901 | static inline void | 905 | static void |
902 | ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter, | 906 | ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter, |
903 | struct ixgb_buffer *buffer_info) | 907 | struct ixgb_buffer *buffer_info) |
904 | { | 908 | { |
905 | struct pci_dev *pdev = adapter->pdev; | 909 | struct pci_dev *pdev = adapter->pdev; |
906 | if(buffer_info->dma) { | 910 | |
907 | pci_unmap_page(pdev, | 911 | if (buffer_info->dma) |
908 | buffer_info->dma, | 912 | pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, |
909 | buffer_info->length, | 913 | PCI_DMA_TODEVICE); |
910 | PCI_DMA_TODEVICE); | 914 | |
911 | buffer_info->dma = 0; | 915 | if (buffer_info->skb) |
912 | } | ||
913 | if(buffer_info->skb) { | ||
914 | dev_kfree_skb_any(buffer_info->skb); | 916 | dev_kfree_skb_any(buffer_info->skb); |
915 | buffer_info->skb = NULL; | 917 | |
916 | } | 918 | buffer_info->skb = NULL; |
919 | buffer_info->dma = 0; | ||
920 | buffer_info->time_stamp = 0; | ||
921 | /* these fields must always be initialized in tx | ||
922 | * buffer_info->length = 0; | ||
923 | * buffer_info->next_to_watch = 0; */ | ||
917 | } | 924 | } |
918 | 925 | ||
919 | /** | 926 | /** |
@@ -1112,8 +1119,8 @@ ixgb_watchdog(unsigned long data) | |||
1112 | 1119 | ||
1113 | if(adapter->hw.link_up) { | 1120 | if(adapter->hw.link_up) { |
1114 | if(!netif_carrier_ok(netdev)) { | 1121 | if(!netif_carrier_ok(netdev)) { |
1115 | printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n", | 1122 | DPRINTK(LINK, INFO, |
1116 | netdev->name, 10000, "Full Duplex"); | 1123 | "NIC Link is Up 10000 Mbps Full Duplex\n"); |
1117 | adapter->link_speed = 10000; | 1124 | adapter->link_speed = 10000; |
1118 | adapter->link_duplex = FULL_DUPLEX; | 1125 | adapter->link_duplex = FULL_DUPLEX; |
1119 | netif_carrier_on(netdev); | 1126 | netif_carrier_on(netdev); |
@@ -1123,9 +1130,7 @@ ixgb_watchdog(unsigned long data) | |||
1123 | if(netif_carrier_ok(netdev)) { | 1130 | if(netif_carrier_ok(netdev)) { |
1124 | adapter->link_speed = 0; | 1131 | adapter->link_speed = 0; |
1125 | adapter->link_duplex = 0; | 1132 | adapter->link_duplex = 0; |
1126 | printk(KERN_INFO | 1133 | DPRINTK(LINK, INFO, "NIC Link is Down\n"); |
1127 | "ixgb: %s NIC Link is Down\n", | ||
1128 | netdev->name); | ||
1129 | netif_carrier_off(netdev); | 1134 | netif_carrier_off(netdev); |
1130 | netif_stop_queue(netdev); | 1135 | netif_stop_queue(netdev); |
1131 | 1136 | ||
@@ -1158,7 +1163,7 @@ ixgb_watchdog(unsigned long data) | |||
1158 | #define IXGB_TX_FLAGS_VLAN 0x00000002 | 1163 | #define IXGB_TX_FLAGS_VLAN 0x00000002 |
1159 | #define IXGB_TX_FLAGS_TSO 0x00000004 | 1164 | #define IXGB_TX_FLAGS_TSO 0x00000004 |
1160 | 1165 | ||
1161 | static inline int | 1166 | static int |
1162 | ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | 1167 | ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) |
1163 | { | 1168 | { |
1164 | #ifdef NETIF_F_TSO | 1169 | #ifdef NETIF_F_TSO |
@@ -1220,7 +1225,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1220 | return 0; | 1225 | return 0; |
1221 | } | 1226 | } |
1222 | 1227 | ||
1223 | static inline boolean_t | 1228 | static boolean_t |
1224 | ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) | 1229 | ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) |
1225 | { | 1230 | { |
1226 | struct ixgb_context_desc *context_desc; | 1231 | struct ixgb_context_desc *context_desc; |
@@ -1258,7 +1263,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1258 | #define IXGB_MAX_TXD_PWR 14 | 1263 | #define IXGB_MAX_TXD_PWR 14 |
1259 | #define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR) | 1264 | #define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR) |
1260 | 1265 | ||
1261 | static inline int | 1266 | static int |
1262 | ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | 1267 | ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, |
1263 | unsigned int first) | 1268 | unsigned int first) |
1264 | { | 1269 | { |
@@ -1284,6 +1289,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1284 | size, | 1289 | size, |
1285 | PCI_DMA_TODEVICE); | 1290 | PCI_DMA_TODEVICE); |
1286 | buffer_info->time_stamp = jiffies; | 1291 | buffer_info->time_stamp = jiffies; |
1292 | buffer_info->next_to_watch = 0; | ||
1287 | 1293 | ||
1288 | len -= size; | 1294 | len -= size; |
1289 | offset += size; | 1295 | offset += size; |
@@ -1309,6 +1315,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1309 | size, | 1315 | size, |
1310 | PCI_DMA_TODEVICE); | 1316 | PCI_DMA_TODEVICE); |
1311 | buffer_info->time_stamp = jiffies; | 1317 | buffer_info->time_stamp = jiffies; |
1318 | buffer_info->next_to_watch = 0; | ||
1312 | 1319 | ||
1313 | len -= size; | 1320 | len -= size; |
1314 | offset += size; | 1321 | offset += size; |
@@ -1323,7 +1330,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1323 | return count; | 1330 | return count; |
1324 | } | 1331 | } |
1325 | 1332 | ||
1326 | static inline void | 1333 | static void |
1327 | ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) | 1334 | ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) |
1328 | { | 1335 | { |
1329 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; | 1336 | struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; |
@@ -1395,13 +1402,26 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1395 | return 0; | 1402 | return 0; |
1396 | } | 1403 | } |
1397 | 1404 | ||
1405 | #ifdef NETIF_F_LLTX | ||
1406 | local_irq_save(flags); | ||
1407 | if (!spin_trylock(&adapter->tx_lock)) { | ||
1408 | /* Collision - tell upper layer to requeue */ | ||
1409 | local_irq_restore(flags); | ||
1410 | return NETDEV_TX_LOCKED; | ||
1411 | } | ||
1412 | #else | ||
1398 | spin_lock_irqsave(&adapter->tx_lock, flags); | 1413 | spin_lock_irqsave(&adapter->tx_lock, flags); |
1414 | #endif | ||
1415 | |||
1399 | if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) { | 1416 | if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) { |
1400 | netif_stop_queue(netdev); | 1417 | netif_stop_queue(netdev); |
1401 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | 1418 | spin_unlock_irqrestore(&adapter->tx_lock, flags); |
1402 | return 1; | 1419 | return NETDEV_TX_BUSY; |
1403 | } | 1420 | } |
1421 | |||
1422 | #ifndef NETIF_F_LLTX | ||
1404 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | 1423 | spin_unlock_irqrestore(&adapter->tx_lock, flags); |
1424 | #endif | ||
1405 | 1425 | ||
1406 | if(adapter->vlgrp && vlan_tx_tag_present(skb)) { | 1426 | if(adapter->vlgrp && vlan_tx_tag_present(skb)) { |
1407 | tx_flags |= IXGB_TX_FLAGS_VLAN; | 1427 | tx_flags |= IXGB_TX_FLAGS_VLAN; |
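With NETIF_F_LLTX the stack no longer serializes calls into hard_start_xmit, so the hunk above guards the ring with the driver's own tx_lock, using a trylock and returning NETDEV_TX_LOCKED so the core requeues the frame on contention. A hedged pthread sketch of that trylock-or-requeue pattern (TX_OK/TX_LOCKED/TX_BUSY stand in for the NETDEV_TX_* codes; build with -pthread):

#include <pthread.h>
#include <stdio.h>

enum { TX_OK = 0, TX_BUSY = 1, TX_LOCKED = 2 };	/* mirror NETDEV_TX_* */

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int unused_descriptors = 64;	/* pretend ring occupancy */

/* LLTX-style transmit path: try the driver's own lock, tell the caller to
 * requeue on contention, report BUSY when the ring is full, else post. */
static int xmit_frame(void)
{
	if (pthread_mutex_trylock(&tx_lock) != 0)
		return TX_LOCKED;	/* collision: upper layer requeues */

	if (unused_descriptors < 8) {
		pthread_mutex_unlock(&tx_lock);
		return TX_BUSY;		/* ring full: stop the queue */
	}

	unused_descriptors--;		/* "post" one descriptor */
	pthread_mutex_unlock(&tx_lock);
	return TX_OK;
}

int main(void)
{
	printf("first send: %d\n", xmit_frame());
	return 0;
}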
@@ -1413,10 +1433,13 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1413 | tso = ixgb_tso(adapter, skb); | 1433 | tso = ixgb_tso(adapter, skb); |
1414 | if (tso < 0) { | 1434 | if (tso < 0) { |
1415 | dev_kfree_skb_any(skb); | 1435 | dev_kfree_skb_any(skb); |
1436 | #ifdef NETIF_F_LLTX | ||
1437 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
1438 | #endif | ||
1416 | return NETDEV_TX_OK; | 1439 | return NETDEV_TX_OK; |
1417 | } | 1440 | } |
1418 | 1441 | ||
1419 | if (tso) | 1442 | if (likely(tso)) |
1420 | tx_flags |= IXGB_TX_FLAGS_TSO; | 1443 | tx_flags |= IXGB_TX_FLAGS_TSO; |
1421 | else if(ixgb_tx_csum(adapter, skb)) | 1444 | else if(ixgb_tx_csum(adapter, skb)) |
1422 | tx_flags |= IXGB_TX_FLAGS_CSUM; | 1445 | tx_flags |= IXGB_TX_FLAGS_CSUM; |
@@ -1426,7 +1449,15 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1426 | 1449 | ||
1427 | netdev->trans_start = jiffies; | 1450 | netdev->trans_start = jiffies; |
1428 | 1451 | ||
1429 | return 0; | 1452 | #ifdef NETIF_F_LLTX |
1453 | /* Make sure there is space in the ring for the next send. */ | ||
1454 | if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) | ||
1455 | netif_stop_queue(netdev); | ||
1456 | |||
1457 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
1458 | |||
1459 | #endif | ||
1460 | return NETDEV_TX_OK; | ||
1430 | } | 1461 | } |
1431 | 1462 | ||
1432 | /** | 1463 | /** |
@@ -1448,6 +1479,7 @@ ixgb_tx_timeout_task(struct net_device *netdev) | |||
1448 | { | 1479 | { |
1449 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 1480 | struct ixgb_adapter *adapter = netdev_priv(netdev); |
1450 | 1481 | ||
1482 | adapter->tx_timeout_count++; | ||
1451 | ixgb_down(adapter, TRUE); | 1483 | ixgb_down(adapter, TRUE); |
1452 | ixgb_up(adapter); | 1484 | ixgb_up(adapter); |
1453 | } | 1485 | } |
@@ -1486,28 +1518,15 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu) | |||
1486 | 1518 | ||
1487 | if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) | 1519 | if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) |
1488 | || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { | 1520 | || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { |
1489 | IXGB_ERR("Invalid MTU setting\n"); | 1521 | DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu); |
1490 | return -EINVAL; | 1522 | return -EINVAL; |
1491 | } | 1523 | } |
1492 | 1524 | ||
1493 | if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) | 1525 | adapter->rx_buffer_len = max_frame; |
1494 | || (max_frame <= IXGB_RXBUFFER_2048)) { | ||
1495 | adapter->rx_buffer_len = IXGB_RXBUFFER_2048; | ||
1496 | |||
1497 | } else if(max_frame <= IXGB_RXBUFFER_4096) { | ||
1498 | adapter->rx_buffer_len = IXGB_RXBUFFER_4096; | ||
1499 | |||
1500 | } else if(max_frame <= IXGB_RXBUFFER_8192) { | ||
1501 | adapter->rx_buffer_len = IXGB_RXBUFFER_8192; | ||
1502 | |||
1503 | } else { | ||
1504 | adapter->rx_buffer_len = IXGB_RXBUFFER_16384; | ||
1505 | } | ||
1506 | 1526 | ||
1507 | netdev->mtu = new_mtu; | 1527 | netdev->mtu = new_mtu; |
1508 | 1528 | ||
1509 | if(old_max_frame != max_frame && netif_running(netdev)) { | 1529 | if ((old_max_frame != max_frame) && netif_running(netdev)) { |
1510 | |||
1511 | ixgb_down(adapter, TRUE); | 1530 | ixgb_down(adapter, TRUE); |
1512 | ixgb_up(adapter); | 1531 | ixgb_up(adapter); |
1513 | } | 1532 | } |
@@ -1765,23 +1784,43 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) | |||
1765 | 1784 | ||
1766 | tx_ring->next_to_clean = i; | 1785 | tx_ring->next_to_clean = i; |
1767 | 1786 | ||
1768 | spin_lock(&adapter->tx_lock); | 1787 | if (unlikely(netif_queue_stopped(netdev))) { |
1769 | if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) && | 1788 | spin_lock(&adapter->tx_lock); |
1770 | (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) { | 1789 | if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) && |
1771 | 1790 | (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) | |
1772 | netif_wake_queue(netdev); | 1791 | netif_wake_queue(netdev); |
1792 | spin_unlock(&adapter->tx_lock); | ||
1773 | } | 1793 | } |
1774 | spin_unlock(&adapter->tx_lock); | ||
1775 | 1794 | ||
1776 | if(adapter->detect_tx_hung) { | 1795 | if(adapter->detect_tx_hung) { |
1777 | /* detect a transmit hang in hardware, this serializes the | 1796 | /* detect a transmit hang in hardware, this serializes the |
1778 | * check with the clearing of time_stamp and movement of i */ | 1797 | * check with the clearing of time_stamp and movement of i */ |
1779 | adapter->detect_tx_hung = FALSE; | 1798 | adapter->detect_tx_hung = FALSE; |
1780 | if(tx_ring->buffer_info[i].dma && | 1799 | if (tx_ring->buffer_info[eop].dma && |
1781 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) | 1800 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ) |
1782 | && !(IXGB_READ_REG(&adapter->hw, STATUS) & | 1801 | && !(IXGB_READ_REG(&adapter->hw, STATUS) & |
1783 | IXGB_STATUS_TXOFF)) | 1802 | IXGB_STATUS_TXOFF)) { |
1803 | /* detected Tx unit hang */ | ||
1804 | DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" | ||
1805 | " TDH <%x>\n" | ||
1806 | " TDT <%x>\n" | ||
1807 | " next_to_use <%x>\n" | ||
1808 | " next_to_clean <%x>\n" | ||
1809 | "buffer_info[next_to_clean]\n" | ||
1810 | " time_stamp <%lx>\n" | ||
1811 | " next_to_watch <%x>\n" | ||
1812 | " jiffies <%lx>\n" | ||
1813 | " next_to_watch.status <%x>\n", | ||
1814 | IXGB_READ_REG(&adapter->hw, TDH), | ||
1815 | IXGB_READ_REG(&adapter->hw, TDT), | ||
1816 | tx_ring->next_to_use, | ||
1817 | tx_ring->next_to_clean, | ||
1818 | tx_ring->buffer_info[eop].time_stamp, | ||
1819 | eop, | ||
1820 | jiffies, | ||
1821 | eop_desc->status); | ||
1784 | netif_stop_queue(netdev); | 1822 | netif_stop_queue(netdev); |
1823 | } | ||
1785 | } | 1824 | } |
1786 | 1825 | ||
1787 | return cleaned; | 1826 | return cleaned; |
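The reworked hang check keys off the end-of-packet descriptor's timestamp instead of the next-to-clean slot, and only flags a hang when the transmitter is not paused by flow control (IXGB_STATUS_TXOFF). A small sketch of the underlying test; the real driver uses time_after() so the comparison survives jiffies wraparound, which this simplified version ignores:

#include <stdio.h>
#include <stdbool.h>

#define HZ 1000			/* stand-in tick rate: 1000 ticks per second */

struct tx_slot {
	unsigned long time_stamp;	/* tick when the buffer was mapped */
	int dma;			/* non-zero while DMA is outstanding */
};

/* Suspect a hang when the EOP slot has been pending for more than one
 * second and the transmitter is not paused by flow control. */
static bool tx_hang(const struct tx_slot *eop, unsigned long now, bool tx_paused)
{
	return eop->dma && (now > eop->time_stamp + HZ) && !tx_paused;
}

int main(void)
{
	struct tx_slot eop = { .time_stamp = 5000, .dma = 1 };

	printf("%d\n", tx_hang(&eop, 5000 + 2 * HZ, false));	/* 1: hang */
	printf("%d\n", tx_hang(&eop, 5000 + 2 * HZ, true));	/* 0: paused */
	return 0;
}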
@@ -1794,7 +1833,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) | |||
1794 | * @sk_buff: socket buffer with received data | 1833 | * @sk_buff: socket buffer with received data |
1795 | **/ | 1834 | **/ |
1796 | 1835 | ||
1797 | static inline void | 1836 | static void |
1798 | ixgb_rx_checksum(struct ixgb_adapter *adapter, | 1837 | ixgb_rx_checksum(struct ixgb_adapter *adapter, |
1799 | struct ixgb_rx_desc *rx_desc, | 1838 | struct ixgb_rx_desc *rx_desc, |
1800 | struct sk_buff *skb) | 1839 | struct sk_buff *skb) |
@@ -1858,6 +1897,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) | |||
1858 | #endif | 1897 | #endif |
1859 | status = rx_desc->status; | 1898 | status = rx_desc->status; |
1860 | skb = buffer_info->skb; | 1899 | skb = buffer_info->skb; |
1900 | buffer_info->skb = NULL; | ||
1861 | 1901 | ||
1862 | prefetch(skb->data); | 1902 | prefetch(skb->data); |
1863 | 1903 | ||
@@ -1902,6 +1942,26 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) | |||
1902 | goto rxdesc_done; | 1942 | goto rxdesc_done; |
1903 | } | 1943 | } |
1904 | 1944 | ||
1945 | /* code added for copybreak; this should improve | ||
1946 | * performance for small packets with large amounts | ||
1947 | * of reassembly being done in the stack */ | ||
1948 | #define IXGB_CB_LENGTH 256 | ||
1949 | if (length < IXGB_CB_LENGTH) { | ||
1950 | struct sk_buff *new_skb = | ||
1951 | dev_alloc_skb(length + NET_IP_ALIGN); | ||
1952 | if (new_skb) { | ||
1953 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1954 | new_skb->dev = netdev; | ||
1955 | memcpy(new_skb->data - NET_IP_ALIGN, | ||
1956 | skb->data - NET_IP_ALIGN, | ||
1957 | length + NET_IP_ALIGN); | ||
1958 | /* save the skb in buffer_info as good */ | ||
1959 | buffer_info->skb = skb; | ||
1960 | skb = new_skb; | ||
1961 | } | ||
1962 | } | ||
1963 | /* end copybreak code */ | ||
1964 | |||
1905 | /* Good Receive */ | 1965 | /* Good Receive */ |
1906 | skb_put(skb, length); | 1966 | skb_put(skb, length); |
1907 | 1967 | ||
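Copybreak trades a short memcpy for buffer reuse: frames below the threshold are copied into a freshly allocated small buffer and the original large receive buffer stays on the ring for the hardware to refill. A minimal userspace sketch of that decision; the 256-byte threshold is the IXGB_CB_LENGTH value from the hunk, while the buffer handling is reduced to plain malloc/memcpy:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CB_LENGTH 256		/* copybreak threshold, as in the patch */
#define RX_BUFFER_LEN 2048	/* size of the pre-mapped receive buffer */

/* Return the buffer to hand to the stack.  Small frames are copied so the
 * large DMA buffer can be recycled; *recycled tells the caller whether the
 * original buffer is still on the ring. */
static void *receive(void *dma_buf, size_t len, int *recycled)
{
	if (len < CB_LENGTH) {
		void *copy = malloc(len);

		if (copy) {
			memcpy(copy, dma_buf, len);
			*recycled = 1;	/* keep dma_buf for the next frame */
			return copy;
		}
	}
	*recycled = 0;			/* hand the big buffer up as-is */
	return dma_buf;
}

int main(void)
{
	char dma_buf[RX_BUFFER_LEN] = "tiny frame";
	int recycled;
	void *skb = receive(dma_buf, 64, &recycled);

	printf("recycled=%d\n", recycled);
	if (skb != dma_buf)
		free(skb);
	return 0;
}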
@@ -1931,7 +1991,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) | |||
1931 | rxdesc_done: | 1991 | rxdesc_done: |
1932 | /* clean up descriptor, might be written over by hw */ | 1992 | /* clean up descriptor, might be written over by hw */ |
1933 | rx_desc->status = 0; | 1993 | rx_desc->status = 0; |
1934 | buffer_info->skb = NULL; | ||
1935 | 1994 | ||
1936 | /* use prefetched values */ | 1995 | /* use prefetched values */ |
1937 | rx_desc = next_rxd; | 1996 | rx_desc = next_rxd; |
@@ -1971,12 +2030,18 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter) | |||
1971 | 2030 | ||
1972 | /* leave three descriptors unused */ | 2031 | /* leave three descriptors unused */ |
1973 | while(--cleancount > 2) { | 2032 | while(--cleancount > 2) { |
1974 | rx_desc = IXGB_RX_DESC(*rx_ring, i); | 2033 | /* recycle! it's good for you */ |
1975 | 2034 | if (!(skb = buffer_info->skb)) | |
1976 | skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); | 2035 | skb = dev_alloc_skb(adapter->rx_buffer_len |
2036 | + NET_IP_ALIGN); | ||
2037 | else { | ||
2038 | skb_trim(skb, 0); | ||
2039 | goto map_skb; | ||
2040 | } | ||
1977 | 2041 | ||
1978 | if(unlikely(!skb)) { | 2042 | if (unlikely(!skb)) { |
1979 | /* Better luck next round */ | 2043 | /* Better luck next round */ |
2044 | adapter->alloc_rx_buff_failed++; | ||
1980 | break; | 2045 | break; |
1981 | } | 2046 | } |
1982 | 2047 | ||
@@ -1990,33 +2055,36 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter) | |||
1990 | 2055 | ||
1991 | buffer_info->skb = skb; | 2056 | buffer_info->skb = skb; |
1992 | buffer_info->length = adapter->rx_buffer_len; | 2057 | buffer_info->length = adapter->rx_buffer_len; |
1993 | buffer_info->dma = | 2058 | map_skb: |
1994 | pci_map_single(pdev, | 2059 | buffer_info->dma = pci_map_single(pdev, |
1995 | skb->data, | 2060 | skb->data, |
1996 | adapter->rx_buffer_len, | 2061 | adapter->rx_buffer_len, |
1997 | PCI_DMA_FROMDEVICE); | 2062 | PCI_DMA_FROMDEVICE); |
1998 | 2063 | ||
2064 | rx_desc = IXGB_RX_DESC(*rx_ring, i); | ||
1999 | rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); | 2065 | rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); |
2000 | /* guarantee DD bit not set now before h/w gets descriptor | 2066 | /* guarantee DD bit not set now before h/w gets descriptor |
2001 | * this is the rest of the workaround for h/w double | 2067 | * this is the rest of the workaround for h/w double |
2002 | * writeback. */ | 2068 | * writeback. */ |
2003 | rx_desc->status = 0; | 2069 | rx_desc->status = 0; |
2004 | 2070 | ||
2005 | if((i & ~(num_group_tail_writes- 1)) == i) { | ||
2006 | /* Force memory writes to complete before letting h/w | ||
2007 | * know there are new descriptors to fetch. (Only | ||
2008 | * applicable for weak-ordered memory model archs, | ||
2009 | * such as IA-64). */ | ||
2010 | wmb(); | ||
2011 | |||
2012 | IXGB_WRITE_REG(&adapter->hw, RDT, i); | ||
2013 | } | ||
2014 | 2071 | ||
2015 | if(++i == rx_ring->count) i = 0; | 2072 | if(++i == rx_ring->count) i = 0; |
2016 | buffer_info = &rx_ring->buffer_info[i]; | 2073 | buffer_info = &rx_ring->buffer_info[i]; |
2017 | } | 2074 | } |
2018 | 2075 | ||
2019 | rx_ring->next_to_use = i; | 2076 | if (likely(rx_ring->next_to_use != i)) { |
2077 | rx_ring->next_to_use = i; | ||
2078 | if (unlikely(i-- == 0)) | ||
2079 | i = (rx_ring->count - 1); | ||
2080 | |||
2081 | /* Force memory writes to complete before letting h/w | ||
2082 | * know there are new descriptors to fetch. (Only | ||
2083 | * applicable for weak-ordered memory model archs, such | ||
2084 | * as IA-64). */ | ||
2085 | wmb(); | ||
2086 | IXGB_WRITE_REG(&adapter->hw, RDT, i); | ||
2087 | } | ||
2020 | } | 2088 | } |
2021 | 2089 | ||
2022 | /** | 2090 | /** |
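The refill loop above now posts the tail register once, after the whole batch: if anything was refilled, a single wmb() orders the descriptor writes and RDT is set to the last populated index (one behind next_to_use, with wraparound). A sketch of just that tail arithmetic, with the barrier and register write reduced to comments:

#include <stdio.h>

#define RING_COUNT 256		/* descriptors in the RX ring */

/* After refilling descriptors up to (but not including) index i, return
 * the value to program into RDT: the last descriptor the hardware may
 * fetch, or -1 when nothing was refilled and the write can be skipped. */
static int tail_to_write(int next_to_use, int i)
{
	if (next_to_use == i)
		return -1;
	/* a wmb() goes here: descriptor writes must land before the tail */
	return (i == 0) ? RING_COUNT - 1 : i - 1;
}

int main(void)
{
	printf("%d\n", tail_to_write(10, 14));	/* refilled 4 slots -> 13 */
	printf("%d\n", tail_to_write(250, 0));	/* wrapped -> 255 */
	printf("%d\n", tail_to_write(10, 10));	/* nothing refilled -> -1 */
	return 0;
}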
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h index dba20481ee80..ee982feac64d 100644 --- a/drivers/net/ixgb/ixgb_osdep.h +++ b/drivers/net/ixgb/ixgb_osdep.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c index 8a83dfdf746d..39fbed29a3df 100644 --- a/drivers/net/ixgb/ixgb_param.c +++ b/drivers/net/ixgb/ixgb_param.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -76,7 +76,7 @@ IXGB_PARAM(RxDescriptors, "Number of receive descriptors"); | |||
76 | * - 2 - Tx only, generate PAUSE frames but ignore them on receive | 76 | * - 2 - Tx only, generate PAUSE frames but ignore them on receive |
77 | * - 3 - Full Flow Control Support | 77 | * - 3 - Full Flow Control Support |
78 | * | 78 | * |
79 | * Default Value: Read flow control settings from the EEPROM | 79 | * Default Value: 2 - Tx only (silicon bug avoidance) |
80 | */ | 80 | */ |
81 | 81 | ||
82 | IXGB_PARAM(FlowControl, "Flow Control setting"); | 82 | IXGB_PARAM(FlowControl, "Flow Control setting"); |
@@ -137,7 +137,7 @@ IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold"); | |||
137 | * | 137 | * |
138 | * Valid Range: 1 - 65535 | 138 | * Valid Range: 1 - 65535 |
139 | * | 139 | * |
140 | * Default Value: 256 (0x100) | 140 | * Default Value: 65535 (0xffff) (we'll send an xon if we recover) |
141 | */ | 141 | */ |
142 | 142 | ||
143 | IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout"); | 143 | IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout"); |
@@ -165,8 +165,6 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable"); | |||
165 | 165 | ||
166 | #define XSUMRX_DEFAULT OPTION_ENABLED | 166 | #define XSUMRX_DEFAULT OPTION_ENABLED |
167 | 167 | ||
168 | #define FLOW_CONTROL_FULL ixgb_fc_full | ||
169 | #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL | ||
170 | #define DEFAULT_FCRTL 0x28000 | 168 | #define DEFAULT_FCRTL 0x28000 |
171 | #define DEFAULT_FCRTH 0x30000 | 169 | #define DEFAULT_FCRTH 0x30000 |
172 | #define MIN_FCRTL 0 | 170 | #define MIN_FCRTL 0 |
@@ -174,9 +172,9 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable"); | |||
174 | #define MIN_FCRTH 8 | 172 | #define MIN_FCRTH 8 |
175 | #define MAX_FCRTH 0x3FFF0 | 173 | #define MAX_FCRTH 0x3FFF0 |
176 | 174 | ||
177 | #define DEFAULT_FCPAUSE 0x100 /* this may be too long */ | ||
178 | #define MIN_FCPAUSE 1 | 175 | #define MIN_FCPAUSE 1 |
179 | #define MAX_FCPAUSE 0xffff | 176 | #define MAX_FCPAUSE 0xffff |
177 | #define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */ | ||
180 | 178 | ||
181 | struct ixgb_option { | 179 | struct ixgb_option { |
182 | enum { enable_option, range_option, list_option } type; | 180 | enum { enable_option, range_option, list_option } type; |
@@ -336,7 +334,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
336 | .type = list_option, | 334 | .type = list_option, |
337 | .name = "Flow Control", | 335 | .name = "Flow Control", |
338 | .err = "reading default settings from EEPROM", | 336 | .err = "reading default settings from EEPROM", |
339 | .def = ixgb_fc_full, | 337 | .def = ixgb_fc_tx_pause, |
340 | .arg = { .l = { .nr = LIST_LEN(fc_list), | 338 | .arg = { .l = { .nr = LIST_LEN(fc_list), |
341 | .p = fc_list }} | 339 | .p = fc_list }} |
342 | }; | 340 | }; |
@@ -365,8 +363,8 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
365 | } else { | 363 | } else { |
366 | adapter->hw.fc.high_water = opt.def; | 364 | adapter->hw.fc.high_water = opt.def; |
367 | } | 365 | } |
368 | if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) ) | 366 | if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) |
369 | printk (KERN_INFO | 367 | printk (KERN_INFO |
370 | "Ignoring RxFCHighThresh when no RxFC\n"); | 368 | "Ignoring RxFCHighThresh when no RxFC\n"); |
371 | } | 369 | } |
372 | { /* Receive Flow Control Low Threshold */ | 370 | { /* Receive Flow Control Low Threshold */ |
@@ -385,8 +383,8 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
385 | } else { | 383 | } else { |
386 | adapter->hw.fc.low_water = opt.def; | 384 | adapter->hw.fc.low_water = opt.def; |
387 | } | 385 | } |
388 | if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) ) | 386 | if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) |
389 | printk (KERN_INFO | 387 | printk (KERN_INFO |
390 | "Ignoring RxFCLowThresh when no RxFC\n"); | 388 | "Ignoring RxFCLowThresh when no RxFC\n"); |
391 | } | 389 | } |
392 | { /* Flow Control Pause Time Request*/ | 390 | { /* Flow Control Pause Time Request*/ |
@@ -406,12 +404,12 @@ ixgb_check_options(struct ixgb_adapter *adapter) | |||
406 | } else { | 404 | } else { |
407 | adapter->hw.fc.pause_time = opt.def; | 405 | adapter->hw.fc.pause_time = opt.def; |
408 | } | 406 | } |
409 | if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) ) | 407 | if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) |
410 | printk (KERN_INFO | 408 | printk (KERN_INFO |
411 | "Ignoring FCReqTimeout when no RxFC\n"); | 409 | "Ignoring FCReqTimeout when no RxFC\n"); |
412 | } | 410 | } |
413 | /* high low and spacing check for rx flow control thresholds */ | 411 | /* high low and spacing check for rx flow control thresholds */ |
414 | if (adapter->hw.fc.type & ixgb_fc_rx_pause) { | 412 | if (adapter->hw.fc.type & ixgb_fc_tx_pause) { |
415 | /* high must be greater than low */ | 413 | /* high must be greater than low */ |
416 | if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { | 414 | if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { |
417 | /* set defaults */ | 415 | /* set defaults */ |
diff --git a/drivers/net/myri10ge/Makefile b/drivers/net/myri10ge/Makefile new file mode 100644 index 000000000000..5df891647aee --- /dev/null +++ b/drivers/net/myri10ge/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | # | ||
2 | # Makefile for the Myricom Myri-10G ethernet driver | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_MYRI10GE) += myri10ge.o | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c new file mode 100644 index 000000000000..87933cba7e22 --- /dev/null +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -0,0 +1,2851 @@ | |||
1 | /************************************************************************* | ||
2 | * myri10ge.c: Myricom Myri-10G Ethernet driver. | ||
3 | * | ||
4 | * Copyright (C) 2005, 2006 Myricom, Inc. | ||
5 | * All rights reserved. | ||
6 | * | ||
7 | * Redistribution and use in source and binary forms, with or without | ||
8 | * modification, are permitted provided that the following conditions | ||
9 | * are met: | ||
10 | * 1. Redistributions of source code must retain the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer. | ||
12 | * 2. Redistributions in binary form must reproduce the above copyright | ||
13 | * notice, this list of conditions and the following disclaimer in the | ||
14 | * documentation and/or other materials provided with the distribution. | ||
15 | * 3. Neither the name of Myricom, Inc. nor the names of its contributors | ||
16 | * may be used to endorse or promote products derived from this software | ||
17 | * without specific prior written permission. | ||
18 | * | ||
19 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | ||
20 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
21 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
22 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | ||
23 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
24 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
25 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
26 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
27 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
28 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
29 | * SUCH DAMAGE. | ||
30 | * | ||
31 | * | ||
32 | * If the eeprom on your board is not recent enough, you will need to get a | ||
33 | * newer firmware image at: | ||
34 | * http://www.myri.com/scs/download-Myri10GE.html | ||
35 | * | ||
36 | * Contact Information: | ||
37 | * <help@myri.com> | ||
38 | * Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006 | ||
39 | *************************************************************************/ | ||
40 | |||
41 | #include <linux/tcp.h> | ||
42 | #include <linux/netdevice.h> | ||
43 | #include <linux/skbuff.h> | ||
44 | #include <linux/string.h> | ||
45 | #include <linux/module.h> | ||
46 | #include <linux/pci.h> | ||
47 | #include <linux/etherdevice.h> | ||
48 | #include <linux/if_ether.h> | ||
49 | #include <linux/if_vlan.h> | ||
50 | #include <linux/ip.h> | ||
51 | #include <linux/inet.h> | ||
52 | #include <linux/in.h> | ||
53 | #include <linux/ethtool.h> | ||
54 | #include <linux/firmware.h> | ||
55 | #include <linux/delay.h> | ||
56 | #include <linux/version.h> | ||
57 | #include <linux/timer.h> | ||
58 | #include <linux/vmalloc.h> | ||
59 | #include <linux/crc32.h> | ||
60 | #include <linux/moduleparam.h> | ||
61 | #include <linux/io.h> | ||
62 | #include <net/checksum.h> | ||
63 | #include <asm/byteorder.h> | ||
64 | #include <asm/io.h> | ||
65 | #include <asm/pci.h> | ||
66 | #include <asm/processor.h> | ||
67 | #ifdef CONFIG_MTRR | ||
68 | #include <asm/mtrr.h> | ||
69 | #endif | ||
70 | |||
71 | #include "myri10ge_mcp.h" | ||
72 | #include "myri10ge_mcp_gen_header.h" | ||
73 | |||
74 | #define MYRI10GE_VERSION_STR "0.9.0" | ||
75 | |||
76 | MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); | ||
77 | MODULE_AUTHOR("Maintainer: help@myri.com"); | ||
78 | MODULE_VERSION(MYRI10GE_VERSION_STR); | ||
79 | MODULE_LICENSE("Dual BSD/GPL"); | ||
80 | |||
81 | #define MYRI10GE_MAX_ETHER_MTU 9014 | ||
82 | |||
83 | #define MYRI10GE_ETH_STOPPED 0 | ||
84 | #define MYRI10GE_ETH_STOPPING 1 | ||
85 | #define MYRI10GE_ETH_STARTING 2 | ||
86 | #define MYRI10GE_ETH_RUNNING 3 | ||
87 | #define MYRI10GE_ETH_OPEN_FAILED 4 | ||
88 | |||
89 | #define MYRI10GE_EEPROM_STRINGS_SIZE 256 | ||
90 | #define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2) | ||
91 | |||
92 | #define MYRI10GE_NO_CONFIRM_DATA 0xffffffff | ||
93 | #define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff | ||
94 | |||
95 | struct myri10ge_rx_buffer_state { | ||
96 | struct sk_buff *skb; | ||
97 | DECLARE_PCI_UNMAP_ADDR(bus) | ||
98 | DECLARE_PCI_UNMAP_LEN(len) | ||
99 | }; | ||
100 | |||
101 | struct myri10ge_tx_buffer_state { | ||
102 | struct sk_buff *skb; | ||
103 | int last; | ||
104 | DECLARE_PCI_UNMAP_ADDR(bus) | ||
105 | DECLARE_PCI_UNMAP_LEN(len) | ||
106 | }; | ||
107 | |||
108 | struct myri10ge_cmd { | ||
109 | u32 data0; | ||
110 | u32 data1; | ||
111 | u32 data2; | ||
112 | }; | ||
113 | |||
114 | struct myri10ge_rx_buf { | ||
115 | struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */ | ||
116 | u8 __iomem *wc_fifo; /* w/c rx dma addr fifo address */ | ||
117 | struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */ | ||
118 | struct myri10ge_rx_buffer_state *info; | ||
119 | int cnt; | ||
120 | int alloc_fail; | ||
121 | int mask; /* number of rx slots -1 */ | ||
122 | }; | ||
123 | |||
124 | struct myri10ge_tx_buf { | ||
125 | struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */ | ||
126 | u8 __iomem *wc_fifo; /* w/c send fifo address */ | ||
127 | struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */ | ||
128 | char *req_bytes; | ||
129 | struct myri10ge_tx_buffer_state *info; | ||
130 | int mask; /* number of transmit slots -1 */ | ||
131 | int boundary; /* boundary transmits cannot cross */ | ||
132 | int req ____cacheline_aligned; /* transmit slots submitted */ | ||
133 | int pkt_start; /* packets started */ | ||
134 | int done ____cacheline_aligned; /* transmit slots completed */ | ||
135 | int pkt_done; /* packets completed */ | ||
136 | }; | ||
137 | |||
138 | struct myri10ge_rx_done { | ||
139 | struct mcp_slot *entry; | ||
140 | dma_addr_t bus; | ||
141 | int cnt; | ||
142 | int idx; | ||
143 | }; | ||
144 | |||
145 | struct myri10ge_priv { | ||
146 | int running; /* running? */ | ||
147 | int csum_flag; /* rx_csums? */ | ||
148 | struct myri10ge_tx_buf tx; /* transmit ring */ | ||
149 | struct myri10ge_rx_buf rx_small; | ||
150 | struct myri10ge_rx_buf rx_big; | ||
151 | struct myri10ge_rx_done rx_done; | ||
152 | int small_bytes; | ||
153 | struct net_device *dev; | ||
154 | struct net_device_stats stats; | ||
155 | u8 __iomem *sram; | ||
156 | int sram_size; | ||
157 | unsigned long board_span; | ||
158 | unsigned long iomem_base; | ||
159 | u32 __iomem *irq_claim; | ||
160 | u32 __iomem *irq_deassert; | ||
161 | char *mac_addr_string; | ||
162 | struct mcp_cmd_response *cmd; | ||
163 | dma_addr_t cmd_bus; | ||
164 | struct mcp_irq_data *fw_stats; | ||
165 | dma_addr_t fw_stats_bus; | ||
166 | struct pci_dev *pdev; | ||
167 | int msi_enabled; | ||
168 | unsigned int link_state; | ||
169 | unsigned int rdma_tags_available; | ||
170 | int intr_coal_delay; | ||
171 | u32 __iomem *intr_coal_delay_ptr; | ||
172 | int mtrr; | ||
173 | int wake_queue; | ||
174 | int stop_queue; | ||
175 | int down_cnt; | ||
176 | wait_queue_head_t down_wq; | ||
177 | struct work_struct watchdog_work; | ||
178 | struct timer_list watchdog_timer; | ||
179 | int watchdog_tx_done; | ||
180 | int watchdog_resets; | ||
181 | int tx_linearized; | ||
182 | int pause; | ||
183 | char *fw_name; | ||
184 | char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; | ||
185 | char fw_version[128]; | ||
186 | u8 mac_addr[6]; /* eeprom mac address */ | ||
187 | unsigned long serial_number; | ||
188 | int vendor_specific_offset; | ||
189 | u32 devctl; | ||
190 | u16 msi_flags; | ||
191 | u32 pm_state[16]; | ||
192 | u32 read_dma; | ||
193 | u32 write_dma; | ||
194 | u32 read_write_dma; | ||
195 | }; | ||
196 | |||
197 | static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat"; | ||
198 | static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat"; | ||
199 | |||
200 | static char *myri10ge_fw_name = NULL; | ||
201 | module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); | ||
202 | MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n"); | ||
203 | |||
204 | static int myri10ge_ecrc_enable = 1; | ||
205 | module_param(myri10ge_ecrc_enable, int, S_IRUGO); | ||
206 | MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n"); | ||
207 | |||
208 | static int myri10ge_max_intr_slots = 1024; | ||
209 | module_param(myri10ge_max_intr_slots, int, S_IRUGO); | ||
210 | MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n"); | ||
211 | |||
212 | static int myri10ge_small_bytes = -1; /* -1 == auto */ | ||
213 | module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); | ||
214 | MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n"); | ||
215 | |||
216 | static int myri10ge_msi = 1; /* enable msi by default */ | ||
217 | module_param(myri10ge_msi, int, S_IRUGO); | ||
218 | MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n"); | ||
219 | |||
220 | static int myri10ge_intr_coal_delay = 25; | ||
221 | module_param(myri10ge_intr_coal_delay, int, S_IRUGO); | ||
222 | MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n"); | ||
223 | |||
224 | static int myri10ge_flow_control = 1; | ||
225 | module_param(myri10ge_flow_control, int, S_IRUGO); | ||
226 | MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n"); | ||
227 | |||
228 | static int myri10ge_deassert_wait = 1; | ||
229 | module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR); | ||
230 | MODULE_PARM_DESC(myri10ge_deassert_wait, | ||
231 | "Wait when deasserting legacy interrupts\n"); | ||
232 | |||
233 | static int myri10ge_force_firmware = 0; | ||
234 | module_param(myri10ge_force_firmware, int, S_IRUGO); | ||
235 | MODULE_PARM_DESC(myri10ge_force_firmware, | ||
236 | "Force firmware to assume aligned completions\n"); | ||
237 | |||
238 | static int myri10ge_skb_cross_4k = 0; | ||
239 | module_param(myri10ge_skb_cross_4k, int, S_IRUGO | S_IWUSR); | ||
240 | MODULE_PARM_DESC(myri10ge_skb_cross_4k, | ||
241 | "Can a small skb cross a 4KB boundary?\n"); | ||
242 | |||
243 | static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; | ||
244 | module_param(myri10ge_initial_mtu, int, S_IRUGO); | ||
245 | MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n"); | ||
246 | |||
247 | static int myri10ge_napi_weight = 64; | ||
248 | module_param(myri10ge_napi_weight, int, S_IRUGO); | ||
249 | MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n"); | ||
250 | |||
251 | static int myri10ge_watchdog_timeout = 1; | ||
252 | module_param(myri10ge_watchdog_timeout, int, S_IRUGO); | ||
253 | MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n"); | ||
254 | |||
255 | static int myri10ge_max_irq_loops = 1048576; | ||
256 | module_param(myri10ge_max_irq_loops, int, S_IRUGO); | ||
257 | MODULE_PARM_DESC(myri10ge_max_irq_loops, | ||
258 | "Set stuck legacy IRQ detection threshold\n"); | ||
259 | |||
260 | #define MYRI10GE_FW_OFFSET 1024*1024 | ||
261 | #define MYRI10GE_HIGHPART_TO_U32(X) \ | ||
262 | (sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0) | ||
263 | #define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X)) | ||
264 | |||
265 | #define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8) | ||
266 | |||
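MYRI10GE_HIGHPART_TO_U32/LOWPART_TO_U32 split a DMA bus address into the two 32-bit words the firmware command format carries, with the high word collapsing to zero when the address type is only 32 bits wide. A quick illustration of the same split, using uint64_t in place of dma_addr_t:

#include <stdio.h>
#include <stdint.h>

/* Split a (possibly 64-bit) bus address into the two 32-bit halves the
 * firmware command expects; a 32-bit address type yields a zero high half. */
#define HIGHPART_TO_U32(x) ((sizeof(x) == 8) ? (uint32_t)((uint64_t)(x) >> 32) : 0)
#define LOWPART_TO_U32(x)  ((uint32_t)(x))

int main(void)
{
	uint64_t bus = 0x00000001a2b3c4d5ULL;	/* pretend DMA address */

	printf("high=0x%x low=0x%x\n", HIGHPART_TO_U32(bus), LOWPART_TO_U32(bus));
	return 0;
}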
267 | static int | ||
268 | myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd, | ||
269 | struct myri10ge_cmd *data, int atomic) | ||
270 | { | ||
271 | struct mcp_cmd *buf; | ||
272 | char buf_bytes[sizeof(*buf) + 8]; | ||
273 | struct mcp_cmd_response *response = mgp->cmd; | ||
274 | char __iomem *cmd_addr = mgp->sram + MXGEFW_CMD_OFFSET; | ||
275 | u32 dma_low, dma_high, result, value; | ||
276 | int sleep_total = 0; | ||
277 | |||
278 | /* ensure buf is aligned to 8 bytes */ | ||
279 | buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8); | ||
280 | |||
281 | buf->data0 = htonl(data->data0); | ||
282 | buf->data1 = htonl(data->data1); | ||
283 | buf->data2 = htonl(data->data2); | ||
284 | buf->cmd = htonl(cmd); | ||
285 | dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus); | ||
286 | dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus); | ||
287 | |||
288 | buf->response_addr.low = htonl(dma_low); | ||
289 | buf->response_addr.high = htonl(dma_high); | ||
290 | response->result = MYRI10GE_NO_RESPONSE_RESULT; | ||
291 | mb(); | ||
292 | myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf)); | ||
293 | |||
294 | /* wait up to 15ms. Longest command is the DMA benchmark, | ||
295 | * which is capped at 5ms, but runs from a timeout handler | ||
296 | * that runs every 7.8ms. So a 15ms timeout leaves us with | ||
297 | * a 2.2ms margin | ||
298 | */ | ||
299 | if (atomic) { | ||
300 | /* if atomic is set, do not sleep, | ||
301 | * and try to get the completion quickly | ||
302 | * (1ms will be enough for those commands) */ | ||
303 | for (sleep_total = 0; | ||
304 | sleep_total < 1000 | ||
305 | && response->result == MYRI10GE_NO_RESPONSE_RESULT; | ||
306 | sleep_total += 10) | ||
307 | udelay(10); | ||
308 | } else { | ||
309 | /* use msleep for most commands */ | ||
310 | for (sleep_total = 0; | ||
311 | sleep_total < 15 | ||
312 | && response->result == MYRI10GE_NO_RESPONSE_RESULT; | ||
313 | sleep_total++) | ||
314 | msleep(1); | ||
315 | } | ||
316 | |||
317 | result = ntohl(response->result); | ||
318 | value = ntohl(response->data); | ||
319 | if (result != MYRI10GE_NO_RESPONSE_RESULT) { | ||
320 | if (result == 0) { | ||
321 | data->data0 = value; | ||
322 | return 0; | ||
323 | } else { | ||
324 | dev_err(&mgp->pdev->dev, | ||
325 | "command %d failed, result = %d\n", | ||
326 | cmd, result); | ||
327 | return -ENXIO; | ||
328 | } | ||
329 | } | ||
330 | |||
331 | dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n", | ||
332 | cmd, result); | ||
333 | return -EAGAIN; | ||
334 | } | ||
335 | |||
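myri10ge_send_cmd builds the command in an oversized on-stack array and rounds the pointer up so the PIO copy to the NIC is 8-byte aligned. A small userspace sketch of that alignment trick; ALIGN_UP is written out here since the kernel's ALIGN() macro is not assumed:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN_UP(x, a)	(((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

struct cmd { uint32_t word[4]; };	/* stand-in for struct mcp_cmd */

int main(void)
{
	/* Reserve sizeof(cmd) + 8 bytes so an 8-byte-aligned struct always fits. */
	char raw[sizeof(struct cmd) + 8];
	struct cmd *c = (struct cmd *)ALIGN_UP((uintptr_t)raw, 8);

	memset(c, 0, sizeof(*c));
	printf("raw=%p aligned=%p\n", (void *)raw, (void *)c);
	return 0;
}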
336 | /* | ||
337 | * The eeprom strings on the lanaiX have the format | ||
338 | * SN=x\0 | ||
339 | * MAC=x:x:x:x:x:x\0 | ||
340 | * PT:ddd mmm xx xx:xx:xx xx\0 | ||
341 | * PV:ddd mmm xx xx:xx:xx xx\0 | ||
342 | */ | ||
343 | static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp) | ||
344 | { | ||
345 | char *ptr, *limit; | ||
346 | int i; | ||
347 | |||
348 | ptr = mgp->eeprom_strings; | ||
349 | limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE; | ||
350 | |||
351 | while (*ptr != '\0' && ptr < limit) { | ||
352 | if (memcmp(ptr, "MAC=", 4) == 0) { | ||
353 | ptr += 4; | ||
354 | mgp->mac_addr_string = ptr; | ||
355 | for (i = 0; i < 6; i++) { | ||
356 | if ((ptr + 2) > limit) | ||
357 | goto abort; | ||
358 | mgp->mac_addr[i] = | ||
359 | simple_strtoul(ptr, &ptr, 16); | ||
360 | ptr += 1; | ||
361 | } | ||
362 | } | ||
363 | if (memcmp((const void *)ptr, "SN=", 3) == 0) { | ||
364 | ptr += 3; | ||
365 | mgp->serial_number = simple_strtoul(ptr, &ptr, 10); | ||
366 | } | ||
367 | while (ptr < limit && *ptr++) ; | ||
368 | } | ||
369 | |||
370 | return 0; | ||
371 | |||
372 | abort: | ||
373 | dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n"); | ||
374 | return -ENXIO; | ||
375 | } | ||
376 | |||
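The EEPROM strings are consecutive NUL-terminated KEY=value records ended by an empty string, so the parser walks record by record looking for the MAC= and SN= prefixes. A rough userspace equivalent under that same layout assumption:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Scan back-to-back NUL-terminated records (ended by an empty string)
 * for "MAC=aa:bb:cc:dd:ee:ff" and "SN=1234". */
static void parse_eeprom(const char *p, const char *limit,
			 unsigned char mac[6], unsigned long *serial)
{
	while (p < limit && *p != '\0') {
		if (strncmp(p, "MAC=", 4) == 0) {
			const char *q = p + 4;
			char *end;
			int i;

			for (i = 0; i < 6; i++) {
				mac[i] = (unsigned char)strtoul(q, &end, 16);
				q = end + 1;	/* skip the ':' separator */
			}
		} else if (strncmp(p, "SN=", 3) == 0) {
			*serial = strtoul(p + 3, NULL, 10);
		}
		p += strlen(p) + 1;	/* advance to the next record */
	}
}

int main(void)
{
	const char strings[] = "SN=42\0MAC=00:60:dd:47:aa:01\0";
	unsigned char mac[6] = { 0 };
	unsigned long sn = 0;

	parse_eeprom(strings, strings + sizeof(strings), mac, &sn);
	printf("SN=%lu last MAC byte=%02x\n", sn, mac[5]);
	return 0;
}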
377 | /* | ||
378 | * Enable or disable periodic RDMAs from the host to make certain | ||
379 | * chipsets resend dropped PCIe messages | ||
380 | */ | ||
381 | |||
382 | static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable) | ||
383 | { | ||
384 | char __iomem *submit; | ||
385 | u32 buf[16]; | ||
386 | u32 dma_low, dma_high; | ||
387 | int i; | ||
388 | |||
389 | /* clear confirmation addr */ | ||
390 | mgp->cmd->data = 0; | ||
391 | mb(); | ||
392 | |||
393 | /* send a rdma command to the PCIe engine, and wait for the | ||
394 | * response in the confirmation address. The firmware should | ||
395 | * write a -1 there to indicate it is alive and well | ||
396 | */ | ||
397 | dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus); | ||
398 | dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus); | ||
399 | |||
400 | buf[0] = htonl(dma_high); /* confirm addr MSW */ | ||
401 | buf[1] = htonl(dma_low); /* confirm addr LSW */ | ||
402 | buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA); /* confirm data */ | ||
403 | buf[3] = htonl(dma_high); /* dummy addr MSW */ | ||
404 | buf[4] = htonl(dma_low); /* dummy addr LSW */ | ||
405 | buf[5] = htonl(enable); /* enable? */ | ||
406 | |||
407 | submit = mgp->sram + 0xfc01c0; | ||
408 | |||
409 | myri10ge_pio_copy(submit, &buf, sizeof(buf)); | ||
410 | for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++) | ||
411 | msleep(1); | ||
412 | if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) | ||
413 | dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n", | ||
414 | (enable ? "enable" : "disable")); | ||
415 | } | ||
416 | |||
417 | static int | ||
418 | myri10ge_validate_firmware(struct myri10ge_priv *mgp, | ||
419 | struct mcp_gen_header *hdr) | ||
420 | { | ||
421 | struct device *dev = &mgp->pdev->dev; | ||
422 | int major, minor; | ||
423 | |||
424 | /* check firmware type */ | ||
425 | if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) { | ||
426 | dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type)); | ||
427 | return -EINVAL; | ||
428 | } | ||
429 | |||
430 | /* save firmware version for ethtool */ | ||
431 | strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version)); | ||
432 | |||
433 | sscanf(mgp->fw_version, "%d.%d", &major, &minor); | ||
434 | |||
435 | if (!(major == MXGEFW_VERSION_MAJOR && minor == MXGEFW_VERSION_MINOR)) { | ||
436 | dev_err(dev, "Found firmware version %s\n", mgp->fw_version); | ||
437 | dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR, | ||
438 | MXGEFW_VERSION_MINOR); | ||
439 | return -EINVAL; | ||
440 | } | ||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) | ||
445 | { | ||
446 | unsigned crc, reread_crc; | ||
447 | const struct firmware *fw; | ||
448 | struct device *dev = &mgp->pdev->dev; | ||
449 | struct mcp_gen_header *hdr; | ||
450 | size_t hdr_offset; | ||
451 | int status; | ||
452 | |||
453 | if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { | ||
454 | dev_err(dev, "Unable to load %s firmware image via hotplug\n", | ||
455 | mgp->fw_name); | ||
456 | status = -EINVAL; | ||
457 | goto abort_with_nothing; | ||
458 | } | ||
459 | |||
460 | /* check size */ | ||
461 | |||
462 | if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET || | ||
463 | fw->size < MCP_HEADER_PTR_OFFSET + 4) { | ||
464 | dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size); | ||
465 | status = -EINVAL; | ||
466 | goto abort_with_fw; | ||
467 | } | ||
468 | |||
469 | /* check id */ | ||
470 | hdr_offset = ntohl(*(u32 *) (fw->data + MCP_HEADER_PTR_OFFSET)); | ||
471 | if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) { | ||
472 | dev_err(dev, "Bad firmware file\n"); | ||
473 | status = -EINVAL; | ||
474 | goto abort_with_fw; | ||
475 | } | ||
476 | hdr = (void *)(fw->data + hdr_offset); | ||
477 | |||
478 | status = myri10ge_validate_firmware(mgp, hdr); | ||
479 | if (status != 0) | ||
480 | goto abort_with_fw; | ||
481 | |||
482 | crc = crc32(~0, fw->data, fw->size); | ||
483 | memcpy_toio(mgp->sram + MYRI10GE_FW_OFFSET, fw->data, fw->size); | ||
484 | /* corruption checking is good for parity recovery and buggy chipset */ | ||
485 | memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); | ||
486 | reread_crc = crc32(~0, fw->data, fw->size); | ||
487 | if (crc != reread_crc) { | ||
488 | dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n", | ||
489 | (unsigned)fw->size, reread_crc, crc); | ||
490 | status = -EIO; | ||
491 | goto abort_with_fw; | ||
492 | } | ||
493 | *size = (u32) fw->size; | ||
494 | |||
495 | abort_with_fw: | ||
496 | release_firmware(fw); | ||
497 | |||
498 | abort_with_nothing: | ||
499 | return status; | ||
500 | } | ||
501 | |||
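Because a marginal link or buggy chipset can corrupt the image on its way to SRAM, the loader checksums the firmware file, reads the SRAM copy back, and compares CRCs before handing off. A tiny sketch of that write-then-verify pattern using zlib's crc32() in place of the kernel's (link with -lz); the "device memory" is an ordinary buffer here:

#include <stdio.h>
#include <string.h>
#include <zlib.h>		/* link with -lz */

/* Copy an image into "device" memory and verify it survived by reading
 * it back and comparing CRCs, like the loader's corruption check. */
static int copy_and_verify(unsigned char *dev_mem, const unsigned char *img,
			   size_t len)
{
	unsigned long crc = crc32(~0UL, img, len);

	memcpy(dev_mem, img, len);		/* memcpy_toio() stand-in */
	return crc32(~0UL, dev_mem, len) == crc ? 0 : -5;	/* -5 ~ -EIO */
}

int main(void)
{
	unsigned char img[64] = "firmware image", sram[64];

	printf("verify: %d\n", copy_and_verify(sram, img, sizeof(img)));
	return 0;
}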
502 | static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp) | ||
503 | { | ||
504 | struct mcp_gen_header *hdr; | ||
505 | struct device *dev = &mgp->pdev->dev; | ||
506 | const size_t bytes = sizeof(struct mcp_gen_header); | ||
507 | size_t hdr_offset; | ||
508 | int status; | ||
509 | |||
510 | /* find running firmware header */ | ||
511 | hdr_offset = ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)); | ||
512 | |||
513 | if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) { | ||
514 | dev_err(dev, "Running firmware has bad header offset (%d)\n", | ||
515 | (int)hdr_offset); | ||
516 | return -EIO; | ||
517 | } | ||
518 | |||
519 | /* copy header of running firmware from SRAM to host memory to | ||
520 | * validate firmware */ | ||
521 | hdr = kmalloc(bytes, GFP_KERNEL); | ||
522 | if (hdr == NULL) { | ||
523 | dev_err(dev, "could not malloc firmware hdr\n"); | ||
524 | return -ENOMEM; | ||
525 | } | ||
526 | memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes); | ||
527 | status = myri10ge_validate_firmware(mgp, hdr); | ||
528 | kfree(hdr); | ||
529 | return status; | ||
530 | } | ||
531 | |||
532 | static int myri10ge_load_firmware(struct myri10ge_priv *mgp) | ||
533 | { | ||
534 | char __iomem *submit; | ||
535 | u32 buf[16]; | ||
536 | u32 dma_low, dma_high, size; | ||
537 | int status, i; | ||
538 | |||
539 | status = myri10ge_load_hotplug_firmware(mgp, &size); | ||
540 | if (status) { | ||
541 | dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n"); | ||
542 | |||
543 | /* Do not attempt to adopt firmware if there | ||
544 | * was a bad crc */ | ||
545 | if (status == -EIO) | ||
546 | return status; | ||
547 | |||
548 | status = myri10ge_adopt_running_firmware(mgp); | ||
549 | if (status != 0) { | ||
550 | dev_err(&mgp->pdev->dev, | ||
551 | "failed to adopt running firmware\n"); | ||
552 | return status; | ||
553 | } | ||
554 | dev_info(&mgp->pdev->dev, | ||
555 | "Successfully adopted running firmware\n"); | ||
556 | if (mgp->tx.boundary == 4096) { | ||
557 | dev_warn(&mgp->pdev->dev, | ||
558 | "Using firmware currently running on NIC" | ||
559 | ". For optimal\n"); | ||
560 | dev_warn(&mgp->pdev->dev, | ||
561 | "performance consider loading optimized " | ||
562 | "firmware\n"); | ||
563 | dev_warn(&mgp->pdev->dev, "via hotplug\n"); | ||
564 | } | ||
565 | |||
566 | mgp->fw_name = "adopted"; | ||
567 | mgp->tx.boundary = 2048; | ||
568 | return status; | ||
569 | } | ||
570 | |||
571 | /* clear confirmation addr */ | ||
572 | mgp->cmd->data = 0; | ||
573 | mb(); | ||
574 | |||
575 | /* send a reload command to the bootstrap MCP, and wait for the | ||
576 | * response in the confirmation address. The firmware should | ||
577 | * write a -1 there to indicate it is alive and well | ||
578 | */ | ||
579 | dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus); | ||
580 | dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus); | ||
581 | |||
582 | buf[0] = htonl(dma_high); /* confirm addr MSW */ | ||
583 | buf[1] = htonl(dma_low); /* confirm addr LSW */ | ||
584 | buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA); /* confirm data */ | ||
585 | |||
586 | /* FIX: All newest firmware should un-protect the bottom of | ||
587 | * the sram before handoff. However, the very first interfaces | ||
588 | * do not. Therefore the handoff copy must skip the first 8 bytes | ||
589 | */ | ||
590 | buf[3] = htonl(MYRI10GE_FW_OFFSET + 8); /* where the code starts */ | ||
591 | buf[4] = htonl(size - 8); /* length of code */ | ||
592 | buf[5] = htonl(8); /* where to copy to */ | ||
593 | buf[6] = htonl(0); /* where to jump to */ | ||
594 | |||
595 | submit = mgp->sram + 0xfc0000; | ||
596 | |||
597 | myri10ge_pio_copy(submit, &buf, sizeof(buf)); | ||
598 | mb(); | ||
599 | msleep(1); | ||
600 | mb(); | ||
601 | i = 0; | ||
602 | while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) { | ||
603 | msleep(1); | ||
604 | i++; | ||
605 | } | ||
606 | if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) { | ||
607 | dev_err(&mgp->pdev->dev, "handoff failed\n"); | ||
608 | return -ENXIO; | ||
609 | } | ||
610 | dev_info(&mgp->pdev->dev, "handoff confirmed\n"); | ||
611 | myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); | ||
612 | |||
613 | return 0; | ||
614 | } | ||
615 | |||
616 | static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr) | ||
617 | { | ||
618 | struct myri10ge_cmd cmd; | ||
619 | int status; | ||
620 | |||
621 | cmd.data0 = ((addr[0] << 24) | (addr[1] << 16) | ||
622 | | (addr[2] << 8) | addr[3]); | ||
623 | |||
624 | cmd.data1 = ((addr[4] << 8) | (addr[5])); | ||
625 | |||
626 | status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0); | ||
627 | return status; | ||
628 | } | ||
629 | |||
630 | static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause) | ||
631 | { | ||
632 | struct myri10ge_cmd cmd; | ||
633 | int status, ctl; | ||
634 | |||
635 | ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL; | ||
636 | status = myri10ge_send_cmd(mgp, ctl, &cmd, 0); | ||
637 | |||
638 | if (status) { | ||
639 | printk(KERN_ERR | ||
640 | "myri10ge: %s: Failed to set flow control mode\n", | ||
641 | mgp->dev->name); | ||
642 | return status; | ||
643 | } | ||
644 | mgp->pause = pause; | ||
645 | return 0; | ||
646 | } | ||
647 | |||
648 | static void | ||
649 | myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic) | ||
650 | { | ||
651 | struct myri10ge_cmd cmd; | ||
652 | int status, ctl; | ||
653 | |||
654 | ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC; | ||
655 | status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic); | ||
656 | if (status) | ||
657 | printk(KERN_ERR "myri10ge: %s: Failed to set promisc mode\n", | ||
658 | mgp->dev->name); | ||
659 | } | ||
660 | |||
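| /* Reinitialize the firmware/driver shared state: reset the NIC, tell | ||
| * the firmware where the interrupt (rx done) queue lives, fetch the | ||
| * irq claim/deassert and coalescing offsets, run short DMA read, write | ||
| * and read/write benchmarks, zero the ring counters, and restore the | ||
| * MAC address, promiscuous mode and pause settings. */ | ||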
661 | static int myri10ge_reset(struct myri10ge_priv *mgp) | ||
662 | { | ||
663 | struct myri10ge_cmd cmd; | ||
664 | int status; | ||
665 | size_t bytes; | ||
666 | u32 len; | ||
667 | |||
668 | /* try to send a reset command to the card to see if it | ||
669 | * is alive */ | ||
670 | memset(&cmd, 0, sizeof(cmd)); | ||
671 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0); | ||
672 | if (status != 0) { | ||
673 | dev_err(&mgp->pdev->dev, "failed reset\n"); | ||
674 | return -ENXIO; | ||
675 | } | ||
676 | |||
677 | /* Now exchange information about interrupts */ | ||
678 | |||
679 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | ||
680 | memset(mgp->rx_done.entry, 0, bytes); | ||
681 | cmd.data0 = (u32) bytes; | ||
682 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); | ||
683 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); | ||
684 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); | ||
685 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); | ||
686 | |||
687 | status |= | ||
688 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); | ||
689 | mgp->irq_claim = (__iomem u32 *) (mgp->sram + cmd.data0); | ||
690 | if (!mgp->msi_enabled) { | ||
691 | status |= myri10ge_send_cmd | ||
692 | (mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd, 0); | ||
693 | mgp->irq_deassert = (__iomem u32 *) (mgp->sram + cmd.data0); | ||
694 | |||
695 | } | ||
696 | status |= myri10ge_send_cmd | ||
697 | (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0); | ||
698 | mgp->intr_coal_delay_ptr = (__iomem u32 *) (mgp->sram + cmd.data0); | ||
699 | if (status != 0) { | ||
700 | dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n"); | ||
701 | return status; | ||
702 | } | ||
703 | __raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); | ||
704 | |||
705 | /* Run a small DMA test. | ||
706 | * The magic multipliers to the length tell the firmware | ||
707 | * to do DMA read, write, or read+write tests. The | ||
708 | * results are returned in cmd.data0. The upper 16 | ||
709 | * bits of the return are the number of transfers completed. | ||
710 | * The lower 16 bits are the time in 0.5us ticks that the | ||
711 | * transfers took to complete. | ||
712 | */ | ||
713 | |||
714 | len = mgp->tx.boundary; | ||
715 | |||
716 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); | ||
717 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); | ||
718 | cmd.data2 = len * 0x10000; | ||
719 | status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0); | ||
720 | if (status == 0) | ||
721 | mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / | ||
722 | (cmd.data0 & 0xffff); | ||
723 | else | ||
724 | dev_warn(&mgp->pdev->dev, "DMA read benchmark failed: %d\n", | ||
725 | status); | ||
726 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); | ||
727 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); | ||
728 | cmd.data2 = len * 0x1; | ||
729 | status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0); | ||
730 | if (status == 0) | ||
731 | mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / | ||
732 | (cmd.data0 & 0xffff); | ||
733 | else | ||
734 | dev_warn(&mgp->pdev->dev, "DMA write benchmark failed: %d\n", | ||
735 | status); | ||
736 | |||
737 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); | ||
738 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); | ||
739 | cmd.data2 = len * 0x10001; | ||
740 | status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0); | ||
741 | if (status == 0) | ||
742 | mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) / | ||
743 | (cmd.data0 & 0xffff); | ||
744 | else | ||
745 | dev_warn(&mgp->pdev->dev, | ||
746 | "DMA read/write benchmark failed: %d\n", status); | ||
747 | |||
748 | memset(mgp->rx_done.entry, 0, bytes); | ||
749 | |||
750 | /* reset mcp/driver shared state back to 0 */ | ||
751 | mgp->tx.req = 0; | ||
752 | mgp->tx.done = 0; | ||
753 | mgp->tx.pkt_start = 0; | ||
754 | mgp->tx.pkt_done = 0; | ||
755 | mgp->rx_big.cnt = 0; | ||
756 | mgp->rx_small.cnt = 0; | ||
757 | mgp->rx_done.idx = 0; | ||
758 | mgp->rx_done.cnt = 0; | ||
759 | status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); | ||
760 | myri10ge_change_promisc(mgp, 0, 0); | ||
761 | myri10ge_change_pause(mgp, mgp->pause); | ||
762 | return status; | ||
763 | } | ||
764 | |||
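| /* Post a block of 8 receive descriptors by PIO. The first descriptor's | ||
| * addr_low is parked at DMA_32BIT_MASK while the eight entries are | ||
| * copied, and the real addr_low is written last, so the NIC never acts | ||
| * on a partially written block. */ | ||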
765 | static inline void | ||
766 | myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst, | ||
767 | struct mcp_kreq_ether_recv *src) | ||
768 | { | ||
769 | u32 low; | ||
770 | |||
771 | low = src->addr_low; | ||
772 | src->addr_low = DMA_32BIT_MASK; | ||
773 | myri10ge_pio_copy(dst, src, 8 * sizeof(*src)); | ||
774 | mb(); | ||
775 | src->addr_low = low; | ||
776 | __raw_writel(low, &dst->addr_low); | ||
777 | mb(); | ||
778 | } | ||
779 | |||
780 | /* | ||
781 | * Set of routines to get a new receive buffer. Any buffer which | ||
782 | * crosses a 4KB boundary must start on a 4KB boundary due to PCIe | ||
783 | * wdma restrictions. We also try to align any smaller allocation to | ||
784 | * at least a 16 byte boundary for efficiency. We assume the linux | ||
785 | * memory allocator works by powers of 2, and will not return memory | ||
786 | * smaller than 2KB which crosses a 4KB boundary. If it does, we fall | ||
787 | * back to allocating 2x as much space as required. | ||
788 | * | ||
789 | * We intend to replace large (>4KB) skb allocations by using | ||
790 | * pages directly and building a fraglist in the near future. | ||
791 | */ | ||
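| /* myri10ge_getbuf() below chooses between these helpers: allocations | ||
| * too big to fit below a 4KB boundary use myri10ge_alloc_big(), small | ||
| * ones normally use myri10ge_alloc_small(), and once a small skb has | ||
| * been seen to cross a 4KB boundary (myri10ge_skb_cross_4k) the | ||
| * conservative _small_safe() variant is used instead. */ | ||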
792 | |||
793 | static inline struct sk_buff *myri10ge_alloc_big(int bytes) | ||
794 | { | ||
795 | struct sk_buff *skb; | ||
796 | unsigned long data, roundup; | ||
797 | |||
798 | skb = dev_alloc_skb(bytes + 4096 + MXGEFW_PAD); | ||
799 | if (skb == NULL) | ||
800 | return NULL; | ||
801 | |||
802 | /* Correct skb->truesize so that socket buffer | ||
803 | * accounting is not confused by the rounding we must | ||
804 | * do to satisfy alignment constraints. | ||
805 | */ | ||
806 | skb->truesize -= 4096; | ||
807 | |||
808 | data = (unsigned long)(skb->data); | ||
809 | roundup = (-data) & (4095); | ||
810 | skb_reserve(skb, roundup); | ||
811 | return skb; | ||
812 | } | ||
813 | |||
814 | /* Allocate 2x as much space as required and use whichever portion | ||
815 | * does not cross a 4KB boundary */ | ||
816 | static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes) | ||
817 | { | ||
818 | struct sk_buff *skb; | ||
819 | unsigned long data, boundary; | ||
820 | |||
821 | skb = dev_alloc_skb(2 * (bytes + MXGEFW_PAD) - 1); | ||
822 | if (unlikely(skb == NULL)) | ||
823 | return NULL; | ||
824 | |||
825 | /* Correct skb->truesize so that socket buffer | ||
826 | * accounting is not confused by the rounding we must | ||
827 | * do to satisfy alignment constraints. | ||
828 | */ | ||
829 | skb->truesize -= bytes + MXGEFW_PAD; | ||
830 | |||
831 | data = (unsigned long)(skb->data); | ||
832 | boundary = (data + 4095UL) & ~4095UL; | ||
833 | if ((boundary - data) >= (bytes + MXGEFW_PAD)) | ||
834 | return skb; | ||
835 | |||
836 | skb_reserve(skb, boundary - data); | ||
837 | return skb; | ||
838 | } | ||
839 | |||
840 | /* Allocate just enough space, and verify that the allocated | ||
841 | * space does not cross a 4KB boundary */ | ||
842 | static inline struct sk_buff *myri10ge_alloc_small(int bytes) | ||
843 | { | ||
844 | struct sk_buff *skb; | ||
845 | unsigned long roundup, data, end; | ||
846 | |||
847 | skb = dev_alloc_skb(bytes + 16 + MXGEFW_PAD); | ||
848 | if (unlikely(skb == NULL)) | ||
849 | return NULL; | ||
850 | |||
851 | /* Round allocated buffer to 16 byte boundary */ | ||
852 | data = (unsigned long)(skb->data); | ||
853 | roundup = (-data) & 15UL; | ||
854 | skb_reserve(skb, roundup); | ||
855 | /* Verify that the data buffer does not cross a page boundary */ | ||
856 | data = (unsigned long)(skb->data); | ||
857 | end = data + bytes + MXGEFW_PAD - 1; | ||
858 | if (unlikely(((end >> 12) != (data >> 12)) && (data & 4095UL))) { | ||
859 | printk(KERN_NOTICE | ||
860 | "myri10ge_alloc_small: small skb crossed 4KB boundary\n"); | ||
861 | myri10ge_skb_cross_4k = 1; | ||
862 | dev_kfree_skb_any(skb); | ||
863 | skb = myri10ge_alloc_small_safe(bytes); | ||
864 | } | ||
865 | return skb; | ||
866 | } | ||
867 | |||
868 | static inline int | ||
869 | myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes, | ||
870 | int idx) | ||
871 | { | ||
872 | struct sk_buff *skb; | ||
873 | dma_addr_t bus; | ||
874 | int len, retval = 0; | ||
875 | |||
876 | bytes += VLAN_HLEN; /* account for 802.1q vlan tag */ | ||
877 | |||
878 | if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ ) | ||
879 | skb = myri10ge_alloc_big(bytes); | ||
880 | else if (myri10ge_skb_cross_4k) | ||
881 | skb = myri10ge_alloc_small_safe(bytes); | ||
882 | else | ||
883 | skb = myri10ge_alloc_small(bytes); | ||
884 | |||
885 | if (unlikely(skb == NULL)) { | ||
886 | rx->alloc_fail++; | ||
887 | retval = -ENOBUFS; | ||
888 | goto done; | ||
889 | } | ||
890 | |||
891 | /* set len so that it only covers the area we | ||
892 | * need mapped for DMA */ | ||
893 | len = bytes + MXGEFW_PAD; | ||
894 | |||
895 | bus = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE); | ||
896 | rx->info[idx].skb = skb; | ||
897 | pci_unmap_addr_set(&rx->info[idx], bus, bus); | ||
898 | pci_unmap_len_set(&rx->info[idx], len, len); | ||
899 | rx->shadow[idx].addr_low = htonl(MYRI10GE_LOWPART_TO_U32(bus)); | ||
900 | rx->shadow[idx].addr_high = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); | ||
901 | |||
902 | done: | ||
903 | /* copy 8 descriptors (64-bytes) to the mcp at a time */ | ||
904 | if ((idx & 7) == 7) { | ||
905 | if (rx->wc_fifo == NULL) | ||
906 | myri10ge_submit_8rx(&rx->lanai[idx - 7], | ||
907 | &rx->shadow[idx - 7]); | ||
908 | else { | ||
909 | mb(); | ||
910 | myri10ge_pio_copy(rx->wc_fifo, | ||
911 | &rx->shadow[idx - 7], 64); | ||
912 | } | ||
913 | } | ||
914 | return retval; | ||
915 | } | ||
916 | |||
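| /* For 802.1q frames carrying IPv4 or IPv6, propagate the hardware | ||
| * checksum supplied by the NIC to the skb (CHECKSUM_HW) so the stack | ||
| * does not have to recompute it. */ | ||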
917 | static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum) | ||
918 | { | ||
919 | struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data); | ||
920 | |||
921 | if ((skb->protocol == ntohs(ETH_P_8021Q)) && | ||
922 | (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) || | ||
923 | vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) { | ||
924 | skb->csum = hw_csum; | ||
925 | skb->ip_summed = CHECKSUM_HW; | ||
926 | } | ||
927 | } | ||
928 | |||
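| /* Handle one received frame: post a replacement buffer first (if that | ||
| * fails the frame is dropped and the old skb stays on the ring), unmap | ||
| * the filled buffer, strip the MXGEFW_PAD alignment bytes, set the | ||
| * length and checksum state, and pass the skb to netif_receive_skb(). | ||
| * Returns 1 if a frame was delivered, 0 if it was dropped. */ | ||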
929 | static inline unsigned long | ||
930 | myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, | ||
931 | int bytes, int len, int csum) | ||
932 | { | ||
933 | dma_addr_t bus; | ||
934 | struct sk_buff *skb; | ||
935 | int idx, unmap_len; | ||
936 | |||
937 | idx = rx->cnt & rx->mask; | ||
938 | rx->cnt++; | ||
939 | |||
940 | /* save a pointer to the received skb */ | ||
941 | skb = rx->info[idx].skb; | ||
942 | bus = pci_unmap_addr(&rx->info[idx], bus); | ||
943 | unmap_len = pci_unmap_len(&rx->info[idx], len); | ||
944 | |||
945 | /* try to replace the received skb */ | ||
946 | if (myri10ge_getbuf(rx, mgp->pdev, bytes, idx)) { | ||
947 | /* drop the frame -- the old skb is recycled */ | ||
948 | mgp->stats.rx_dropped += 1; | ||
949 | return 0; | ||
950 | } | ||
951 | |||
952 | /* unmap the recvd skb */ | ||
953 | pci_unmap_single(mgp->pdev, bus, unmap_len, PCI_DMA_FROMDEVICE); | ||
954 | |||
955 | /* the mcp implicitly skips the first MXGEFW_PAD bytes so that the | ||
956 | * packet is properly aligned */ | ||
957 | skb_reserve(skb, MXGEFW_PAD); | ||
958 | |||
959 | /* set the length of the frame */ | ||
960 | skb_put(skb, len); | ||
961 | |||
962 | skb->protocol = eth_type_trans(skb, mgp->dev); | ||
963 | skb->dev = mgp->dev; | ||
964 | if (mgp->csum_flag) { | ||
965 | if ((skb->protocol == ntohs(ETH_P_IP)) || | ||
966 | (skb->protocol == ntohs(ETH_P_IPV6))) { | ||
967 | skb->csum = ntohs((u16) csum); | ||
968 | skb->ip_summed = CHECKSUM_HW; | ||
969 | } else | ||
970 | myri10ge_vlan_ip_csum(skb, ntohs((u16) csum)); | ||
971 | } | ||
972 | |||
973 | netif_receive_skb(skb); | ||
974 | mgp->dev->last_rx = jiffies; | ||
975 | return 1; | ||
976 | } | ||
977 | |||
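| /* Reclaim transmit descriptors the firmware has completed, advancing | ||
| * tx->done until tx->pkt_done reaches mcp_index: unmap each fragment, | ||
| * free the skb stored with a packet's first descriptor, and wake the | ||
| * queue once at least half of the ring is free again. */ | ||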
978 | static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) | ||
979 | { | ||
980 | struct pci_dev *pdev = mgp->pdev; | ||
981 | struct myri10ge_tx_buf *tx = &mgp->tx; | ||
982 | struct sk_buff *skb; | ||
983 | int idx, len; | ||
984 | int limit = 0; | ||
985 | |||
986 | while (tx->pkt_done != mcp_index) { | ||
987 | idx = tx->done & tx->mask; | ||
988 | skb = tx->info[idx].skb; | ||
989 | |||
990 | /* Mark as free */ | ||
991 | tx->info[idx].skb = NULL; | ||
992 | if (tx->info[idx].last) { | ||
993 | tx->pkt_done++; | ||
994 | tx->info[idx].last = 0; | ||
995 | } | ||
996 | tx->done++; | ||
997 | len = pci_unmap_len(&tx->info[idx], len); | ||
998 | pci_unmap_len_set(&tx->info[idx], len, 0); | ||
999 | if (skb) { | ||
1000 | mgp->stats.tx_bytes += skb->len; | ||
1001 | mgp->stats.tx_packets++; | ||
1002 | dev_kfree_skb_irq(skb); | ||
1003 | if (len) | ||
1004 | pci_unmap_single(pdev, | ||
1005 | pci_unmap_addr(&tx->info[idx], | ||
1006 | bus), len, | ||
1007 | PCI_DMA_TODEVICE); | ||
1008 | } else { | ||
1009 | if (len) | ||
1010 | pci_unmap_page(pdev, | ||
1011 | pci_unmap_addr(&tx->info[idx], | ||
1012 | bus), len, | ||
1013 | PCI_DMA_TODEVICE); | ||
1014 | } | ||
1015 | |||
1016 | /* limit potential for livelock by only handling | ||
1017 | * 2 full tx rings per call */ | ||
1018 | if (unlikely(++limit > 2 * tx->mask)) | ||
1019 | break; | ||
1020 | } | ||
1021 | /* start the queue if we've stopped it */ | ||
1022 | if (netif_queue_stopped(mgp->dev) | ||
1023 | && tx->req - tx->done < (tx->mask >> 1)) { | ||
1024 | mgp->wake_queue++; | ||
1025 | netif_wake_queue(mgp->dev); | ||
1026 | } | ||
1027 | } | ||
1028 | |||
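| /* Drain the receive completion ring: every entry with a non-zero | ||
| * length describes one frame, dispatched to the small or big receive | ||
| * ring based on its length; processing stops once *limit frames have | ||
| * been handled or the ring is empty. */ | ||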
1029 | static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) | ||
1030 | { | ||
1031 | struct myri10ge_rx_done *rx_done = &mgp->rx_done; | ||
1032 | unsigned long rx_bytes = 0; | ||
1033 | unsigned long rx_packets = 0; | ||
1034 | unsigned long rx_ok; | ||
1035 | |||
1036 | int idx = rx_done->idx; | ||
1037 | int cnt = rx_done->cnt; | ||
1038 | u16 length; | ||
1039 | u16 checksum; | ||
1040 | |||
1041 | while (rx_done->entry[idx].length != 0 && *limit != 0) { | ||
1042 | length = ntohs(rx_done->entry[idx].length); | ||
1043 | rx_done->entry[idx].length = 0; | ||
1044 | checksum = ntohs(rx_done->entry[idx].checksum); | ||
1045 | if (length <= mgp->small_bytes) | ||
1046 | rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small, | ||
1047 | mgp->small_bytes, | ||
1048 | length, checksum); | ||
1049 | else | ||
1050 | rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big, | ||
1051 | mgp->dev->mtu + ETH_HLEN, | ||
1052 | length, checksum); | ||
1053 | rx_packets += rx_ok; | ||
1054 | rx_bytes += rx_ok * (unsigned long)length; | ||
1055 | cnt++; | ||
1056 | idx = cnt & (myri10ge_max_intr_slots - 1); | ||
1057 | |||
1058 | /* limit potential for livelock by only handling a | ||
1059 | * limited number of frames. */ | ||
1060 | (*limit)--; | ||
1061 | } | ||
1062 | rx_done->idx = idx; | ||
1063 | rx_done->cnt = cnt; | ||
1064 | mgp->stats.rx_packets += rx_packets; | ||
1065 | mgp->stats.rx_bytes += rx_bytes; | ||
1066 | } | ||
1067 | |||
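| /* The firmware DMAs a small stats block to the host; when it is marked | ||
| * updated, fold link-state changes, RDMA timeout warnings and | ||
| * link-down counts into driver state. */ | ||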
1068 | static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) | ||
1069 | { | ||
1070 | struct mcp_irq_data *stats = mgp->fw_stats; | ||
1071 | |||
1072 | if (unlikely(stats->stats_updated)) { | ||
1073 | if (mgp->link_state != stats->link_up) { | ||
1074 | mgp->link_state = stats->link_up; | ||
1075 | if (mgp->link_state) { | ||
1076 | printk(KERN_INFO "myri10ge: %s: link up\n", | ||
1077 | mgp->dev->name); | ||
1078 | netif_carrier_on(mgp->dev); | ||
1079 | } else { | ||
1080 | printk(KERN_INFO "myri10ge: %s: link down\n", | ||
1081 | mgp->dev->name); | ||
1082 | netif_carrier_off(mgp->dev); | ||
1083 | } | ||
1084 | } | ||
1085 | if (mgp->rdma_tags_available != | ||
1086 | ntohl(mgp->fw_stats->rdma_tags_available)) { | ||
1087 | mgp->rdma_tags_available = | ||
1088 | ntohl(mgp->fw_stats->rdma_tags_available); | ||
1089 | printk(KERN_WARNING "myri10ge: %s: RDMA timed out! " | ||
1090 | "%d tags left\n", mgp->dev->name, | ||
1091 | mgp->rdma_tags_available); | ||
1092 | } | ||
1093 | mgp->down_cnt += stats->link_down; | ||
1094 | if (stats->link_down) | ||
1095 | wake_up(&mgp->down_wq); | ||
1096 | } | ||
1097 | } | ||
1098 | |||
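| /* NAPI poll handler: process up to min(*budget, quota) receive | ||
| * completions, then re-arm interrupts via the irq_claim register once | ||
| * the completion ring is empty. */ | ||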
1099 | static int myri10ge_poll(struct net_device *netdev, int *budget) | ||
1100 | { | ||
1101 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
1102 | struct myri10ge_rx_done *rx_done = &mgp->rx_done; | ||
1103 | int limit, orig_limit, work_done; | ||
1104 | |||
1105 | /* process as many rx events as NAPI will allow */ | ||
1106 | limit = min(*budget, netdev->quota); | ||
1107 | orig_limit = limit; | ||
1108 | myri10ge_clean_rx_done(mgp, &limit); | ||
1109 | work_done = orig_limit - limit; | ||
1110 | *budget -= work_done; | ||
1111 | netdev->quota -= work_done; | ||
1112 | |||
1113 | if (rx_done->entry[rx_done->idx].length == 0 || !netif_running(netdev)) { | ||
1114 | netif_rx_complete(netdev); | ||
1115 | __raw_writel(htonl(3), mgp->irq_claim); | ||
1116 | return 0; | ||
1117 | } | ||
1118 | return 1; | ||
1119 | } | ||
1120 | |||
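| /* Interrupt handler. stats->valid, DMAed by the firmware, shows the | ||
| * IRQ is ours; its low bit means receives are pending, so NAPI is | ||
| * scheduled. For INTx the line is deasserted and we spin (bounded by | ||
| * myri10ge_max_irq_loops) reclaiming transmit completions until the | ||
| * firmware clears valid. */ | ||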
1121 | static irqreturn_t myri10ge_intr(int irq, void *arg, struct pt_regs *regs) | ||
1122 | { | ||
1123 | struct myri10ge_priv *mgp = arg; | ||
1124 | struct mcp_irq_data *stats = mgp->fw_stats; | ||
1125 | struct myri10ge_tx_buf *tx = &mgp->tx; | ||
1126 | u32 send_done_count; | ||
1127 | int i; | ||
1128 | |||
1129 | /* make sure it is our IRQ, and that the DMA has finished */ | ||
1130 | if (unlikely(!stats->valid)) | ||
1131 | return (IRQ_NONE); | ||
1132 | |||
1133 | /* low bit indicates receives are present, so schedule | ||
1134 | * napi poll handler */ | ||
1135 | if (stats->valid & 1) | ||
1136 | netif_rx_schedule(mgp->dev); | ||
1137 | |||
1138 | if (!mgp->msi_enabled) { | ||
1139 | __raw_writel(0, mgp->irq_deassert); | ||
1140 | if (!myri10ge_deassert_wait) | ||
1141 | stats->valid = 0; | ||
1142 | mb(); | ||
1143 | } else | ||
1144 | stats->valid = 0; | ||
1145 | |||
1146 | /* Wait for IRQ line to go low, if using INTx */ | ||
1147 | i = 0; | ||
1148 | while (1) { | ||
1149 | i++; | ||
1150 | /* check for transmit completes and receives */ | ||
1151 | send_done_count = ntohl(stats->send_done_count); | ||
1152 | if (send_done_count != tx->pkt_done) | ||
1153 | myri10ge_tx_done(mgp, (int)send_done_count); | ||
1154 | if (unlikely(i > myri10ge_max_irq_loops)) { | ||
1155 | printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", | ||
1156 | mgp->dev->name); | ||
1157 | stats->valid = 0; | ||
1158 | schedule_work(&mgp->watchdog_work); | ||
1159 | } | ||
1160 | if (likely(stats->valid == 0)) | ||
1161 | break; | ||
1162 | cpu_relax(); | ||
1163 | barrier(); | ||
1164 | } | ||
1165 | |||
1166 | myri10ge_check_statblock(mgp); | ||
1167 | |||
1168 | __raw_writel(htonl(3), mgp->irq_claim + 1); | ||
1169 | return (IRQ_HANDLED); | ||
1170 | } | ||
1171 | |||
1172 | static int | ||
1173 | myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) | ||
1174 | { | ||
1175 | cmd->autoneg = AUTONEG_DISABLE; | ||
1176 | cmd->speed = SPEED_10000; | ||
1177 | cmd->duplex = DUPLEX_FULL; | ||
1178 | return 0; | ||
1179 | } | ||
1180 | |||
1181 | static void | ||
1182 | myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) | ||
1183 | { | ||
1184 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
1185 | |||
1186 | strlcpy(info->driver, "myri10ge", sizeof(info->driver)); | ||
1187 | strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version)); | ||
1188 | strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version)); | ||
1189 | strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info)); | ||
1190 | } | ||
1191 | |||
1192 | static int | ||
1193 | myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) | ||
1194 | { | ||
1195 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
1196 | coal->rx_coalesce_usecs = mgp->intr_coal_delay; | ||
1197 | return 0; | ||
1198 | } | ||
1199 | |||
1200 | static int | ||
1201 | myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) | ||
1202 | { | ||
1203 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
1204 | |||
1205 | mgp->intr_coal_delay = coal->rx_coalesce_usecs; | ||
1206 | __raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); | ||
1207 | return 0; | ||
1208 | } | ||
1209 | |||
1210 | static void | ||
1211 | myri10ge_get_pauseparam(struct net_device *netdev, | ||
1212 | struct ethtool_pauseparam *pause) | ||
1213 | { | ||
1214 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
1215 | |||
1216 | pause->autoneg = 0; | ||
1217 | pause->rx_pause = mgp->pause; | ||
1218 | pause->tx_pause = mgp->pause; | ||
1219 | } | ||
1220 | |||
1221 | static int | ||
1222 | myri10ge_set_pauseparam(struct net_device *netdev, | ||
1223 | struct ethtool_pauseparam *pause) | ||
1224 | { | ||
1225 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
1226 | |||
1227 | if (pause->tx_pause != mgp->pause) | ||
1228 | return myri10ge_change_pause(mgp, pause->tx_pause); | ||
1229 | if (pause->rx_pause != mgp->pause) | ||
1230 | return myri10ge_change_pause(mgp, pause->tx_pause); | ||
1231 | if (pause->autoneg != 0) | ||
1232 | return -EINVAL; | ||
1233 | return 0; | ||
1234 | } | ||
1235 | |||
1236 | static void | ||
1237 | myri10ge_get_ringparam(struct net_device *netdev, | ||
1238 | struct ethtool_ringparam *ring) | ||
1239 | { | ||
1240 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
1241 | |||
1242 | ring->rx_mini_max_pending = mgp->rx_small.mask + 1; | ||
1243 | ring->rx_max_pending = mgp->rx_big.mask + 1; | ||
1244 | ring->rx_jumbo_max_pending = 0; | ||
1245 | ring->tx_max_pending = mgp->rx_small.mask + 1; | ||
1246 | ring->rx_mini_pending = ring->rx_mini_max_pending; | ||
1247 | ring->rx_pending = ring->rx_max_pending; | ||
1248 | ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; | ||
1249 | ring->tx_pending = ring->tx_max_pending; | ||
1250 | } | ||
1251 | |||
1252 | static u32 myri10ge_get_rx_csum(struct net_device *netdev) | ||
1253 | { | ||
1254 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
1255 | if (mgp->csum_flag) | ||
1256 | return 1; | ||
1257 | else | ||
1258 | return 0; | ||
1259 | } | ||
1260 | |||
1261 | static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) | ||
1262 | { | ||
1263 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
1264 | if (csum_enabled) | ||
1265 | mgp->csum_flag = MXGEFW_FLAGS_CKSUM; | ||
1266 | else | ||
1267 | mgp->csum_flag = 0; | ||
1268 | return 0; | ||
1269 | } | ||
1270 | |||
1271 | static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { | ||
1272 | "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", | ||
1273 | "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", | ||
1274 | "rx_length_errors", "rx_over_errors", "rx_crc_errors", | ||
1275 | "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", | ||
1276 | "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", | ||
1277 | "tx_heartbeat_errors", "tx_window_errors", | ||
1278 | /* device-specific stats */ | ||
1279 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", | ||
1280 | "serial_number", "tx_pkt_start", "tx_pkt_done", | ||
1281 | "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", | ||
1282 | "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized", | ||
1283 | "link_up", "dropped_link_overflow", "dropped_link_error_or_filtered", | ||
1284 | "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", | ||
1285 | "dropped_no_big_buffer" | ||
1286 | }; | ||
1287 | |||
1288 | #define MYRI10GE_NET_STATS_LEN 21 | ||
1289 | #define MYRI10GE_STATS_LEN sizeof(myri10ge_gstrings_stats) / ETH_GSTRING_LEN | ||
1290 | |||
1291 | static void | ||
1292 | myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) | ||
1293 | { | ||
1294 | switch (stringset) { | ||
1295 | case ETH_SS_STATS: | ||
1296 | memcpy(data, *myri10ge_gstrings_stats, | ||
1297 | sizeof(myri10ge_gstrings_stats)); | ||
1298 | break; | ||
1299 | } | ||
1300 | } | ||
1301 | |||
1302 | static int myri10ge_get_stats_count(struct net_device *netdev) | ||
1303 | { | ||
1304 | return MYRI10GE_STATS_LEN; | ||
1305 | } | ||
1306 | |||
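| /* The first MYRI10GE_NET_STATS_LEN values mirror struct | ||
| * net_device_stats field for field; the rest are driver counters and | ||
| * firmware statistics, in the order named in myri10ge_gstrings_stats. */ | ||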
1307 | static void | ||
1308 | myri10ge_get_ethtool_stats(struct net_device *netdev, | ||
1309 | struct ethtool_stats *stats, u64 * data) | ||
1310 | { | ||
1311 | struct myri10ge_priv *mgp = netdev_priv(netdev); | ||
1312 | int i; | ||
1313 | |||
1314 | for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) | ||
1315 | data[i] = ((unsigned long *)&mgp->stats)[i]; | ||
1316 | |||
1317 | data[i++] = (unsigned int)mgp->read_dma; | ||
1318 | data[i++] = (unsigned int)mgp->write_dma; | ||
1319 | data[i++] = (unsigned int)mgp->read_write_dma; | ||
1320 | data[i++] = (unsigned int)mgp->serial_number; | ||
1321 | data[i++] = (unsigned int)mgp->tx.pkt_start; | ||
1322 | data[i++] = (unsigned int)mgp->tx.pkt_done; | ||
1323 | data[i++] = (unsigned int)mgp->tx.req; | ||
1324 | data[i++] = (unsigned int)mgp->tx.done; | ||
1325 | data[i++] = (unsigned int)mgp->rx_small.cnt; | ||
1326 | data[i++] = (unsigned int)mgp->rx_big.cnt; | ||
1327 | data[i++] = (unsigned int)mgp->wake_queue; | ||
1328 | data[i++] = (unsigned int)mgp->stop_queue; | ||
1329 | data[i++] = (unsigned int)mgp->watchdog_resets; | ||
1330 | data[i++] = (unsigned int)mgp->tx_linearized; | ||
1331 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up); | ||
1332 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow); | ||
1333 | data[i++] = | ||
1334 | (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered); | ||
1335 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt); | ||
1336 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun); | ||
1337 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer); | ||
1338 | data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer); | ||
1339 | } | ||
1340 | |||
1341 | static struct ethtool_ops myri10ge_ethtool_ops = { | ||
1342 | .get_settings = myri10ge_get_settings, | ||
1343 | .get_drvinfo = myri10ge_get_drvinfo, | ||
1344 | .get_coalesce = myri10ge_get_coalesce, | ||
1345 | .set_coalesce = myri10ge_set_coalesce, | ||
1346 | .get_pauseparam = myri10ge_get_pauseparam, | ||
1347 | .set_pauseparam = myri10ge_set_pauseparam, | ||
1348 | .get_ringparam = myri10ge_get_ringparam, | ||
1349 | .get_rx_csum = myri10ge_get_rx_csum, | ||
1350 | .set_rx_csum = myri10ge_set_rx_csum, | ||
1351 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
1352 | .set_tx_csum = ethtool_op_set_tx_csum, | ||
1353 | .get_sg = ethtool_op_get_sg, | ||
1354 | .set_sg = ethtool_op_set_sg, | ||
1355 | #ifdef NETIF_F_TSO | ||
1356 | .get_tso = ethtool_op_get_tso, | ||
1357 | .set_tso = ethtool_op_set_tso, | ||
1358 | #endif | ||
1359 | .get_strings = myri10ge_get_strings, | ||
1360 | .get_stats_count = myri10ge_get_stats_count, | ||
1361 | .get_ethtool_stats = myri10ge_get_ethtool_stats | ||
1362 | }; | ||
1363 | |||
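| /* Ask the firmware for its send and receive ring sizes, allocate the | ||
| * host-side shadow and info arrays, and pre-fill both receive rings | ||
| * with buffers; on failure everything allocated so far is unwound. */ | ||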
1364 | static int myri10ge_allocate_rings(struct net_device *dev) | ||
1365 | { | ||
1366 | struct myri10ge_priv *mgp; | ||
1367 | struct myri10ge_cmd cmd; | ||
1368 | int tx_ring_size, rx_ring_size; | ||
1369 | int tx_ring_entries, rx_ring_entries; | ||
1370 | int i, status; | ||
1371 | size_t bytes; | ||
1372 | |||
1373 | mgp = netdev_priv(dev); | ||
1374 | |||
1375 | /* get ring sizes */ | ||
1376 | |||
1377 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); | ||
1378 | tx_ring_size = cmd.data0; | ||
1379 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); | ||
1380 | rx_ring_size = cmd.data0; | ||
1381 | |||
1382 | tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); | ||
1383 | rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); | ||
1384 | mgp->tx.mask = tx_ring_entries - 1; | ||
1385 | mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1; | ||
1386 | |||
1387 | /* allocate the host shadow rings */ | ||
1388 | |||
1389 | bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) | ||
1390 | * sizeof(*mgp->tx.req_list); | ||
1391 | mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); | ||
1392 | if (mgp->tx.req_bytes == NULL) | ||
1393 | goto abort_with_nothing; | ||
1394 | |||
1395 | /* ensure req_list entries are aligned to 8 bytes */ | ||
1396 | mgp->tx.req_list = (struct mcp_kreq_ether_send *) | ||
1397 | ALIGN((unsigned long)mgp->tx.req_bytes, 8); | ||
1398 | |||
1399 | bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow); | ||
1400 | mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); | ||
1401 | if (mgp->rx_small.shadow == NULL) | ||
1402 | goto abort_with_tx_req_bytes; | ||
1403 | |||
1404 | bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow); | ||
1405 | mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); | ||
1406 | if (mgp->rx_big.shadow == NULL) | ||
1407 | goto abort_with_rx_small_shadow; | ||
1408 | |||
1409 | /* allocate the host info rings */ | ||
1410 | |||
1411 | bytes = tx_ring_entries * sizeof(*mgp->tx.info); | ||
1412 | mgp->tx.info = kzalloc(bytes, GFP_KERNEL); | ||
1413 | if (mgp->tx.info == NULL) | ||
1414 | goto abort_with_rx_big_shadow; | ||
1415 | |||
1416 | bytes = rx_ring_entries * sizeof(*mgp->rx_small.info); | ||
1417 | mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL); | ||
1418 | if (mgp->rx_small.info == NULL) | ||
1419 | goto abort_with_tx_info; | ||
1420 | |||
1421 | bytes = rx_ring_entries * sizeof(*mgp->rx_big.info); | ||
1422 | mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL); | ||
1423 | if (mgp->rx_big.info == NULL) | ||
1424 | goto abort_with_rx_small_info; | ||
1425 | |||
1426 | /* Fill the receive rings */ | ||
1427 | |||
1428 | for (i = 0; i <= mgp->rx_small.mask; i++) { | ||
1429 | status = myri10ge_getbuf(&mgp->rx_small, mgp->pdev, | ||
1430 | mgp->small_bytes, i); | ||
1431 | if (status) { | ||
1432 | printk(KERN_ERR | ||
1433 | "myri10ge: %s: alloced only %d small bufs\n", | ||
1434 | dev->name, i); | ||
1435 | goto abort_with_rx_small_ring; | ||
1436 | } | ||
1437 | } | ||
1438 | |||
1439 | for (i = 0; i <= mgp->rx_big.mask; i++) { | ||
1440 | status = | ||
1441 | myri10ge_getbuf(&mgp->rx_big, mgp->pdev, | ||
1442 | dev->mtu + ETH_HLEN, i); | ||
1443 | if (status) { | ||
1444 | printk(KERN_ERR | ||
1445 | "myri10ge: %s: alloced only %d big bufs\n", | ||
1446 | dev->name, i); | ||
1447 | goto abort_with_rx_big_ring; | ||
1448 | } | ||
1449 | } | ||
1450 | |||
1451 | return 0; | ||
1452 | |||
1453 | abort_with_rx_big_ring: | ||
1454 | for (i = 0; i <= mgp->rx_big.mask; i++) { | ||
1455 | if (mgp->rx_big.info[i].skb != NULL) | ||
1456 | dev_kfree_skb_any(mgp->rx_big.info[i].skb); | ||
1457 | if (pci_unmap_len(&mgp->rx_big.info[i], len)) | ||
1458 | pci_unmap_single(mgp->pdev, | ||
1459 | pci_unmap_addr(&mgp->rx_big.info[i], | ||
1460 | bus), | ||
1461 | pci_unmap_len(&mgp->rx_big.info[i], | ||
1462 | len), | ||
1463 | PCI_DMA_FROMDEVICE); | ||
1464 | } | ||
1465 | |||
1466 | abort_with_rx_small_ring: | ||
1467 | for (i = 0; i <= mgp->rx_small.mask; i++) { | ||
1468 | if (mgp->rx_small.info[i].skb != NULL) | ||
1469 | dev_kfree_skb_any(mgp->rx_small.info[i].skb); | ||
1470 | if (pci_unmap_len(&mgp->rx_small.info[i], len)) | ||
1471 | pci_unmap_single(mgp->pdev, | ||
1472 | pci_unmap_addr(&mgp->rx_small.info[i], | ||
1473 | bus), | ||
1474 | pci_unmap_len(&mgp->rx_small.info[i], | ||
1475 | len), | ||
1476 | PCI_DMA_FROMDEVICE); | ||
1477 | } | ||
1478 | kfree(mgp->rx_big.info); | ||
1479 | |||
1480 | abort_with_rx_small_info: | ||
1481 | kfree(mgp->rx_small.info); | ||
1482 | |||
1483 | abort_with_tx_info: | ||
1484 | kfree(mgp->tx.info); | ||
1485 | |||
1486 | abort_with_rx_big_shadow: | ||
1487 | kfree(mgp->rx_big.shadow); | ||
1488 | |||
1489 | abort_with_rx_small_shadow: | ||
1490 | kfree(mgp->rx_small.shadow); | ||
1491 | |||
1492 | abort_with_tx_req_bytes: | ||
1493 | kfree(mgp->tx.req_bytes); | ||
1494 | mgp->tx.req_bytes = NULL; | ||
1495 | mgp->tx.req_list = NULL; | ||
1496 | |||
1497 | abort_with_nothing: | ||
1498 | return status; | ||
1499 | } | ||
1500 | |||
1501 | static void myri10ge_free_rings(struct net_device *dev) | ||
1502 | { | ||
1503 | struct myri10ge_priv *mgp; | ||
1504 | struct sk_buff *skb; | ||
1505 | struct myri10ge_tx_buf *tx; | ||
1506 | int i, len, idx; | ||
1507 | |||
1508 | mgp = netdev_priv(dev); | ||
1509 | |||
1510 | for (i = 0; i <= mgp->rx_big.mask; i++) { | ||
1511 | if (mgp->rx_big.info[i].skb != NULL) | ||
1512 | dev_kfree_skb_any(mgp->rx_big.info[i].skb); | ||
1513 | if (pci_unmap_len(&mgp->rx_big.info[i], len)) | ||
1514 | pci_unmap_single(mgp->pdev, | ||
1515 | pci_unmap_addr(&mgp->rx_big.info[i], | ||
1516 | bus), | ||
1517 | pci_unmap_len(&mgp->rx_big.info[i], | ||
1518 | len), | ||
1519 | PCI_DMA_FROMDEVICE); | ||
1520 | } | ||
1521 | |||
1522 | for (i = 0; i <= mgp->rx_small.mask; i++) { | ||
1523 | if (mgp->rx_small.info[i].skb != NULL) | ||
1524 | dev_kfree_skb_any(mgp->rx_small.info[i].skb); | ||
1525 | if (pci_unmap_len(&mgp->rx_small.info[i], len)) | ||
1526 | pci_unmap_single(mgp->pdev, | ||
1527 | pci_unmap_addr(&mgp->rx_small.info[i], | ||
1528 | bus), | ||
1529 | pci_unmap_len(&mgp->rx_small.info[i], | ||
1530 | len), | ||
1531 | PCI_DMA_FROMDEVICE); | ||
1532 | } | ||
1533 | |||
1534 | tx = &mgp->tx; | ||
1535 | while (tx->done != tx->req) { | ||
1536 | idx = tx->done & tx->mask; | ||
1537 | skb = tx->info[idx].skb; | ||
1538 | |||
1539 | /* Mark as free */ | ||
1540 | tx->info[idx].skb = NULL; | ||
1541 | tx->done++; | ||
1542 | len = pci_unmap_len(&tx->info[idx], len); | ||
1543 | pci_unmap_len_set(&tx->info[idx], len, 0); | ||
1544 | if (skb) { | ||
1545 | mgp->stats.tx_dropped++; | ||
1546 | dev_kfree_skb_any(skb); | ||
1547 | if (len) | ||
1548 | pci_unmap_single(mgp->pdev, | ||
1549 | pci_unmap_addr(&tx->info[idx], | ||
1550 | bus), len, | ||
1551 | PCI_DMA_TODEVICE); | ||
1552 | } else { | ||
1553 | if (len) | ||
1554 | pci_unmap_page(mgp->pdev, | ||
1555 | pci_unmap_addr(&tx->info[idx], | ||
1556 | bus), len, | ||
1557 | PCI_DMA_TODEVICE); | ||
1558 | } | ||
1559 | } | ||
1560 | kfree(mgp->rx_big.info); | ||
1561 | |||
1562 | kfree(mgp->rx_small.info); | ||
1563 | |||
1564 | kfree(mgp->tx.info); | ||
1565 | |||
1566 | kfree(mgp->rx_big.shadow); | ||
1567 | |||
1568 | kfree(mgp->rx_small.shadow); | ||
1569 | |||
1570 | kfree(mgp->tx.req_bytes); | ||
1571 | mgp->tx.req_bytes = NULL; | ||
1572 | mgp->tx.req_list = NULL; | ||
1573 | } | ||
1574 | |||
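| /* Bring the interface up: reset the NIC, pick the small receive buffer | ||
| * size, fetch the lanai ring addresses (and the write-combined FIFOs | ||
| * when MTRR setup succeeded), allocate the rings, tell the firmware | ||
| * the MTU, buffer sizes and stats DMA address, enable polling, bring | ||
| * the link up and start the watchdog timer. */ | ||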
1575 | static int myri10ge_open(struct net_device *dev) | ||
1576 | { | ||
1577 | struct myri10ge_priv *mgp; | ||
1578 | struct myri10ge_cmd cmd; | ||
1579 | int status, big_pow2; | ||
1580 | |||
1581 | mgp = netdev_priv(dev); | ||
1582 | |||
1583 | if (mgp->running != MYRI10GE_ETH_STOPPED) | ||
1584 | return -EBUSY; | ||
1585 | |||
1586 | mgp->running = MYRI10GE_ETH_STARTING; | ||
1587 | status = myri10ge_reset(mgp); | ||
1588 | if (status != 0) { | ||
1589 | printk(KERN_ERR "myri10ge: %s: failed reset\n", dev->name); | ||
1590 | mgp->running = MYRI10GE_ETH_STOPPED; | ||
1591 | return -ENXIO; | ||
1592 | } | ||
1593 | |||
1594 | /* decide what small buffer size to use. For good TCP rx | ||
1595 | * performance, it is important to not receive 1514 byte | ||
1596 | * frames into jumbo buffers, as it confuses the socket buffer | ||
1597 | * accounting code, leading to drops and erratic performance. | ||
1598 | */ | ||
1599 | |||
1600 | if (dev->mtu <= ETH_DATA_LEN) | ||
1601 | mgp->small_bytes = 128; /* enough for a TCP header */ | ||
1602 | else | ||
1603 | mgp->small_bytes = ETH_FRAME_LEN; /* enough for an ETH_DATA_LEN frame */ | ||
1604 | |||
1605 | /* Override the small buffer size? */ | ||
1606 | if (myri10ge_small_bytes > 0) | ||
1607 | mgp->small_bytes = myri10ge_small_bytes; | ||
1608 | |||
1609 | /* If the user sets an obscenely small MTU, adjust the small | ||
1610 | * bytes down to nearly nothing */ | ||
1611 | if (mgp->small_bytes >= (dev->mtu + ETH_HLEN)) | ||
1612 | mgp->small_bytes = 64; | ||
1613 | |||
1614 | /* get the lanai pointers to the send and receive rings */ | ||
1615 | |||
1616 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); | ||
1617 | mgp->tx.lanai = | ||
1618 | (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); | ||
1619 | |||
1620 | status |= | ||
1621 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); | ||
1622 | mgp->rx_small.lanai = | ||
1623 | (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); | ||
1624 | |||
1625 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); | ||
1626 | mgp->rx_big.lanai = | ||
1627 | (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); | ||
1628 | |||
1629 | if (status != 0) { | ||
1630 | printk(KERN_ERR | ||
1631 | "myri10ge: %s: failed to get ring sizes or locations\n", | ||
1632 | dev->name); | ||
1633 | mgp->running = MYRI10GE_ETH_STOPPED; | ||
1634 | return -ENXIO; | ||
1635 | } | ||
1636 | |||
1637 | if (mgp->mtrr >= 0) { | ||
1638 | mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + 0x200000; | ||
1639 | mgp->rx_small.wc_fifo = (u8 __iomem *) mgp->sram + 0x300000; | ||
1640 | mgp->rx_big.wc_fifo = (u8 __iomem *) mgp->sram + 0x340000; | ||
1641 | } else { | ||
1642 | mgp->tx.wc_fifo = NULL; | ||
1643 | mgp->rx_small.wc_fifo = NULL; | ||
1644 | mgp->rx_big.wc_fifo = NULL; | ||
1645 | } | ||
1646 | |||
1647 | status = myri10ge_allocate_rings(dev); | ||
1648 | if (status != 0) | ||
1649 | goto abort_with_nothing; | ||
1650 | |||
1651 | /* The firmware needs the big buffer size as a power of 2. Lie and | ||
1652 | * tell it the buffer is larger, because we only use 1 | ||
1653 | * buffer/pkt, and the mtu will prevent overruns. | ||
1654 | */ | ||
1655 | big_pow2 = dev->mtu + ETH_HLEN + MXGEFW_PAD; | ||
1656 | while ((big_pow2 & (big_pow2 - 1)) != 0) | ||
1657 | big_pow2++; | ||
1658 | |||
1659 | /* now give the firmware the buffer sizes and the MTU */ | ||
1660 | cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN; | ||
1661 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0); | ||
1662 | cmd.data0 = mgp->small_bytes; | ||
1663 | status |= | ||
1664 | myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0); | ||
1665 | cmd.data0 = big_pow2; | ||
1666 | status |= | ||
1667 | myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0); | ||
1668 | if (status) { | ||
1669 | printk(KERN_ERR "myri10ge: %s: Couldn't set buffer sizes\n", | ||
1670 | dev->name); | ||
1671 | goto abort_with_rings; | ||
1672 | } | ||
1673 | |||
1674 | cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus); | ||
1675 | cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus); | ||
1676 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA, &cmd, 0); | ||
1677 | if (status) { | ||
1678 | printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n", | ||
1679 | dev->name); | ||
1680 | goto abort_with_rings; | ||
1681 | } | ||
1682 | |||
1683 | mgp->link_state = -1; | ||
1684 | mgp->rdma_tags_available = 15; | ||
1685 | |||
1686 | netif_poll_enable(mgp->dev); /* must happen prior to any irq */ | ||
1687 | |||
1688 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); | ||
1689 | if (status) { | ||
1690 | printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n", | ||
1691 | dev->name); | ||
1692 | goto abort_with_rings; | ||
1693 | } | ||
1694 | |||
1695 | mgp->wake_queue = 0; | ||
1696 | mgp->stop_queue = 0; | ||
1697 | mgp->running = MYRI10GE_ETH_RUNNING; | ||
1698 | mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; | ||
1699 | add_timer(&mgp->watchdog_timer); | ||
1700 | netif_wake_queue(dev); | ||
1701 | return 0; | ||
1702 | |||
1703 | abort_with_rings: | ||
1704 | myri10ge_free_rings(dev); | ||
1705 | |||
1706 | abort_with_nothing: | ||
1707 | mgp->running = MYRI10GE_ETH_STOPPED; | ||
1708 | return -ENOMEM; | ||
1709 | } | ||
1710 | |||
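| /* Tear down in roughly the reverse order of open: stop the watchdog, | ||
| * disable polling and the queue, send MXGEFW_CMD_ETHERNET_DOWN, wait | ||
| * up to one second for the link-down interrupt, then free the rings. */ | ||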
1711 | static int myri10ge_close(struct net_device *dev) | ||
1712 | { | ||
1713 | struct myri10ge_priv *mgp; | ||
1714 | struct myri10ge_cmd cmd; | ||
1715 | int status, old_down_cnt; | ||
1716 | |||
1717 | mgp = netdev_priv(dev); | ||
1718 | |||
1719 | if (mgp->running != MYRI10GE_ETH_RUNNING) | ||
1720 | return 0; | ||
1721 | |||
1722 | if (mgp->tx.req_bytes == NULL) | ||
1723 | return 0; | ||
1724 | |||
1725 | del_timer_sync(&mgp->watchdog_timer); | ||
1726 | mgp->running = MYRI10GE_ETH_STOPPING; | ||
1727 | netif_poll_disable(mgp->dev); | ||
1728 | netif_carrier_off(dev); | ||
1729 | netif_stop_queue(dev); | ||
1730 | old_down_cnt = mgp->down_cnt; | ||
1731 | mb(); | ||
1732 | status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0); | ||
1733 | if (status) | ||
1734 | printk(KERN_ERR "myri10ge: %s: Couldn't bring down link\n", | ||
1735 | dev->name); | ||
1736 | |||
1737 | wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt, HZ); | ||
1738 | if (old_down_cnt == mgp->down_cnt) | ||
1739 | printk(KERN_ERR "myri10ge: %s never got down irq\n", dev->name); | ||
1740 | |||
1741 | netif_tx_disable(dev); | ||
1742 | |||
1743 | myri10ge_free_rings(dev); | ||
1744 | |||
1745 | mgp->running = MYRI10GE_ETH_STOPPED; | ||
1746 | return 0; | ||
1747 | } | ||
1748 | |||
1749 | /* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy | ||
1750 | * backwards one at a time and handle ring wraps */ | ||
1751 | |||
1752 | static inline void | ||
1753 | myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx, | ||
1754 | struct mcp_kreq_ether_send *src, int cnt) | ||
1755 | { | ||
1756 | int idx, starting_slot; | ||
1757 | starting_slot = tx->req; | ||
1758 | while (cnt > 1) { | ||
1759 | cnt--; | ||
1760 | idx = (starting_slot + cnt) & tx->mask; | ||
1761 | myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src)); | ||
1762 | mb(); | ||
1763 | } | ||
1764 | } | ||
1765 | |||
1766 | /* | ||
1767 | * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy | ||
1768 | * at most 32 bytes at a time, so as to avoid involving the software | ||
1769 | * pio handler in the nic. We re-write the first segment's flags | ||
1770 | * to mark them valid only after writing the entire chain. | ||
1771 | */ | ||
1772 | |||
1773 | static inline void | ||
1774 | myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src, | ||
1775 | int cnt) | ||
1776 | { | ||
1777 | int idx, i; | ||
1778 | struct mcp_kreq_ether_send __iomem *dstp, *dst; | ||
1779 | struct mcp_kreq_ether_send *srcp; | ||
1780 | u8 last_flags; | ||
1781 | |||
1782 | idx = tx->req & tx->mask; | ||
1783 | |||
1784 | last_flags = src->flags; | ||
1785 | src->flags = 0; | ||
1786 | mb(); | ||
1787 | dst = dstp = &tx->lanai[idx]; | ||
1788 | srcp = src; | ||
1789 | |||
1790 | if ((idx + cnt) < tx->mask) { | ||
1791 | for (i = 0; i < (cnt - 1); i += 2) { | ||
1792 | myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src)); | ||
1793 | mb(); /* force write every 32 bytes */ | ||
1794 | srcp += 2; | ||
1795 | dstp += 2; | ||
1796 | } | ||
1797 | } else { | ||
1798 | /* submit all but the first request, and ensure | ||
1799 | * that it is submitted below */ | ||
1800 | myri10ge_submit_req_backwards(tx, src, cnt); | ||
1801 | i = 0; | ||
1802 | } | ||
1803 | if (i < cnt) { | ||
1804 | /* submit the first request */ | ||
1805 | myri10ge_pio_copy(dstp, srcp, sizeof(*src)); | ||
1806 | mb(); /* barrier before setting valid flag */ | ||
1807 | } | ||
1808 | |||
1809 | /* re-write the last 32-bits with the valid flags */ | ||
1810 | src->flags = last_flags; | ||
1811 | __raw_writel(*((u32 *) src + 3), (u32 __iomem *) dst + 3); | ||
1812 | tx->req += cnt; | ||
1813 | mb(); | ||
1814 | } | ||
1815 | |||
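| /* Variant of the above for a write-combining mapping: descriptors are | ||
| * copied to the WC FIFO 64 bytes (four requests) at a time, and a | ||
| * short tail is still copied as a full 64-byte block, which is safe | ||
| * because the request list is allocated four entries larger than | ||
| * strictly needed. */ | ||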
1816 | static inline void | ||
1817 | myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx, | ||
1818 | struct mcp_kreq_ether_send *src, int cnt) | ||
1819 | { | ||
1820 | tx->req += cnt; | ||
1821 | mb(); | ||
1822 | while (cnt >= 4) { | ||
1823 | myri10ge_pio_copy(tx->wc_fifo, src, 64); | ||
1824 | mb(); | ||
1825 | src += 4; | ||
1826 | cnt -= 4; | ||
1827 | } | ||
1828 | if (cnt > 0) { | ||
1829 | /* pad it to 64 bytes. The src is 64 bytes bigger than it | ||
1830 | * needs to be so that we don't overrun it */ | ||
1831 | myri10ge_pio_copy(tx->wc_fifo + (cnt << 18), src, 64); | ||
1832 | mb(); | ||
1833 | } | ||
1834 | } | ||
1835 | |||
1836 | /* | ||
1837 | * Transmit a packet. We need to split the packet so that a single | ||
1838 | * segment does not cross myri10ge->tx.boundary, so this makes segment | ||
1839 | * counting tricky. So rather than try to count segments up front, we | ||
1840 | * just give up if there are currently too few free segments available | ||
1841 | * to hold a reasonably fragmented packet. If we run | ||
1842 | * out of segments while preparing a packet for DMA, we just linearize | ||
1843 | * it and try again. | ||
1844 | */ | ||
1845 | |||
1846 | static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1847 | { | ||
1848 | struct myri10ge_priv *mgp = netdev_priv(dev); | ||
1849 | struct mcp_kreq_ether_send *req; | ||
1850 | struct myri10ge_tx_buf *tx = &mgp->tx; | ||
1851 | struct skb_frag_struct *frag; | ||
1852 | dma_addr_t bus; | ||
1853 | u32 low, high_swapped; | ||
1854 | unsigned int len; | ||
1855 | int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments; | ||
1856 | u16 pseudo_hdr_offset, cksum_offset; | ||
1857 | int cum_len, seglen, boundary, rdma_count; | ||
1858 | u8 flags, odd_flag; | ||
1859 | |||
1860 | again: | ||
1861 | req = tx->req_list; | ||
1862 | avail = tx->mask - 1 - (tx->req - tx->done); | ||
1863 | |||
1864 | mss = 0; | ||
1865 | max_segments = MXGEFW_MAX_SEND_DESC; | ||
1866 | |||
1867 | #ifdef NETIF_F_TSO | ||
1868 | if (skb->len > (dev->mtu + ETH_HLEN)) { | ||
1869 | mss = skb_shinfo(skb)->tso_size; | ||
1870 | if (mss != 0) | ||
1871 | max_segments = MYRI10GE_MAX_SEND_DESC_TSO; | ||
1872 | } | ||
1873 | #endif /*NETIF_F_TSO */ | ||
1874 | |||
1875 | if ((unlikely(avail < max_segments))) { | ||
1876 | /* we are out of transmit resources */ | ||
1877 | mgp->stop_queue++; | ||
1878 | netif_stop_queue(dev); | ||
1879 | return 1; | ||
1880 | } | ||
1881 | |||
1882 | /* Setup checksum offloading, if needed */ | ||
1883 | cksum_offset = 0; | ||
1884 | pseudo_hdr_offset = 0; | ||
1885 | odd_flag = 0; | ||
1886 | flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); | ||
1887 | if (likely(skb->ip_summed == CHECKSUM_HW)) { | ||
1888 | cksum_offset = (skb->h.raw - skb->data); | ||
1889 | pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data; | ||
1890 | /* If the headers are excessively large, then we must | ||
1891 | * fall back to a software checksum */ | ||
1892 | if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) { | ||
1893 | if (skb_checksum_help(skb, 0)) | ||
1894 | goto drop; | ||
1895 | cksum_offset = 0; | ||
1896 | pseudo_hdr_offset = 0; | ||
1897 | } else { | ||
1898 | pseudo_hdr_offset = htons(pseudo_hdr_offset); | ||
1899 | odd_flag = MXGEFW_FLAGS_ALIGN_ODD; | ||
1900 | flags |= MXGEFW_FLAGS_CKSUM; | ||
1901 | } | ||
1902 | } | ||
1903 | |||
1904 | cum_len = 0; | ||
1905 | |||
1906 | #ifdef NETIF_F_TSO | ||
1907 | if (mss) { /* TSO */ | ||
1908 | /* this removes any CKSUM flag from before */ | ||
1909 | flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST); | ||
1910 | |||
1911 | /* negative cum_len signifies to the | ||
1912 | * send loop that we are still in the | ||
1913 | * header portion of the TSO packet. | ||
1914 | * TSO header must be at most 134 bytes long */ | ||
1915 | cum_len = -((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | ||
1916 | |||
1917 | /* for TSO, pseudo_hdr_offset holds mss. | ||
1918 | * The firmware figures out where to put | ||
1919 | * the checksum by parsing the header. */ | ||
1920 | pseudo_hdr_offset = htons(mss); | ||
1921 | } else | ||
1922 | #endif /*NETIF_F_TSO */ | ||
1923 | /* Mark small packets, and pad out tiny packets */ | ||
1924 | if (skb->len <= MXGEFW_SEND_SMALL_SIZE) { | ||
1925 | flags |= MXGEFW_FLAGS_SMALL; | ||
1926 | |||
1927 | /* pad frames to at least ETH_ZLEN bytes */ | ||
1928 | if (unlikely(skb->len < ETH_ZLEN)) { | ||
1929 | skb = skb_padto(skb, ETH_ZLEN); | ||
1930 | if (skb == NULL) { | ||
1931 | /* The packet is gone, so we must | ||
1932 | * return 0 */ | ||
1933 | mgp->stats.tx_dropped += 1; | ||
1934 | return 0; | ||
1935 | } | ||
1936 | /* adjust the len to account for the zero pad | ||
1937 | * so that the nic can know how long it is */ | ||
1938 | skb->len = ETH_ZLEN; | ||
1939 | } | ||
1940 | } | ||
1941 | |||
1942 | /* map the skb for DMA */ | ||
1943 | len = skb->len - skb->data_len; | ||
1944 | idx = tx->req & tx->mask; | ||
1945 | tx->info[idx].skb = skb; | ||
1946 | bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
1947 | pci_unmap_addr_set(&tx->info[idx], bus, bus); | ||
1948 | pci_unmap_len_set(&tx->info[idx], len, len); | ||
1949 | |||
1950 | frag_cnt = skb_shinfo(skb)->nr_frags; | ||
1951 | frag_idx = 0; | ||
1952 | count = 0; | ||
1953 | rdma_count = 0; | ||
1954 | |||
1955 | /* "rdma_count" is the number of RDMAs belonging to the | ||
1956 | * current packet BEFORE the current send request. For | ||
1957 | * non-TSO packets, this is equal to "count". | ||
1958 | * For TSO packets, rdma_count needs to be reset | ||
1959 | * to 0 after a segment cut. | ||
1960 | * | ||
1961 | * The rdma_count field of the send request is | ||
1962 | * the number of RDMAs of the packet starting at | ||
1963 | * that request. For TSO send requests with one or more cuts | ||
1964 | * in the middle, this is the number of RDMAs starting | ||
1965 | * after the last cut in the request. All previous | ||
1966 | * segments before the last cut implicitly have 1 RDMA. | ||
1967 | * | ||
1968 | * Since the number of RDMAs is not known beforehand, | ||
1969 | * it must be filled-in retroactively - after each | ||
1970 | * segmentation cut or at the end of the entire packet. | ||
1971 | */ | ||
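| /* | ||
| * For example (non-TSO): a packet split into three send requests | ||
| * leaves the loop below with rdma_count == 3; the assignment after | ||
| * the loop then patches the first request's rdma_count to 3, while | ||
| * the later requests keep the rdma_count of 1 written inside the loop. | ||
| */ | ||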
1972 | |||
1973 | while (1) { | ||
1974 | /* Break the SKB or Fragment up into pieces which | ||
1975 | * do not cross mgp->tx.boundary */ | ||
1976 | low = MYRI10GE_LOWPART_TO_U32(bus); | ||
1977 | high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); | ||
1978 | while (len) { | ||
1979 | u8 flags_next; | ||
1980 | int cum_len_next; | ||
1981 | |||
1982 | if (unlikely(count == max_segments)) | ||
1983 | goto abort_linearize; | ||
1984 | |||
1985 | boundary = (low + tx->boundary) & ~(tx->boundary - 1); | ||
1986 | seglen = boundary - low; | ||
1987 | if (seglen > len) | ||
1988 | seglen = len; | ||
1989 | flags_next = flags & ~MXGEFW_FLAGS_FIRST; | ||
1990 | cum_len_next = cum_len + seglen; | ||
1991 | #ifdef NETIF_F_TSO | ||
1992 | if (mss) { /* TSO */ | ||
1993 | (req - rdma_count)->rdma_count = rdma_count + 1; | ||
1994 | |||
1995 | if (likely(cum_len >= 0)) { /* payload */ | ||
1996 | int next_is_first, chop; | ||
1997 | |||
1998 | chop = (cum_len_next > mss); | ||
1999 | cum_len_next = cum_len_next % mss; | ||
2000 | next_is_first = (cum_len_next == 0); | ||
2001 | flags |= chop * MXGEFW_FLAGS_TSO_CHOP; | ||
2002 | flags_next |= next_is_first * | ||
2003 | MXGEFW_FLAGS_FIRST; | ||
2004 | rdma_count |= -(chop | next_is_first); | ||
2005 | rdma_count += chop & !next_is_first; | ||
2006 | } else if (likely(cum_len_next >= 0)) { /* header ends */ | ||
2007 | int small; | ||
2008 | |||
2009 | rdma_count = -1; | ||
2010 | cum_len_next = 0; | ||
2011 | seglen = -cum_len; | ||
2012 | small = (mss <= MXGEFW_SEND_SMALL_SIZE); | ||
2013 | flags_next = MXGEFW_FLAGS_TSO_PLD | | ||
2014 | MXGEFW_FLAGS_FIRST | | ||
2015 | (small * MXGEFW_FLAGS_SMALL); | ||
2016 | } | ||
2017 | } | ||
2018 | #endif /* NETIF_F_TSO */ | ||
2019 | req->addr_high = high_swapped; | ||
2020 | req->addr_low = htonl(low); | ||
2021 | req->pseudo_hdr_offset = pseudo_hdr_offset; | ||
2022 | req->pad = 0; /* complete solid 16-byte block; does this matter? */ | ||
2023 | req->rdma_count = 1; | ||
2024 | req->length = htons(seglen); | ||
2025 | req->cksum_offset = cksum_offset; | ||
2026 | req->flags = flags | ((cum_len & 1) * odd_flag); | ||
2027 | |||
2028 | low += seglen; | ||
2029 | len -= seglen; | ||
2030 | cum_len = cum_len_next; | ||
2031 | flags = flags_next; | ||
2032 | req++; | ||
2033 | count++; | ||
2034 | rdma_count++; | ||
2035 | if (unlikely(cksum_offset > seglen)) | ||
2036 | cksum_offset -= seglen; | ||
2037 | else | ||
2038 | cksum_offset = 0; | ||
2039 | } | ||
2040 | if (frag_idx == frag_cnt) | ||
2041 | break; | ||
2042 | |||
2043 | /* map next fragment for DMA */ | ||
2044 | idx = (count + tx->req) & tx->mask; | ||
2045 | frag = &skb_shinfo(skb)->frags[frag_idx]; | ||
2046 | frag_idx++; | ||
2047 | len = frag->size; | ||
2048 | bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset, | ||
2049 | len, PCI_DMA_TODEVICE); | ||
2050 | pci_unmap_addr_set(&tx->info[idx], bus, bus); | ||
2051 | pci_unmap_len_set(&tx->info[idx], len, len); | ||
2052 | } | ||
2053 | |||
2054 | (req - rdma_count)->rdma_count = rdma_count; | ||
2055 | #ifdef NETIF_F_TSO | ||
2056 | if (mss) | ||
2057 | do { | ||
2058 | req--; | ||
2059 | req->flags |= MXGEFW_FLAGS_TSO_LAST; | ||
2060 | } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | | ||
2061 | MXGEFW_FLAGS_FIRST))); | ||
2062 | #endif | ||
2063 | idx = ((count - 1) + tx->req) & tx->mask; | ||
2064 | tx->info[idx].last = 1; | ||
2065 | if (tx->wc_fifo == NULL) | ||
2066 | myri10ge_submit_req(tx, tx->req_list, count); | ||
2067 | else | ||
2068 | myri10ge_submit_req_wc(tx, tx->req_list, count); | ||
2069 | tx->pkt_start++; | ||
2070 | if ((avail - count) < MXGEFW_MAX_SEND_DESC) { | ||
2071 | mgp->stop_queue++; | ||
2072 | netif_stop_queue(dev); | ||
2073 | } | ||
2074 | dev->trans_start = jiffies; | ||
2075 | return 0; | ||
2076 | |||
2077 | abort_linearize: | ||
2078 | /* Free any DMA resources we've alloced and clear out the skb | ||
2079 | * slot so as to not trip up assertions, and to avoid a | ||
2080 | * double-free if linearizing fails */ | ||
2081 | |||
2082 | last_idx = (idx + 1) & tx->mask; | ||
2083 | idx = tx->req & tx->mask; | ||
2084 | tx->info[idx].skb = NULL; | ||
2085 | do { | ||
2086 | len = pci_unmap_len(&tx->info[idx], len); | ||
2087 | if (len) { | ||
2088 | if (tx->info[idx].skb != NULL) | ||
2089 | pci_unmap_single(mgp->pdev, | ||
2090 | pci_unmap_addr(&tx->info[idx], | ||
2091 | bus), len, | ||
2092 | PCI_DMA_TODEVICE); | ||
2093 | else | ||
2094 | pci_unmap_page(mgp->pdev, | ||
2095 | pci_unmap_addr(&tx->info[idx], | ||
2096 | bus), len, | ||
2097 | PCI_DMA_TODEVICE); | ||
2098 | pci_unmap_len_set(&tx->info[idx], len, 0); | ||
2099 | tx->info[idx].skb = NULL; | ||
2100 | } | ||
2101 | idx = (idx + 1) & tx->mask; | ||
2102 | } while (idx != last_idx); | ||
2103 | if (skb_shinfo(skb)->tso_size) { | ||
2104 | printk(KERN_ERR | ||
2105 | "myri10ge: %s: TSO but wanted to linearize?!?!?\n", | ||
2106 | mgp->dev->name); | ||
2107 | goto drop; | ||
2108 | } | ||
2109 | |||
2110 | if (skb_linearize(skb, GFP_ATOMIC)) | ||
2111 | goto drop; | ||
2112 | |||
2113 | mgp->tx_linearized++; | ||
2114 | goto again; | ||
2115 | |||
2116 | drop: | ||
2117 | dev_kfree_skb_any(skb); | ||
2118 | mgp->stats.tx_dropped += 1; | ||
2119 | return 0; | ||
2120 | |||
2121 | } | ||
2122 | |||
2123 | static struct net_device_stats *myri10ge_get_stats(struct net_device *dev) | ||
2124 | { | ||
2125 | struct myri10ge_priv *mgp = netdev_priv(dev); | ||
2126 | return &mgp->stats; | ||
2127 | } | ||
2128 | |||
2129 | static void myri10ge_set_multicast_list(struct net_device *dev) | ||
2130 | { | ||
2131 | /* can be called from atomic contexts, | ||
2132 | * pass 1 to force atomicity in myri10ge_send_cmd() */ | ||
2133 | myri10ge_change_promisc(netdev_priv(dev), dev->flags & IFF_PROMISC, 1); | ||
2134 | } | ||
2135 | |||
2136 | static int myri10ge_set_mac_address(struct net_device *dev, void *addr) | ||
2137 | { | ||
2138 | struct sockaddr *sa = addr; | ||
2139 | struct myri10ge_priv *mgp = netdev_priv(dev); | ||
2140 | int status; | ||
2141 | |||
2142 | if (!is_valid_ether_addr(sa->sa_data)) | ||
2143 | return -EADDRNOTAVAIL; | ||
2144 | |||
2145 | status = myri10ge_update_mac_address(mgp, sa->sa_data); | ||
2146 | if (status != 0) { | ||
2147 | printk(KERN_ERR | ||
2148 | "myri10ge: %s: changing mac address failed with %d\n", | ||
2149 | dev->name, status); | ||
2150 | return status; | ||
2151 | } | ||
2152 | |||
2153 | /* change the dev structure */ | ||
2154 | memcpy(dev->dev_addr, sa->sa_data, 6); | ||
2155 | return 0; | ||
2156 | } | ||
2157 | |||
2158 | static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) | ||
2159 | { | ||
2160 | struct myri10ge_priv *mgp = netdev_priv(dev); | ||
2161 | int error = 0; | ||
2162 | |||
2163 | if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) { | ||
2164 | printk(KERN_ERR "myri10ge: %s: new mtu (%d) is not valid\n", | ||
2165 | dev->name, new_mtu); | ||
2166 | return -EINVAL; | ||
2167 | } | ||
2168 | printk(KERN_INFO "%s: changing mtu from %d to %d\n", | ||
2169 | dev->name, dev->mtu, new_mtu); | ||
2170 | if (mgp->running) { | ||
2171 | /* if we change the mtu on an active device, we must | ||
2172 | * reset the device so the firmware sees the change */ | ||
2173 | myri10ge_close(dev); | ||
2174 | dev->mtu = new_mtu; | ||
2175 | myri10ge_open(dev); | ||
2176 | } else | ||
2177 | dev->mtu = new_mtu; | ||
2178 | |||
2179 | return error; | ||
2180 | } | ||
2181 | |||
2182 | /* | ||
2183 | * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary. | ||
2184 | * Only do it if the bridge is a root port since we don't want to disturb | ||
2185 | * any other device, except if forced with myri10ge_ecrc_enable > 1. | ||
2186 | */ | ||
2187 | |||
2188 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE 0x005d | ||
2189 | |||
2190 | static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) | ||
2191 | { | ||
2192 | struct pci_dev *bridge = mgp->pdev->bus->self; | ||
2193 | struct device *dev = &mgp->pdev->dev; | ||
2194 | unsigned cap; | ||
2195 | unsigned err_cap; | ||
2196 | u16 val; | ||
2197 | u8 ext_type; | ||
2198 | int ret; | ||
2199 | |||
2200 | if (!myri10ge_ecrc_enable || !bridge) | ||
2201 | return; | ||
2202 | |||
2203 | /* check that the bridge is a root port */ | ||
2204 | cap = pci_find_capability(bridge, PCI_CAP_ID_EXP); | ||
2205 | pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val); | ||
2206 | ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; | ||
2207 | if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { | ||
2208 | if (myri10ge_ecrc_enable > 1) { | ||
2209 | struct pci_dev *old_bridge = bridge; | ||
2210 | |||
2211 | /* Walk the hierarchy up to the root port | ||
2212 | * where ECRC has to be enabled */ | ||
2213 | do { | ||
2214 | bridge = bridge->bus->self; | ||
2215 | if (!bridge) { | ||
2216 | dev_err(dev, | ||
2217 | "Failed to find root port" | ||
2218 | " to force ECRC\n"); | ||
2219 | return; | ||
2220 | } | ||
2221 | cap = | ||
2222 | pci_find_capability(bridge, PCI_CAP_ID_EXP); | ||
2223 | pci_read_config_word(bridge, | ||
2224 | cap + PCI_CAP_FLAGS, &val); | ||
2225 | ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; | ||
2226 | } while (ext_type != PCI_EXP_TYPE_ROOT_PORT); | ||
2227 | |||
2228 | dev_info(dev, | ||
2229 | "Forcing ECRC on non-root port %s" | ||
2230 | " (enabling on root port %s)\n", | ||
2231 | pci_name(old_bridge), pci_name(bridge)); | ||
2232 | } else { | ||
2233 | dev_err(dev, | ||
2234 | "Not enabling ECRC on non-root port %s\n", | ||
2235 | pci_name(bridge)); | ||
2236 | return; | ||
2237 | } | ||
2238 | } | ||
2239 | |||
2240 | cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR); | ||
2241 | /* nvidia ext cap is not always linked in ext cap chain */ | ||
2242 | if (!cap | ||
2243 | && bridge->vendor == PCI_VENDOR_ID_NVIDIA | ||
2244 | && bridge->device == PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE) | ||
2245 | cap = 0x160; | ||
2246 | |||
2247 | if (!cap) | ||
2248 | return; | ||
2249 | |||
2250 | ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap); | ||
2251 | if (ret) { | ||
2252 | dev_err(dev, "failed reading ext-conf-space of %s\n", | ||
2253 | pci_name(bridge)); | ||
2254 | dev_err(dev, "\t pci=nommconf in use? " | ||
2255 | "or buggy/incomplete/absent ACPI MCFG attr?\n"); | ||
2256 | return; | ||
2257 | } | ||
2258 | if (!(err_cap & PCI_ERR_CAP_ECRC_GENC)) | ||
2259 | return; | ||
2260 | |||
2261 | err_cap |= PCI_ERR_CAP_ECRC_GENE; | ||
2262 | pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap); | ||
2263 | dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge)); | ||
2264 | mgp->tx.boundary = 4096; | ||
2265 | mgp->fw_name = myri10ge_fw_aligned; | ||
2266 | } | ||
2267 | |||
2268 | /* | ||
2269 | * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput | ||
2270 | * when the PCI-E Completion packets are aligned on an 8-byte | ||
2271 | * boundary. Some PCI-E chip sets always align Completion packets; on | ||
2272 | * the ones that do not, the alignment can be enforced by enabling | ||
2273 | * ECRC generation (if supported). | ||
2274 | * | ||
2275 | * When PCI-E Completion packets are not aligned, it is actually more | ||
2276 | * efficient to limit Read-DMA transactions to 2KB, rather than 4KB. | ||
2277 | * | ||
2278 | * If the driver can neither enable ECRC nor verify that it has | ||
2279 | * already been enabled, then it must use a firmware image which works | ||
2280 | * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it | ||
2281 | * should also ensure that it never gives the device a Read-DMA which is | ||
2282 | * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is | ||
2283 | * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) | ||
2284 | * firmware image, and set tx.boundary to 4KB. | ||
2285 | */ | ||
2286 | |||
2287 | #define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132 | ||
2288 | |||
2289 | static void myri10ge_select_firmware(struct myri10ge_priv *mgp) | ||
2290 | { | ||
2291 | struct pci_dev *bridge = mgp->pdev->bus->self; | ||
2292 | |||
2293 | mgp->tx.boundary = 2048; | ||
2294 | mgp->fw_name = myri10ge_fw_unaligned; | ||
2295 | |||
2296 | if (myri10ge_force_firmware == 0) { | ||
2297 | myri10ge_enable_ecrc(mgp); | ||
2298 | |||
2299 | /* Check to see if the upstream bridge is known to | ||
2300 | * provide aligned completions */ | ||
2301 | if (bridge | ||
2302 | /* ServerWorks HT2000/HT1000 */ | ||
2303 | && bridge->vendor == PCI_VENDOR_ID_SERVERWORKS | ||
2304 | && bridge->device == | ||
2305 | PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE) { | ||
2306 | dev_info(&mgp->pdev->dev, | ||
2307 | "Assuming aligned completions (0x%x:0x%x)\n", | ||
2308 | bridge->vendor, bridge->device); | ||
2309 | mgp->tx.boundary = 4096; | ||
2310 | mgp->fw_name = myri10ge_fw_aligned; | ||
2311 | } | ||
2312 | } else { | ||
2313 | if (myri10ge_force_firmware == 1) { | ||
2314 | dev_info(&mgp->pdev->dev, | ||
2315 | "Assuming aligned completions (forced)\n"); | ||
2316 | mgp->tx.boundary = 4096; | ||
2317 | mgp->fw_name = myri10ge_fw_aligned; | ||
2318 | } else { | ||
2319 | dev_info(&mgp->pdev->dev, | ||
2320 | "Assuming unaligned completions (forced)\n"); | ||
2321 | mgp->tx.boundary = 2048; | ||
2322 | mgp->fw_name = myri10ge_fw_unaligned; | ||
2323 | } | ||
2324 | } | ||
2325 | if (myri10ge_fw_name != NULL) { | ||
2326 | dev_info(&mgp->pdev->dev, "overriding firmware to %s\n", | ||
2327 | myri10ge_fw_name); | ||
2328 | mgp->fw_name = myri10ge_fw_name; | ||
2329 | } | ||
2330 | } | ||
2331 | |||
2332 | static void myri10ge_save_state(struct myri10ge_priv *mgp) | ||
2333 | { | ||
2334 | struct pci_dev *pdev = mgp->pdev; | ||
2335 | int cap; | ||
2336 | |||
2337 | pci_save_state(pdev); | ||
2338 | /* now save PCIe and MSI state that Linux will not | ||
2339 | * save for us */ | ||
2340 | cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
2341 | pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &mgp->devctl); | ||
2342 | cap = pci_find_capability(pdev, PCI_CAP_ID_MSI); | ||
2343 | pci_read_config_word(pdev, cap + PCI_MSI_FLAGS, &mgp->msi_flags); | ||
2344 | } | ||
2345 | |||
2346 | static void myri10ge_restore_state(struct myri10ge_priv *mgp) | ||
2347 | { | ||
2348 | struct pci_dev *pdev = mgp->pdev; | ||
2349 | int cap; | ||
2350 | |||
2351 | /* restore PCIe and MSI state that linux will not */ | ||
2352 | cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
2353 | pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, mgp->devctl); | ||
2354 | cap = pci_find_capability(pdev, PCI_CAP_ID_MSI); | ||
2355 | pci_write_config_word(pdev, cap + PCI_MSI_FLAGS, mgp->msi_flags); | ||
2356 | |||
2357 | pci_restore_state(pdev); | ||
2358 | } | ||
2359 | |||
2360 | #ifdef CONFIG_PM | ||
2361 | |||
2362 | static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2363 | { | ||
2364 | struct myri10ge_priv *mgp; | ||
2365 | struct net_device *netdev; | ||
2366 | |||
2367 | mgp = pci_get_drvdata(pdev); | ||
2368 | if (mgp == NULL) | ||
2369 | return -EINVAL; | ||
2370 | netdev = mgp->dev; | ||
2371 | |||
2372 | netif_device_detach(netdev); | ||
2373 | if (netif_running(netdev)) { | ||
2374 | printk(KERN_INFO "myri10ge: closing %s\n", netdev->name); | ||
2375 | rtnl_lock(); | ||
2376 | myri10ge_close(netdev); | ||
2377 | rtnl_unlock(); | ||
2378 | } | ||
2379 | myri10ge_dummy_rdma(mgp, 0); | ||
2380 | free_irq(pdev->irq, mgp); | ||
2381 | myri10ge_save_state(mgp); | ||
2382 | pci_disable_device(pdev); | ||
2383 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
2384 | return 0; | ||
2385 | } | ||
2386 | |||
2387 | static int myri10ge_resume(struct pci_dev *pdev) | ||
2388 | { | ||
2389 | struct myri10ge_priv *mgp; | ||
2390 | struct net_device *netdev; | ||
2391 | int status; | ||
2392 | u16 vendor; | ||
2393 | |||
2394 | mgp = pci_get_drvdata(pdev); | ||
2395 | if (mgp == NULL) | ||
2396 | return -EINVAL; | ||
2397 | netdev = mgp->dev; | ||
2398 | pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */ | ||
2399 | msleep(5); /* give card time to respond */ | ||
2400 | pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); | ||
2401 | if (vendor == 0xffff) { | ||
2402 | printk(KERN_ERR "myri10ge: %s: device disappeared!\n", | ||
2403 | mgp->dev->name); | ||
2404 | return -EIO; | ||
2405 | } | ||
2406 | myri10ge_restore_state(mgp); | ||
2407 | pci_enable_device(pdev); | ||
2408 | pci_set_master(pdev); | ||
2409 | |||
2410 | status = request_irq(pdev->irq, myri10ge_intr, SA_SHIRQ, | ||
2411 | netdev->name, mgp); | ||
2412 | if (status != 0) { | ||
2413 | dev_err(&pdev->dev, "failed to allocate IRQ\n"); | ||
2414 | goto abort_with_msi; | ||
2415 | } | ||
2416 | |||
2417 | myri10ge_reset(mgp); | ||
2418 | myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); | ||
2419 | |||
2420 | /* Save configuration space to be restored if the | ||
2421 | * nic resets due to a parity error */ | ||
2422 | myri10ge_save_state(mgp); | ||
2423 | |||
2424 | if (netif_running(netdev)) { | ||
2425 | rtnl_lock(); | ||
2426 | myri10ge_open(netdev); | ||
2427 | rtnl_unlock(); | ||
2428 | } | ||
2429 | netif_device_attach(netdev); | ||
2430 | |||
2431 | return 0; | ||
2432 | |||
2433 | abort_with_msi: | ||
2434 | return -EIO; | ||
2435 | |||
2436 | } | ||
2437 | |||
2438 | #endif /* CONFIG_PM */ | ||
2439 | |||
2440 | static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp) | ||
2441 | { | ||
2442 | struct pci_dev *pdev = mgp->pdev; | ||
2443 | int vs = mgp->vendor_specific_offset; | ||
2444 | u32 reboot; | ||
2445 | |||
2446 | /* enter read32 mode */ | ||
2447 | pci_write_config_byte(pdev, vs + 0x10, 0x3); | ||
2448 | |||
2449 | /* read REBOOT_STATUS (0xfffffff0) */ | ||
2450 | pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0); | ||
2451 | pci_read_config_dword(pdev, vs + 0x14, &reboot); | ||
2452 | return reboot; | ||
2453 | } | ||
2454 | |||
2455 | /* | ||
2456 | * This watchdog is used to check whether the board has suffered | ||
2457 | * from a parity error and needs to be recovered. | ||
2458 | */ | ||
2459 | static void myri10ge_watchdog(void *arg) | ||
2460 | { | ||
2461 | struct myri10ge_priv *mgp = arg; | ||
2462 | u32 reboot; | ||
2463 | int status; | ||
2464 | u16 cmd, vendor; | ||
2465 | |||
2466 | mgp->watchdog_resets++; | ||
2467 | pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd); | ||
2468 | if ((cmd & PCI_COMMAND_MASTER) == 0) { | ||
2469 | /* Bus master DMA disabled? Check to see | ||
2470 | * if the card rebooted due to a parity error. | ||
2471 | * For now, just report it */ | ||
2472 | reboot = myri10ge_read_reboot(mgp); | ||
2473 | printk(KERN_ERR | ||
2474 | "myri10ge: %s: NIC rebooted (0x%x), resetting\n", | ||
2475 | mgp->dev->name, reboot); | ||
2476 | /* | ||
2477 | * A rebooted nic will come back with config space as | ||
2478 | * it was after power was applied to PCIe bus. | ||
2479 | * Attempt to restore config space which was saved | ||
2480 | * when the driver was loaded, or the last time the | ||
2481 | * nic was resumed from power saving mode. | ||
2482 | */ | ||
2483 | myri10ge_restore_state(mgp); | ||
2484 | } else { | ||
2485 | /* if we get back -1's from our slot, perhaps somebody | ||
2486 | * powered off our card. Don't try to reset it in | ||
2487 | * this case */ | ||
2488 | if (cmd == 0xffff) { | ||
2489 | pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); | ||
2490 | if (vendor == 0xffff) { | ||
2491 | printk(KERN_ERR | ||
2492 | "myri10ge: %s: device disappeared!\n", | ||
2493 | mgp->dev->name); | ||
2494 | return; | ||
2495 | } | ||
2496 | } | ||
2497 | /* Perhaps it is a software error. Try to reset */ | ||
2498 | |||
2499 | printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", | ||
2500 | mgp->dev->name); | ||
2501 | printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", | ||
2502 | mgp->dev->name, mgp->tx.req, mgp->tx.done, | ||
2503 | mgp->tx.pkt_start, mgp->tx.pkt_done, | ||
2504 | (int)ntohl(mgp->fw_stats->send_done_count)); | ||
2505 | msleep(2000); | ||
2506 | printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", | ||
2507 | mgp->dev->name, mgp->tx.req, mgp->tx.done, | ||
2508 | mgp->tx.pkt_start, mgp->tx.pkt_done, | ||
2509 | (int)ntohl(mgp->fw_stats->send_done_count)); | ||
2510 | } | ||
2511 | rtnl_lock(); | ||
2512 | myri10ge_close(mgp->dev); | ||
2513 | status = myri10ge_load_firmware(mgp); | ||
2514 | if (status != 0) | ||
2515 | printk(KERN_ERR "myri10ge: %s: failed to load firmware\n", | ||
2516 | mgp->dev->name); | ||
2517 | else | ||
2518 | myri10ge_open(mgp->dev); | ||
2519 | rtnl_unlock(); | ||
2520 | } | ||
2521 | |||
2522 | /* | ||
2523 | * We use our own timer routine rather than relying upon | ||
2524 | * netdev->tx_timeout because we have a very large hardware transmit | ||
2525 | * queue. Due to the large queue, the netdev->tx_timeout function | ||
2526 | * cannot detect a NIC with a parity error in a timely fashion if the | ||
2527 | * NIC is lightly loaded. | ||
2528 | */ | ||
2529 | static void myri10ge_watchdog_timer(unsigned long arg) | ||
2530 | { | ||
2531 | struct myri10ge_priv *mgp; | ||
2532 | |||
2533 | mgp = (struct myri10ge_priv *)arg; | ||
2534 | if (mgp->tx.req != mgp->tx.done && | ||
2535 | mgp->tx.done == mgp->watchdog_tx_done) | ||
2536 | /* nic seems like it might be stuck.. */ | ||
2537 | schedule_work(&mgp->watchdog_work); | ||
2538 | else | ||
2539 | /* rearm timer */ | ||
2540 | mod_timer(&mgp->watchdog_timer, | ||
2541 | jiffies + myri10ge_watchdog_timeout * HZ); | ||
2542 | |||
2543 | mgp->watchdog_tx_done = mgp->tx.done; | ||
2544 | } | ||
2545 | |||
2546 | static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
2547 | { | ||
2548 | struct net_device *netdev; | ||
2549 | struct myri10ge_priv *mgp; | ||
2550 | struct device *dev = &pdev->dev; | ||
2551 | size_t bytes; | ||
2552 | int i; | ||
2553 | int status = -ENXIO; | ||
2554 | int cap; | ||
2555 | int dac_enabled; | ||
2556 | u16 val; | ||
2557 | |||
2558 | netdev = alloc_etherdev(sizeof(*mgp)); | ||
2559 | if (netdev == NULL) { | ||
2560 | dev_err(dev, "Could not allocate ethernet device\n"); | ||
2561 | return -ENOMEM; | ||
2562 | } | ||
2563 | |||
2564 | mgp = netdev_priv(netdev); | ||
2565 | memset(mgp, 0, sizeof(*mgp)); | ||
2566 | mgp->dev = netdev; | ||
2567 | mgp->pdev = pdev; | ||
2568 | mgp->csum_flag = MXGEFW_FLAGS_CKSUM; | ||
2569 | mgp->pause = myri10ge_flow_control; | ||
2570 | mgp->intr_coal_delay = myri10ge_intr_coal_delay; | ||
2571 | init_waitqueue_head(&mgp->down_wq); | ||
2572 | |||
2573 | if (pci_enable_device(pdev)) { | ||
2574 | dev_err(&pdev->dev, "pci_enable_device call failed\n"); | ||
2575 | status = -ENODEV; | ||
2576 | goto abort_with_netdev; | ||
2577 | } | ||
2578 | myri10ge_select_firmware(mgp); | ||
2579 | |||
2580 | /* Find the vendor-specific cap so we can check | ||
2581 | * the reboot register later on */ | ||
2582 | mgp->vendor_specific_offset | ||
2583 | = pci_find_capability(pdev, PCI_CAP_ID_VNDR); | ||
2584 | |||
2585 | /* Set our max read request to 4KB */ | ||
2586 | cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
2587 | if (cap < 64) { | ||
2588 | dev_err(&pdev->dev, "Bad PCI_CAP_ID_EXP location %d\n", cap); | ||
2589 | goto abort_with_netdev; | ||
2590 | } | ||
2591 | status = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &val); | ||
2592 | if (status != 0) { | ||
2593 | dev_err(&pdev->dev, "Error %d reading PCI_EXP_DEVCTL\n", | ||
2594 | status); | ||
2595 | goto abort_with_netdev; | ||
2596 | } | ||
2597 | val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12); | ||
2598 | status = pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, val); | ||
2599 | if (status != 0) { | ||
2600 | dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n", | ||
2601 | status); | ||
2602 | goto abort_with_netdev; | ||
2603 | } | ||
2604 | |||
2605 | pci_set_master(pdev); | ||
2606 | dac_enabled = 1; | ||
2607 | status = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | ||
2608 | if (status != 0) { | ||
2609 | dac_enabled = 0; | ||
2610 | dev_err(&pdev->dev, | ||
2611 | "64-bit pci address mask was refused, trying 32-bit"); | ||
2612 | status = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
2613 | } | ||
2614 | if (status != 0) { | ||
2615 | dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); | ||
2616 | goto abort_with_netdev; | ||
2617 | } | ||
2618 | mgp->cmd = pci_alloc_consistent(pdev, sizeof(*mgp->cmd), &mgp->cmd_bus); | ||
2619 | if (mgp->cmd == NULL) | ||
2620 | goto abort_with_netdev; | ||
2621 | |||
2622 | mgp->fw_stats = pci_alloc_consistent(pdev, sizeof(*mgp->fw_stats), | ||
2623 | &mgp->fw_stats_bus); | ||
2624 | if (mgp->fw_stats == NULL) | ||
2625 | goto abort_with_cmd; | ||
2626 | |||
2627 | mgp->board_span = pci_resource_len(pdev, 0); | ||
2628 | mgp->iomem_base = pci_resource_start(pdev, 0); | ||
2629 | mgp->mtrr = -1; | ||
2630 | #ifdef CONFIG_MTRR | ||
2631 | mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span, | ||
2632 | MTRR_TYPE_WRCOMB, 1); | ||
2633 | #endif | ||
2634 | /* Hack. need to get rid of these magic numbers */ | ||
2635 | mgp->sram_size = | ||
2636 | 2 * 1024 * 1024 - (2 * (48 * 1024) + (32 * 1024)) - 0x100; | ||
2637 | if (mgp->sram_size > mgp->board_span) { | ||
2638 | dev_err(&pdev->dev, "board span %ld bytes too small\n", | ||
2639 | mgp->board_span); | ||
2640 | goto abort_with_wc; | ||
2641 | } | ||
2642 | mgp->sram = ioremap(mgp->iomem_base, mgp->board_span); | ||
2643 | if (mgp->sram == NULL) { | ||
2644 | dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", | ||
2645 | mgp->board_span, mgp->iomem_base); | ||
2646 | status = -ENXIO; | ||
2647 | goto abort_with_wc; | ||
2648 | } | ||
2649 | memcpy_fromio(mgp->eeprom_strings, | ||
2650 | mgp->sram + mgp->sram_size - MYRI10GE_EEPROM_STRINGS_SIZE, | ||
2651 | MYRI10GE_EEPROM_STRINGS_SIZE); | ||
2652 | memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2); | ||
2653 | status = myri10ge_read_mac_addr(mgp); | ||
2654 | if (status) | ||
2655 | goto abort_with_ioremap; | ||
2656 | |||
2657 | for (i = 0; i < ETH_ALEN; i++) | ||
2658 | netdev->dev_addr[i] = mgp->mac_addr[i]; | ||
2659 | |||
2660 | /* allocate rx done ring */ | ||
2661 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | ||
2662 | mgp->rx_done.entry = | ||
2663 | pci_alloc_consistent(pdev, bytes, &mgp->rx_done.bus); | ||
2664 | if (mgp->rx_done.entry == NULL) | ||
2665 | goto abort_with_ioremap; | ||
2666 | memset(mgp->rx_done.entry, 0, bytes); | ||
2667 | |||
2668 | status = myri10ge_load_firmware(mgp); | ||
2669 | if (status != 0) { | ||
2670 | dev_err(&pdev->dev, "failed to load firmware\n"); | ||
2671 | goto abort_with_rx_done; | ||
2672 | } | ||
2673 | |||
2674 | status = myri10ge_reset(mgp); | ||
2675 | if (status != 0) { | ||
2676 | dev_err(&pdev->dev, "failed reset\n"); | ||
2677 | goto abort_with_firmware; | ||
2678 | } | ||
2679 | |||
2680 | if (myri10ge_msi) { | ||
2681 | status = pci_enable_msi(pdev); | ||
2682 | if (status != 0) | ||
2683 | dev_err(&pdev->dev, | ||
2684 | "Error %d setting up MSI; falling back to xPIC\n", | ||
2685 | status); | ||
2686 | else | ||
2687 | mgp->msi_enabled = 1; | ||
2688 | } | ||
2689 | |||
2690 | status = request_irq(pdev->irq, myri10ge_intr, SA_SHIRQ, | ||
2691 | netdev->name, mgp); | ||
2692 | if (status != 0) { | ||
2693 | dev_err(&pdev->dev, "failed to allocate IRQ\n"); | ||
2694 | goto abort_with_firmware; | ||
2695 | } | ||
2696 | |||
2697 | pci_set_drvdata(pdev, mgp); | ||
2698 | if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU) | ||
2699 | myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; | ||
2700 | if ((myri10ge_initial_mtu + ETH_HLEN) < 68) | ||
2701 | myri10ge_initial_mtu = 68; | ||
2702 | netdev->mtu = myri10ge_initial_mtu; | ||
2703 | netdev->open = myri10ge_open; | ||
2704 | netdev->stop = myri10ge_close; | ||
2705 | netdev->hard_start_xmit = myri10ge_xmit; | ||
2706 | netdev->get_stats = myri10ge_get_stats; | ||
2707 | netdev->base_addr = mgp->iomem_base; | ||
2708 | netdev->irq = pdev->irq; | ||
2709 | netdev->change_mtu = myri10ge_change_mtu; | ||
2710 | netdev->set_multicast_list = myri10ge_set_multicast_list; | ||
2711 | netdev->set_mac_address = myri10ge_set_mac_address; | ||
2712 | netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; | ||
2713 | if (dac_enabled) | ||
2714 | netdev->features |= NETIF_F_HIGHDMA; | ||
2715 | netdev->poll = myri10ge_poll; | ||
2716 | netdev->weight = myri10ge_napi_weight; | ||
2717 | |||
2718 | /* Save configuration space to be restored if the | ||
2719 | * nic resets due to a parity error */ | ||
2720 | myri10ge_save_state(mgp); | ||
2721 | /* Restore state immediately since pci_save_msi_state disables MSI */ | ||
2722 | myri10ge_restore_state(mgp); | ||
2723 | |||
2724 | /* Setup the watchdog timer */ | ||
2725 | setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer, | ||
2726 | (unsigned long)mgp); | ||
2727 | |||
2728 | SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); | ||
2729 | INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp); | ||
2730 | status = register_netdev(netdev); | ||
2731 | if (status != 0) { | ||
2732 | dev_err(&pdev->dev, "register_netdev failed: %d\n", status); | ||
2733 | goto abort_with_irq; | ||
2734 | } | ||
2735 | |||
2736 | printk(KERN_INFO "myri10ge: %s: %s IRQ %d, tx bndry %d, fw %s, WC %s\n", | ||
2737 | netdev->name, (mgp->msi_enabled ? "MSI" : "xPIC"), | ||
2738 | pdev->irq, mgp->tx.boundary, mgp->fw_name, | ||
2739 | (mgp->mtrr >= 0 ? "Enabled" : "Disabled")); | ||
2740 | |||
2741 | return 0; | ||
2742 | |||
2743 | abort_with_irq: | ||
2744 | free_irq(pdev->irq, mgp); | ||
2745 | if (mgp->msi_enabled) | ||
2746 | pci_disable_msi(pdev); | ||
2747 | |||
2748 | abort_with_firmware: | ||
2749 | myri10ge_dummy_rdma(mgp, 0); | ||
2750 | |||
2751 | abort_with_rx_done: | ||
2752 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | ||
2753 | pci_free_consistent(pdev, bytes, mgp->rx_done.entry, mgp->rx_done.bus); | ||
2754 | |||
2755 | abort_with_ioremap: | ||
2756 | iounmap(mgp->sram); | ||
2757 | |||
2758 | abort_with_wc: | ||
2759 | #ifdef CONFIG_MTRR | ||
2760 | if (mgp->mtrr >= 0) | ||
2761 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | ||
2762 | #endif | ||
2763 | pci_free_consistent(pdev, sizeof(*mgp->fw_stats), | ||
2764 | mgp->fw_stats, mgp->fw_stats_bus); | ||
2765 | |||
2766 | abort_with_cmd: | ||
2767 | pci_free_consistent(pdev, sizeof(*mgp->cmd), mgp->cmd, mgp->cmd_bus); | ||
2768 | |||
2769 | abort_with_netdev: | ||
2770 | |||
2771 | free_netdev(netdev); | ||
2772 | return status; | ||
2773 | } | ||
2774 | |||
2775 | /* | ||
2776 | * myri10ge_remove | ||
2777 | * | ||
2778 | * Does what is necessary to shut down one Myrinet device. Called | ||
2779 | * once for each Myrinet card by the kernel when a module is | ||
2780 | * unloaded. | ||
2781 | */ | ||
2782 | static void myri10ge_remove(struct pci_dev *pdev) | ||
2783 | { | ||
2784 | struct myri10ge_priv *mgp; | ||
2785 | struct net_device *netdev; | ||
2786 | size_t bytes; | ||
2787 | |||
2788 | mgp = pci_get_drvdata(pdev); | ||
2789 | if (mgp == NULL) | ||
2790 | return; | ||
2791 | |||
2792 | flush_scheduled_work(); | ||
2793 | netdev = mgp->dev; | ||
2794 | unregister_netdev(netdev); | ||
2795 | free_irq(pdev->irq, mgp); | ||
2796 | if (mgp->msi_enabled) | ||
2797 | pci_disable_msi(pdev); | ||
2798 | |||
2799 | myri10ge_dummy_rdma(mgp, 0); | ||
2800 | |||
2801 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | ||
2802 | pci_free_consistent(pdev, bytes, mgp->rx_done.entry, mgp->rx_done.bus); | ||
2803 | |||
2804 | iounmap(mgp->sram); | ||
2805 | |||
2806 | #ifdef CONFIG_MTRR | ||
2807 | if (mgp->mtrr >= 0) | ||
2808 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | ||
2809 | #endif | ||
2810 | pci_free_consistent(pdev, sizeof(*mgp->fw_stats), | ||
2811 | mgp->fw_stats, mgp->fw_stats_bus); | ||
2812 | |||
2813 | pci_free_consistent(pdev, sizeof(*mgp->cmd), mgp->cmd, mgp->cmd_bus); | ||
2814 | |||
2815 | free_netdev(netdev); | ||
2816 | pci_set_drvdata(pdev, NULL); | ||
2817 | } | ||
2818 | |||
2819 | #define PCI_DEVICE_ID_MYIRCOM_MYRI10GE_Z8E 0x0008 | ||
2820 | |||
2821 | static struct pci_device_id myri10ge_pci_tbl[] = { | ||
2822 | {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYIRCOM_MYRI10GE_Z8E)}, | ||
2823 | {0}, | ||
2824 | }; | ||
2825 | |||
2826 | static struct pci_driver myri10ge_driver = { | ||
2827 | .name = "myri10ge", | ||
2828 | .probe = myri10ge_probe, | ||
2829 | .remove = myri10ge_remove, | ||
2830 | .id_table = myri10ge_pci_tbl, | ||
2831 | #ifdef CONFIG_PM | ||
2832 | .suspend = myri10ge_suspend, | ||
2833 | .resume = myri10ge_resume, | ||
2834 | #endif | ||
2835 | }; | ||
2836 | |||
2837 | static __init int myri10ge_init_module(void) | ||
2838 | { | ||
2839 | printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name, | ||
2840 | MYRI10GE_VERSION_STR); | ||
2841 | return pci_register_driver(&myri10ge_driver); | ||
2842 | } | ||
2843 | |||
2844 | module_init(myri10ge_init_module); | ||
2845 | |||
2846 | static __exit void myri10ge_cleanup_module(void) | ||
2847 | { | ||
2848 | pci_unregister_driver(&myri10ge_driver); | ||
2849 | } | ||
2850 | |||
2851 | module_exit(myri10ge_cleanup_module); | ||
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h new file mode 100644 index 000000000000..0a6cae6cb186 --- /dev/null +++ b/drivers/net/myri10ge/myri10ge_mcp.h | |||
@@ -0,0 +1,205 @@ | |||
1 | #ifndef __MYRI10GE_MCP_H__ | ||
2 | #define __MYRI10GE_MCP_H__ | ||
3 | |||
4 | #define MXGEFW_VERSION_MAJOR 1 | ||
5 | #define MXGEFW_VERSION_MINOR 4 | ||
6 | |||
7 | /* 8 Bytes */ | ||
8 | struct mcp_dma_addr { | ||
9 | u32 high; | ||
10 | u32 low; | ||
11 | }; | ||
12 | |||
13 | /* 4 Bytes */ | ||
14 | struct mcp_slot { | ||
15 | u16 checksum; | ||
16 | u16 length; | ||
17 | }; | ||
18 | |||
19 | /* 64 Bytes */ | ||
20 | struct mcp_cmd { | ||
21 | u32 cmd; | ||
22 | u32 data0; /* will be low portion if data > 32 bits */ | ||
23 | /* 8 */ | ||
24 | u32 data1; /* will be high portion if data > 32 bits */ | ||
25 | u32 data2; /* currently unused.. */ | ||
26 | /* 16 */ | ||
27 | struct mcp_dma_addr response_addr; | ||
28 | /* 24 */ | ||
29 | u8 pad[40]; | ||
30 | }; | ||
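The data0/data1 comments above describe how a value wider than 32 bits is split across the two fields. As a rough, self-contained sketch (not part of this patch; the helper name is illustrative), splitting a 64-bit bus address might look like:

#include <stdint.h>

/* Illustrative only: place the low half of a 64-bit address in data0 and
 * the high half in data1, as the field comments above describe. */
static void mcp_cmd_set_addr64(uint32_t *data0, uint32_t *data1, uint64_t addr)
{
	*data0 = (uint32_t)(addr & 0xffffffffULL);	/* low 32 bits  */
	*data1 = (uint32_t)(addr >> 32);		/* high 32 bits */
}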
31 | |||
32 | /* 8 Bytes */ | ||
33 | struct mcp_cmd_response { | ||
34 | u32 data; | ||
35 | u32 result; | ||
36 | }; | ||
37 | |||
38 | /* | ||
39 | * flags used in mcp_kreq_ether_send_t: | ||
40 | * | ||
41 | * The SMALL flag is only needed in the first segment. It is raised | ||
42 | * for packets whose total length is less than or equal to 512 bytes. | ||
43 | * | ||
44 | * The CKSUM flag must be set in all segments. | ||
45 | * | ||
46 | * The PADDED flag is set if the packet needs to be padded, and it | ||
47 | * must be set for all segments. | ||
48 | * | ||
49 | * The MXGEFW_FLAGS_ALIGN_ODD flag must be set if the cumulative | ||
50 | * length of all previous segments was odd. | ||
51 | */ | ||
52 | |||
53 | #define MXGEFW_FLAGS_SMALL 0x1 | ||
54 | #define MXGEFW_FLAGS_TSO_HDR 0x1 | ||
55 | #define MXGEFW_FLAGS_FIRST 0x2 | ||
56 | #define MXGEFW_FLAGS_ALIGN_ODD 0x4 | ||
57 | #define MXGEFW_FLAGS_CKSUM 0x8 | ||
58 | #define MXGEFW_FLAGS_TSO_LAST 0x8 | ||
59 | #define MXGEFW_FLAGS_NO_TSO 0x10 | ||
60 | #define MXGEFW_FLAGS_TSO_CHOP 0x10 | ||
61 | #define MXGEFW_FLAGS_TSO_PLD 0x20 | ||
62 | |||
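To make the flag rules in the comment above concrete, here is a small hedged sketch (not part of this patch; the helper and its arguments are hypothetical) of how per-segment flags might be combined for a checksummed, multi-segment frame:

#include <stdint.h>

/* Illustrative sketch of the rules above: CKSUM in every segment, FIRST
 * (and, for frames of at most 512 bytes, SMALL) only in the first, and
 * ALIGN_ODD whenever the cumulative length of the previous segments is odd. */
static void example_fill_send_flags(uint8_t *flags, const uint16_t *seg_len,
				    int nseg, int pkt_len)
{
	int i, cum_len = 0;

	for (i = 0; i < nseg; i++) {
		uint8_t f = 0x8;		/* MXGEFW_FLAGS_CKSUM */

		if (i == 0) {
			f |= 0x2;		/* MXGEFW_FLAGS_FIRST */
			if (pkt_len <= 512)
				f |= 0x1;	/* MXGEFW_FLAGS_SMALL */
		}
		if (cum_len & 1)
			f |= 0x4;		/* MXGEFW_FLAGS_ALIGN_ODD */
		flags[i] = f;
		cum_len += seg_len[i];
	}
}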
63 | #define MXGEFW_SEND_SMALL_SIZE 1520 | ||
64 | #define MXGEFW_MAX_MTU 9400 | ||
65 | |||
66 | union mcp_pso_or_cumlen { | ||
67 | u16 pseudo_hdr_offset; | ||
68 | u16 cum_len; | ||
69 | }; | ||
70 | |||
71 | #define MXGEFW_MAX_SEND_DESC 12 | ||
72 | #define MXGEFW_PAD 2 | ||
73 | |||
74 | /* 16 Bytes */ | ||
75 | struct mcp_kreq_ether_send { | ||
76 | u32 addr_high; | ||
77 | u32 addr_low; | ||
78 | u16 pseudo_hdr_offset; | ||
79 | u16 length; | ||
80 | u8 pad; | ||
81 | u8 rdma_count; | ||
82 | u8 cksum_offset; /* where to start computing cksum */ | ||
83 | u8 flags; /* as defined above */ | ||
84 | }; | ||
85 | |||
86 | /* 8 Bytes */ | ||
87 | struct mcp_kreq_ether_recv { | ||
88 | u32 addr_high; | ||
89 | u32 addr_low; | ||
90 | }; | ||
91 | |||
92 | /* Commands */ | ||
93 | |||
94 | #define MXGEFW_CMD_OFFSET 0xf80000 | ||
95 | |||
96 | enum myri10ge_mcp_cmd_type { | ||
97 | MXGEFW_CMD_NONE = 0, | ||
98 | /* Reset the mcp, it is left in a safe state, waiting | ||
99 | * for the driver to set all its parameters */ | ||
100 | MXGEFW_CMD_RESET, | ||
101 | |||
102 | /* get the version number of the current firmware.. | ||
103 | * (may be available in the eeprom strings?) | ||
104 | MXGEFW_GET_MCP_VERSION, | ||
105 | |||
106 | /* Parameters which must be set by the driver before it can | ||
107 | * issue MXGEFW_CMD_ETHERNET_UP. They persist until the next | ||
108 | * MXGEFW_CMD_RESET is issued */ | ||
109 | |||
110 | MXGEFW_CMD_SET_INTRQ_DMA, | ||
111 | MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */ | ||
112 | MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */ | ||
113 | |||
114 | /* Parameters which refer to lanai SRAM addresses where the | ||
115 | * driver must issue PIO writes for various things */ | ||
116 | |||
117 | MXGEFW_CMD_GET_SEND_OFFSET, | ||
118 | MXGEFW_CMD_GET_SMALL_RX_OFFSET, | ||
119 | MXGEFW_CMD_GET_BIG_RX_OFFSET, | ||
120 | MXGEFW_CMD_GET_IRQ_ACK_OFFSET, | ||
121 | MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, | ||
122 | |||
123 | /* Parameters which refer to rings stored on the MCP, | ||
124 | * and whose size is controlled by the mcp */ | ||
125 | |||
126 | MXGEFW_CMD_GET_SEND_RING_SIZE, /* in bytes */ | ||
127 | MXGEFW_CMD_GET_RX_RING_SIZE, /* in bytes */ | ||
128 | |||
129 | /* Parameters which refer to rings stored in the host, | ||
130 | * and whose size is controlled by the host. Note that | ||
131 | * all must be physically contiguous and must contain | ||
132 | * a power of 2 number of entries. */ | ||
133 | |||
134 | MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */ | ||
135 | |||
136 | /* command to bring ethernet interface up. Above parameters | ||
137 | * (plus mtu & mac address) must have been exchanged prior | ||
138 | * to issuing this command */ | ||
139 | MXGEFW_CMD_ETHERNET_UP, | ||
140 | |||
141 | /* command to bring ethernet interface down. No further sends | ||
142 | * or receives may be processed until an MXGEFW_CMD_ETHERNET_UP | ||
143 | * is issued, and all interrupt queues must be flushed prior | ||
144 | * to ack'ing this command */ | ||
145 | |||
146 | MXGEFW_CMD_ETHERNET_DOWN, | ||
147 | |||
148 | /* commands the driver may issue live, without resetting | ||
149 | * the nic. Note that increasing the mtu "live" should | ||
150 | * only be done if the driver has already supplied buffers | ||
151 | * sufficiently large to handle the new mtu. Decreasing | ||
152 | * the mtu live is safe */ | ||
153 | |||
154 | MXGEFW_CMD_SET_MTU, | ||
155 | MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, /* in microseconds */ | ||
156 | MXGEFW_CMD_SET_STATS_INTERVAL, /* in microseconds */ | ||
157 | MXGEFW_CMD_SET_STATS_DMA, | ||
158 | |||
159 | MXGEFW_ENABLE_PROMISC, | ||
160 | MXGEFW_DISABLE_PROMISC, | ||
161 | MXGEFW_SET_MAC_ADDRESS, | ||
162 | |||
163 | MXGEFW_ENABLE_FLOW_CONTROL, | ||
164 | MXGEFW_DISABLE_FLOW_CONTROL, | ||
165 | |||
166 | /* do a DMA test | ||
167 | * data0,data1 = DMA address | ||
168 | * data2 = RDMA length (MSH), WDMA length (LSH) | ||
169 | * command return data = repetitions (MSH), 0.5-ms ticks (LSH) | ||
170 | */ | ||
171 | MXGEFW_DMA_TEST | ||
172 | }; | ||
173 | |||
174 | enum myri10ge_mcp_cmd_status { | ||
175 | MXGEFW_CMD_OK = 0, | ||
176 | MXGEFW_CMD_UNKNOWN, | ||
177 | MXGEFW_CMD_ERROR_RANGE, | ||
178 | MXGEFW_CMD_ERROR_BUSY, | ||
179 | MXGEFW_CMD_ERROR_EMPTY, | ||
180 | MXGEFW_CMD_ERROR_CLOSED, | ||
181 | MXGEFW_CMD_ERROR_HASH_ERROR, | ||
182 | MXGEFW_CMD_ERROR_BAD_PORT, | ||
183 | MXGEFW_CMD_ERROR_RESOURCES | ||
184 | }; | ||
185 | |||
186 | /* 40 Bytes */ | ||
187 | struct mcp_irq_data { | ||
188 | u32 send_done_count; | ||
189 | |||
190 | u32 link_up; | ||
191 | u32 dropped_link_overflow; | ||
192 | u32 dropped_link_error_or_filtered; | ||
193 | u32 dropped_runt; | ||
194 | u32 dropped_overrun; | ||
195 | u32 dropped_no_small_buffer; | ||
196 | u32 dropped_no_big_buffer; | ||
197 | u32 rdma_tags_available; | ||
198 | |||
199 | u8 tx_stopped; | ||
200 | u8 link_down; | ||
201 | u8 stats_updated; | ||
202 | u8 valid; | ||
203 | }; | ||
204 | |||
205 | #endif /* __MYRI10GE_MCP_H__ */ | ||
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h new file mode 100644 index 000000000000..487f7792fd46 --- /dev/null +++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h | |||
@@ -0,0 +1,58 @@ | |||
1 | #ifndef __MYRI10GE_MCP_GEN_HEADER_H__ | ||
2 | #define __MYRI10GE_MCP_GEN_HEADER_H__ | ||
3 | |||
4 | /* This file defines a standard header used as a first entry point to | ||
5 | * exchange information between the firmware (MCP) and the driver. The | ||
6 | * header structure can be anywhere in the MCP. It will usually be in | ||
7 | * the .data section, because some fields need to be initialized at | ||
8 | * compile time. | ||
9 | * The 32-bit word at offset MCP_HEADER_PTR_OFFSET in the MCP must | ||
10 | * contain the location of the header. | ||
11 | * | ||
12 | * Typically a MCP will start with the following: | ||
13 | * .text | ||
14 | * .space 52 ! to help catch MEMORY_INT errors | ||
15 | * bt start ! jump to real code | ||
16 | * nop | ||
17 | * .long _gen_mcp_header | ||
18 | * | ||
19 | * The source will have a definition like: | ||
20 | * | ||
21 | * mcp_gen_header_t gen_mcp_header = { | ||
22 | * .header_length = sizeof(mcp_gen_header_t), | ||
23 | * .mcp_type = MCP_TYPE_XXX, | ||
24 | * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $", | ||
25 | * .mcp_globals = (unsigned)&Globals | ||
26 | * }; | ||
27 | */ | ||
28 | |||
29 | #define MCP_HEADER_PTR_OFFSET 0x3c | ||
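As a rough illustration of the mechanism described in the comment above (a sketch only, not part of this patch; the function name and the endianness handling are assumptions), a loader might locate the generated header inside a firmware image like this:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative sketch: read the 32-bit header pointer stored at offset
 * MCP_HEADER_PTR_OFFSET (0x3c) and bounds-check it against the image.
 * A real loader would also byte-swap the value if the image is stored
 * in a different endianness than the host. */
static long find_mcp_header(const uint8_t *image, size_t image_len)
{
	uint32_t hdr_off;

	if (image_len < 0x3c + sizeof(hdr_off))
		return -1;
	memcpy(&hdr_off, image + 0x3c, sizeof(hdr_off));
	if (hdr_off >= image_len)
		return -1;
	return (long)hdr_off;		/* offset of struct mcp_gen_header */
}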
30 | |||
31 | #define MCP_TYPE_MX 0x4d582020 /* "MX " */ | ||
32 | #define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */ | ||
33 | #define MCP_TYPE_ETH 0x45544820 /* "ETH " */ | ||
34 | #define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */ | ||
35 | |||
36 | struct mcp_gen_header { | ||
37 | /* the first 4 fields are filled at compile time */ | ||
38 | unsigned header_length; | ||
39 | unsigned mcp_type; | ||
40 | char version[128]; | ||
41 | unsigned mcp_globals; /* pointer to mcp-type specific structure */ | ||
42 | |||
43 | /* filled by the MCP at run-time */ | ||
44 | unsigned sram_size; | ||
45 | unsigned string_specs; /* either the original STRING_SPECS or a superset */ | ||
46 | unsigned string_specs_len; | ||
47 | |||
48 | /* Fields above this comment are guaranteed to be present. | ||
49 | * | ||
50 | * Fields below this comment are extensions added in later versions | ||
51 | * of this struct; drivers should compare the header_length against | ||
52 | * offsetof(field) to check whether a given MCP implements them. | ||
53 | * | ||
54 | * Never remove any field. Keep everything naturally aligned. | ||
55 | */ | ||
56 | }; | ||
57 | |||
58 | #endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */ | ||
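The struct comment above says extension fields should be detected by comparing header_length against offsetof(); the following is a minimal sketch of that check (not part of this patch; the struct and macro names are illustrative, and the "offset plus size" form is one reasonable reading of the rule):

#include <stddef.h>

/* Layout mirroring struct mcp_gen_header, for illustration only. */
struct example_gen_header {
	unsigned header_length;
	unsigned mcp_type;
	char version[128];
	unsigned mcp_globals;
	unsigned sram_size;
	unsigned string_specs;
	unsigned string_specs_len;
};

/* An MCP provides a field only if its advertised header is long enough
 * to contain that field in full. */
#define MCP_HAS_FIELD(hdr, field)					\
	((hdr)->header_length >=					\
	 offsetof(struct example_gen_header, field) +			\
	 sizeof(((struct example_gen_header *)0)->field))

For example, MCP_HAS_FIELD(hdr, string_specs_len) would tell a driver whether the firmware's header is new enough to report the string-specs length.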
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index d090df413049..661bfe54ff5d 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -12,7 +12,7 @@ | |||
12 | Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net | 12 | Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net |
13 | 13 | ||
14 | pcnet_cs.c 1.153 2003/11/09 18:53:09 | 14 | pcnet_cs.c 1.153 2003/11/09 18:53:09 |
15 | 15 | ||
16 | The network driver code is based on Donald Becker's NE2000 code: | 16 | The network driver code is based on Donald Becker's NE2000 code: |
17 | 17 | ||
18 | Written 1992,1993 by Donald Becker. | 18 | Written 1992,1993 by Donald Becker. |
@@ -146,7 +146,7 @@ typedef struct hw_info_t { | |||
146 | #define MII_PHYID_REG2 0x03 | 146 | #define MII_PHYID_REG2 0x03 |
147 | 147 | ||
148 | static hw_info_t hw_info[] = { | 148 | static hw_info_t hw_info[] = { |
149 | { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT }, | 149 | { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT }, |
150 | { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 }, | 150 | { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 }, |
151 | { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 }, | 151 | { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 }, |
152 | { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94, | 152 | { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94, |
@@ -193,7 +193,7 @@ static hw_info_t hw_info[] = { | |||
193 | { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 }, | 193 | { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 }, |
194 | { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65, | 194 | { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65, |
195 | HAS_MISC_REG | HAS_IBM_MISC }, | 195 | HAS_MISC_REG | HAS_IBM_MISC }, |
196 | { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45, | 196 | { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45, |
197 | HAS_MISC_REG | HAS_IBM_MISC }, | 197 | HAS_MISC_REG | HAS_IBM_MISC }, |
198 | { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 }, | 198 | { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 }, |
199 | { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 }, | 199 | { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 }, |
@@ -330,7 +330,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link) | |||
330 | for (j = 0; j < 6; j++) | 330 | for (j = 0; j < 6; j++) |
331 | dev->dev_addr[j] = readb(base + (j<<1)); | 331 | dev->dev_addr[j] = readb(base + (j<<1)); |
332 | } | 332 | } |
333 | 333 | ||
334 | iounmap(virt); | 334 | iounmap(virt); |
335 | j = pcmcia_release_window(link->win); | 335 | j = pcmcia_release_window(link->win); |
336 | if (j != CS_SUCCESS) | 336 | if (j != CS_SUCCESS) |
@@ -490,7 +490,7 @@ static int try_io_port(struct pcmcia_device *link) | |||
490 | if (link->io.NumPorts2 > 0) { | 490 | if (link->io.NumPorts2 > 0) { |
491 | /* for master/slave multifunction cards */ | 491 | /* for master/slave multifunction cards */ |
492 | link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; | 492 | link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; |
493 | link->irq.Attributes = | 493 | link->irq.Attributes = |
494 | IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; | 494 | IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; |
495 | } | 495 | } |
496 | } else { | 496 | } else { |
@@ -543,19 +543,19 @@ static int pcnet_config(struct pcmcia_device *link) | |||
543 | manfid = le16_to_cpu(buf[0]); | 543 | manfid = le16_to_cpu(buf[0]); |
544 | prodid = le16_to_cpu(buf[1]); | 544 | prodid = le16_to_cpu(buf[1]); |
545 | } | 545 | } |
546 | 546 | ||
547 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; | 547 | tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; |
548 | tuple.Attributes = 0; | 548 | tuple.Attributes = 0; |
549 | CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); | 549 | CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); |
550 | while (last_ret == CS_SUCCESS) { | 550 | while (last_ret == CS_SUCCESS) { |
551 | cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); | 551 | cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); |
552 | cistpl_io_t *io = &(parse.cftable_entry.io); | 552 | cistpl_io_t *io = &(parse.cftable_entry.io); |
553 | 553 | ||
554 | if (pcmcia_get_tuple_data(link, &tuple) != 0 || | 554 | if (pcmcia_get_tuple_data(link, &tuple) != 0 || |
555 | pcmcia_parse_tuple(link, &tuple, &parse) != 0 || | 555 | pcmcia_parse_tuple(link, &tuple, &parse) != 0 || |
556 | cfg->index == 0 || cfg->io.nwin == 0) | 556 | cfg->index == 0 || cfg->io.nwin == 0) |
557 | goto next_entry; | 557 | goto next_entry; |
558 | 558 | ||
559 | link->conf.ConfigIndex = cfg->index; | 559 | link->conf.ConfigIndex = cfg->index; |
560 | /* For multifunction cards, by convention, we configure the | 560 | /* For multifunction cards, by convention, we configure the |
561 | network function with window 0, and serial with window 1 */ | 561 | network function with window 0, and serial with window 1 */ |
@@ -584,7 +584,7 @@ static int pcnet_config(struct pcmcia_device *link) | |||
584 | } | 584 | } |
585 | 585 | ||
586 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); | 586 | CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); |
587 | 587 | ||
588 | if (link->io.NumPorts2 == 8) { | 588 | if (link->io.NumPorts2 == 8) { |
589 | link->conf.Attributes |= CONF_ENABLE_SPKR; | 589 | link->conf.Attributes |= CONF_ENABLE_SPKR; |
590 | link->conf.Status = CCSR_AUDIO_ENA; | 590 | link->conf.Status = CCSR_AUDIO_ENA; |
@@ -592,7 +592,7 @@ static int pcnet_config(struct pcmcia_device *link) | |||
592 | if ((manfid == MANFID_IBM) && | 592 | if ((manfid == MANFID_IBM) && |
593 | (prodid == PRODID_IBM_HOME_AND_AWAY)) | 593 | (prodid == PRODID_IBM_HOME_AND_AWAY)) |
594 | link->conf.ConfigIndex |= 0x10; | 594 | link->conf.ConfigIndex |= 0x10; |
595 | 595 | ||
596 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); | 596 | CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); |
597 | dev->irq = link->irq.AssignedIRQ; | 597 | dev->irq = link->irq.AssignedIRQ; |
598 | dev->base_addr = link->io.BasePort1; | 598 | dev->base_addr = link->io.BasePort1; |
@@ -614,7 +614,7 @@ static int pcnet_config(struct pcmcia_device *link) | |||
614 | hw_info = get_ax88190(link); | 614 | hw_info = get_ax88190(link); |
615 | if (hw_info == NULL) | 615 | if (hw_info == NULL) |
616 | hw_info = get_hwired(link); | 616 | hw_info = get_hwired(link); |
617 | 617 | ||
618 | if (hw_info == NULL) { | 618 | if (hw_info == NULL) { |
619 | printk(KERN_NOTICE "pcnet_cs: unable to read hardware net" | 619 | printk(KERN_NOTICE "pcnet_cs: unable to read hardware net" |
620 | " address for io base %#3lx\n", dev->base_addr); | 620 | " address for io base %#3lx\n", dev->base_addr); |
@@ -631,7 +631,7 @@ static int pcnet_config(struct pcmcia_device *link) | |||
631 | info->flags &= ~USE_BIG_BUF; | 631 | info->flags &= ~USE_BIG_BUF; |
632 | if (!use_big_buf) | 632 | if (!use_big_buf) |
633 | info->flags &= ~USE_BIG_BUF; | 633 | info->flags &= ~USE_BIG_BUF; |
634 | 634 | ||
635 | if (info->flags & USE_BIG_BUF) { | 635 | if (info->flags & USE_BIG_BUF) { |
636 | start_pg = SOCKET_START_PG; | 636 | start_pg = SOCKET_START_PG; |
637 | stop_pg = SOCKET_STOP_PG; | 637 | stop_pg = SOCKET_STOP_PG; |
@@ -929,7 +929,7 @@ static void set_misc_reg(struct net_device *dev) | |||
929 | kio_addr_t nic_base = dev->base_addr; | 929 | kio_addr_t nic_base = dev->base_addr; |
930 | pcnet_dev_t *info = PRIV(dev); | 930 | pcnet_dev_t *info = PRIV(dev); |
931 | u_char tmp; | 931 | u_char tmp; |
932 | 932 | ||
933 | if (info->flags & HAS_MISC_REG) { | 933 | if (info->flags & HAS_MISC_REG) { |
934 | tmp = inb_p(nic_base + PCNET_MISC) & ~3; | 934 | tmp = inb_p(nic_base + PCNET_MISC) & ~3; |
935 | if (dev->if_port == 2) | 935 | if (dev->if_port == 2) |
@@ -1022,7 +1022,7 @@ static int pcnet_close(struct net_device *dev) | |||
1022 | 1022 | ||
1023 | ei_close(dev); | 1023 | ei_close(dev); |
1024 | free_irq(dev->irq, dev); | 1024 | free_irq(dev->irq, dev); |
1025 | 1025 | ||
1026 | link->open--; | 1026 | link->open--; |
1027 | netif_stop_queue(dev); | 1027 | netif_stop_queue(dev); |
1028 | del_timer_sync(&info->watchdog); | 1028 | del_timer_sync(&info->watchdog); |
@@ -1054,12 +1054,12 @@ static void pcnet_reset_8390(struct net_device *dev) | |||
1054 | udelay(100); | 1054 | udelay(100); |
1055 | } | 1055 | } |
1056 | outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */ | 1056 | outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */ |
1057 | 1057 | ||
1058 | if (i == 100) | 1058 | if (i == 100) |
1059 | printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n", | 1059 | printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n", |
1060 | dev->name); | 1060 | dev->name); |
1061 | set_misc_reg(dev); | 1061 | set_misc_reg(dev); |
1062 | 1062 | ||
1063 | } /* pcnet_reset_8390 */ | 1063 | } /* pcnet_reset_8390 */ |
1064 | 1064 | ||
1065 | /*====================================================================*/ | 1065 | /*====================================================================*/ |
@@ -1233,7 +1233,7 @@ static void dma_get_8390_hdr(struct net_device *dev, | |||
1233 | dev->name, ei_status.dmaing, ei_status.irqlock); | 1233 | dev->name, ei_status.dmaing, ei_status.irqlock); |
1234 | return; | 1234 | return; |
1235 | } | 1235 | } |
1236 | 1236 | ||
1237 | ei_status.dmaing |= 0x01; | 1237 | ei_status.dmaing |= 0x01; |
1238 | outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD); | 1238 | outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD); |
1239 | outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); | 1239 | outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); |
@@ -1458,7 +1458,7 @@ static void shmem_get_8390_hdr(struct net_device *dev, | |||
1458 | void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8) | 1458 | void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8) |
1459 | + (ring_page << 8) | 1459 | + (ring_page << 8) |
1460 | - (ei_status.rx_start_page << 8); | 1460 | - (ei_status.rx_start_page << 8); |
1461 | 1461 | ||
1462 | copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr)); | 1462 | copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr)); |
1463 | /* Fix for big endian systems */ | 1463 | /* Fix for big endian systems */ |
1464 | hdr->count = le16_to_cpu(hdr->count); | 1464 | hdr->count = le16_to_cpu(hdr->count); |
@@ -1473,7 +1473,7 @@ static void shmem_block_input(struct net_device *dev, int count, | |||
1473 | unsigned long offset = (TX_PAGES<<8) + ring_offset | 1473 | unsigned long offset = (TX_PAGES<<8) + ring_offset |
1474 | - (ei_status.rx_start_page << 8); | 1474 | - (ei_status.rx_start_page << 8); |
1475 | char *buf = skb->data; | 1475 | char *buf = skb->data; |
1476 | 1476 | ||
1477 | if (offset + count > ei_status.priv) { | 1477 | if (offset + count > ei_status.priv) { |
1478 | /* We must wrap the input move. */ | 1478 | /* We must wrap the input move. */ |
1479 | int semi_count = ei_status.priv - offset; | 1479 | int semi_count = ei_status.priv - offset; |
@@ -1541,7 +1541,7 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg, | |||
1541 | info->base = NULL; link->win = NULL; | 1541 | info->base = NULL; link->win = NULL; |
1542 | goto failed; | 1542 | goto failed; |
1543 | } | 1543 | } |
1544 | 1544 | ||
1545 | ei_status.mem = info->base + offset; | 1545 | ei_status.mem = info->base + offset; |
1546 | ei_status.priv = req.Size; | 1546 | ei_status.priv = req.Size; |
1547 | dev->mem_start = (u_long)ei_status.mem; | 1547 | dev->mem_start = (u_long)ei_status.mem; |
@@ -1768,6 +1768,8 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1768 | PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), | 1768 | PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), |
1769 | PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), | 1769 | PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), |
1770 | PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"), | 1770 | PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"), |
1771 | PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", | ||
1772 | 0xb4be14e3, 0x43ac239b, 0x0877b627), | ||
1771 | PCMCIA_DEVICE_NULL | 1773 | PCMCIA_DEVICE_NULL |
1772 | }; | 1774 | }; |
1773 | MODULE_DEVICE_TABLE(pcmcia, pcnet_ids); | 1775 | MODULE_DEVICE_TABLE(pcmcia, pcnet_ids); |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index fa39b944bc46..cda3e53d6917 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -45,5 +45,11 @@ config CICADA_PHY | |||
45 | ---help--- | 45 | ---help--- |
46 | Currently supports the cis8204 | 46 | Currently supports the cis8204 |
47 | 47 | ||
48 | config SMSC_PHY | ||
49 | tristate "Drivers for SMSC PHYs" | ||
50 | depends on PHYLIB | ||
51 | ---help--- | ||
52 | Currently supports the LAN83C185 PHY | ||
53 | |||
48 | endmenu | 54 | endmenu |
49 | 55 | ||
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index e4116a5fbb4c..d9614134cc06 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile | |||
@@ -8,3 +8,4 @@ obj-$(CONFIG_DAVICOM_PHY) += davicom.o | |||
8 | obj-$(CONFIG_CICADA_PHY) += cicada.o | 8 | obj-$(CONFIG_CICADA_PHY) += cicada.o |
9 | obj-$(CONFIG_LXT_PHY) += lxt.o | 9 | obj-$(CONFIG_LXT_PHY) += lxt.o |
10 | obj-$(CONFIG_QSEMI_PHY) += qsemi.o | 10 | obj-$(CONFIG_QSEMI_PHY) += qsemi.o |
11 | obj-$(CONFIG_SMSC_PHY) += smsc.o | ||
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c new file mode 100644 index 000000000000..25e31fb5cb31 --- /dev/null +++ b/drivers/net/phy/smsc.c | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * drivers/net/phy/smsc.c | ||
3 | * | ||
4 | * Driver for SMSC PHYs | ||
5 | * | ||
6 | * Author: Herbert Valerio Riedel | ||
7 | * | ||
8 | * Copyright (c) 2006 Herbert Valerio Riedel <hvr@gnu.org> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/config.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/mii.h> | ||
21 | #include <linux/ethtool.h> | ||
22 | #include <linux/phy.h> | ||
23 | #include <linux/netdevice.h> | ||
24 | |||
25 | #define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ | ||
26 | #define MII_LAN83C185_IM 30 /* Interrupt Mask */ | ||
27 | |||
28 | #define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ | ||
29 | #define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ | ||
30 | #define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */ | ||
31 | #define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */ | ||
32 | #define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */ | ||
33 | #define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */ | ||
34 | #define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */ | ||
35 | |||
36 | #define MII_LAN83C185_ISF_INT_ALL (0x0e) | ||
37 | |||
38 | #define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ | ||
39 | (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4) | ||
40 | |||
41 | |||
42 | static int lan83c185_config_intr(struct phy_device *phydev) | ||
43 | { | ||
44 | int rc = phy_write (phydev, MII_LAN83C185_IM, | ||
45 | ((PHY_INTERRUPT_ENABLED == phydev->interrupts) | ||
46 | ? MII_LAN83C185_ISF_INT_PHYLIB_EVENTS | ||
47 | : 0)); | ||
48 | |||
49 | return rc < 0 ? rc : 0; | ||
50 | } | ||
51 | |||
52 | static int lan83c185_ack_interrupt(struct phy_device *phydev) | ||
53 | { | ||
54 | int rc = phy_read (phydev, MII_LAN83C185_ISF); | ||
55 | |||
56 | return rc < 0 ? rc : 0; | ||
57 | } | ||
58 | |||
59 | static int lan83c185_config_init(struct phy_device *phydev) | ||
60 | { | ||
61 | return lan83c185_ack_interrupt (phydev); | ||
62 | } | ||
63 | |||
64 | |||
65 | static struct phy_driver lan83c185_driver = { | ||
66 | .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ | ||
67 | .phy_id_mask = 0xfffffff0, | ||
68 | .name = "SMSC LAN83C185", | ||
69 | |||
70 | .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | ||
71 | | SUPPORTED_Asym_Pause), | ||
72 | .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, | ||
73 | |||
74 | /* basic functions */ | ||
75 | .config_aneg = genphy_config_aneg, | ||
76 | .read_status = genphy_read_status, | ||
77 | .config_init = lan83c185_config_init, | ||
78 | |||
79 | /* IRQ related */ | ||
80 | .ack_interrupt = lan83c185_ack_interrupt, | ||
81 | .config_intr = lan83c185_config_intr, | ||
82 | |||
83 | .driver = { .owner = THIS_MODULE, } | ||
84 | }; | ||
85 | |||
86 | static int __init smsc_init(void) | ||
87 | { | ||
88 | return phy_driver_register (&lan83c185_driver); | ||
89 | } | ||
90 | |||
91 | static void __exit smsc_exit(void) | ||
92 | { | ||
93 | phy_driver_unregister (&lan83c185_driver); | ||
94 | } | ||
95 | |||
96 | MODULE_DESCRIPTION("SMSC PHY driver"); | ||
97 | MODULE_AUTHOR("Herbert Valerio Riedel"); | ||
98 | MODULE_LICENSE("GPL"); | ||
99 | |||
100 | module_init(smsc_init); | ||
101 | module_exit(smsc_exit); | ||
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 0ad3310290f1..9945cc6b8d90 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -184,6 +184,7 @@ static const struct { | |||
184 | 184 | ||
185 | static struct pci_device_id rtl8169_pci_tbl[] = { | 185 | static struct pci_device_id rtl8169_pci_tbl[] = { |
186 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), }, | 186 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), }, |
187 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), }, | ||
187 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), }, | 188 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), }, |
188 | { PCI_DEVICE(0x16ec, 0x0116), }, | 189 | { PCI_DEVICE(0x16ec, 0x0116), }, |
189 | { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, }, | 190 | { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, }, |
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h index 00179bc3437f..0ef525899566 100644 --- a/drivers/net/s2io-regs.h +++ b/drivers/net/s2io-regs.h | |||
@@ -167,6 +167,7 @@ typedef struct _XENA_dev_config { | |||
167 | u8 unused4[0x08]; | 167 | u8 unused4[0x08]; |
168 | 168 | ||
169 | u64 gpio_int_reg; | 169 | u64 gpio_int_reg; |
170 | #define GPIO_INT_REG_DP_ERR_INT BIT(0) | ||
170 | #define GPIO_INT_REG_LINK_DOWN BIT(1) | 171 | #define GPIO_INT_REG_LINK_DOWN BIT(1) |
171 | #define GPIO_INT_REG_LINK_UP BIT(2) | 172 | #define GPIO_INT_REG_LINK_UP BIT(2) |
172 | u64 gpio_int_mask; | 173 | u64 gpio_int_mask; |
@@ -187,7 +188,7 @@ typedef struct _XENA_dev_config { | |||
187 | /* PIC Control registers */ | 188 | /* PIC Control registers */ |
188 | u64 pic_control; | 189 | u64 pic_control; |
189 | #define PIC_CNTL_RX_ALARM_MAP_1 BIT(0) | 190 | #define PIC_CNTL_RX_ALARM_MAP_1 BIT(0) |
190 | #define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,4) | 191 | #define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,5) |
191 | 192 | ||
192 | u64 swapper_ctrl; | 193 | u64 swapper_ctrl; |
193 | #define SWAPPER_CTRL_PIF_R_FE BIT(0) | 194 | #define SWAPPER_CTRL_PIF_R_FE BIT(0) |
@@ -267,6 +268,21 @@ typedef struct _XENA_dev_config { | |||
267 | 268 | ||
268 | /* General Configuration */ | 269 | /* General Configuration */ |
269 | u64 mdio_control; | 270 | u64 mdio_control; |
271 | #define MDIO_MMD_INDX_ADDR(val) vBIT(val, 0, 16) | ||
272 | #define MDIO_MMD_DEV_ADDR(val) vBIT(val, 19, 5) | ||
273 | #define MDIO_MMD_PMA_DEV_ADDR 0x1 | ||
274 | #define MDIO_MMD_PMD_DEV_ADDR 0x1 | ||
275 | #define MDIO_MMD_WIS_DEV_ADDR 0x2 | ||
276 | #define MDIO_MMD_PCS_DEV_ADDR 0x3 | ||
277 | #define MDIO_MMD_PHYXS_DEV_ADDR 0x4 | ||
278 | #define MDIO_MMS_PRT_ADDR(val) vBIT(val, 27, 5) | ||
279 | #define MDIO_CTRL_START_TRANS(val) vBIT(val, 56, 4) | ||
280 | #define MDIO_OP(val) vBIT(val, 60, 2) | ||
281 | #define MDIO_OP_ADDR_TRANS 0x0 | ||
282 | #define MDIO_OP_WRITE_TRANS 0x1 | ||
283 | #define MDIO_OP_READ_POST_INC_TRANS 0x2 | ||
284 | #define MDIO_OP_READ_TRANS 0x3 | ||
285 | #define MDIO_MDIO_DATA(val) vBIT(val, 32, 16) | ||
270 | 286 | ||
271 | u64 dtx_control; | 287 | u64 dtx_control; |
272 | 288 | ||
@@ -284,9 +300,13 @@ typedef struct _XENA_dev_config { | |||
284 | u64 gpio_control; | 300 | u64 gpio_control; |
285 | #define GPIO_CTRL_GPIO_0 BIT(8) | 301 | #define GPIO_CTRL_GPIO_0 BIT(8) |
286 | u64 misc_control; | 302 | u64 misc_control; |
303 | #define EXT_REQ_EN BIT(1) | ||
287 | #define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3) | 304 | #define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3) |
288 | 305 | ||
289 | u8 unused7_1[0x240 - 0x208]; | 306 | u8 unused7_1[0x230 - 0x208]; |
307 | |||
308 | u64 pic_control2; | ||
309 | u64 ini_dperr_ctrl; | ||
290 | 310 | ||
291 | u64 wreq_split_mask; | 311 | u64 wreq_split_mask; |
292 | #define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12) | 312 | #define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12) |
@@ -493,6 +513,7 @@ typedef struct _XENA_dev_config { | |||
493 | #define PRC_CTRL_NO_SNOOP_DESC BIT(22) | 513 | #define PRC_CTRL_NO_SNOOP_DESC BIT(22) |
494 | #define PRC_CTRL_NO_SNOOP_BUFF BIT(23) | 514 | #define PRC_CTRL_NO_SNOOP_BUFF BIT(23) |
495 | #define PRC_CTRL_BIMODAL_INTERRUPT BIT(37) | 515 | #define PRC_CTRL_BIMODAL_INTERRUPT BIT(37) |
516 | #define PRC_CTRL_GROUP_READS BIT(38) | ||
496 | #define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24) | 517 | #define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24) |
497 | 518 | ||
498 | u64 prc_alarm_action; | 519 | u64 prc_alarm_action; |
@@ -541,7 +562,12 @@ typedef struct _XENA_dev_config { | |||
541 | #define RX_PA_CFG_IGNORE_LLC_CTRL BIT(3) | 562 | #define RX_PA_CFG_IGNORE_LLC_CTRL BIT(3) |
542 | #define RX_PA_CFG_IGNORE_L2_ERR BIT(6) | 563 | #define RX_PA_CFG_IGNORE_L2_ERR BIT(6) |
543 | 564 | ||
544 | u8 unused12[0x700 - 0x1D8]; | 565 | u64 unused_11_1; |
566 | |||
567 | u64 ring_bump_counter1; | ||
568 | u64 ring_bump_counter2; | ||
569 | |||
570 | u8 unused12[0x700 - 0x1F0]; | ||
545 | 571 | ||
546 | u64 rxdma_debug_ctrl; | 572 | u64 rxdma_debug_ctrl; |
547 | 573 | ||
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 79208f434ac1..cac9fdd2e1d5 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -26,15 +26,22 @@ | |||
26 | * | 26 | * |
27 | * The module loadable parameters that are supported by the driver and a brief | 27 | * The module loadable parameters that are supported by the driver and a brief |
28 | * explanation of all the variables. | 28 | * explanation of all the variables. |
29 | * | ||
29 | * rx_ring_num : This can be used to program the number of receive rings used | 30 | * rx_ring_num : This can be used to program the number of receive rings used |
30 | * in the driver. | 31 | * in the driver. |
31 | * rx_ring_sz: This defines the number of descriptors each ring can have. This | 32 | * rx_ring_sz: This defines the number of receive blocks each ring can have. |
32 | * is also an array of size 8. | 33 | * This is also an array of size 8. |
33 | * rx_ring_mode: This defines the operation mode of all 8 rings. The valid | 34 | * rx_ring_mode: This defines the operation mode of all 8 rings. The valid |
34 | * values are 1, 2 and 3. | 35 | * values are 1, 2 and 3. |
35 | * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver. | 36 | * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver. |
36 | * tx_fifo_len: This too is an array of 8. Each element defines the number of | 37 | * tx_fifo_len: This too is an array of 8. Each element defines the number of |
37 | * Tx descriptors that can be associated with each corresponding FIFO. | 38 | * Tx descriptors that can be associated with each corresponding FIFO. |
39 | * intr_type: This defines the type of interrupt. The values can be 0(INTA), | ||
40 | * 1(MSI), 2(MSI_X). Default value is '0(INTA)' | ||
41 | * lro: Specifies whether to enable Large Receive Offload (LRO) or not. | ||
42 | * Possible values '1' for enable '0' for disable. Default is '0' | ||
43 | * lro_max_pkts: This parameter defines the maximum number of packets that | ||
44 | * can be aggregated into a single large packet | ||
38 | ************************************************************************/ | 45 | ************************************************************************/ |
39 | 46 | ||
40 | #include <linux/config.h> | 47 | #include <linux/config.h> |
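Note on the expanded parameter documentation above: these knobs reach the driver through the kernel's standard module-parameter macros. The sketch below is illustrative only and is not part of the patch; the permission bits and the lro_max_pkts default shown here are assumptions, and the driver's real declarations appear further down in this file.

    /* Illustrative sketch, not patch content: exposing the documented knobs. */
    static unsigned int intr_type;              /* 0: INTA, 1: MSI, 2: MSI-X   */
    static unsigned int lro;                    /* LRO off by default          */
    static unsigned int lro_max_pkts = 0xFFFF;  /* assumed default, see above  */
    module_param(intr_type, uint, 0);
    module_param(lro, uint, 0);
    module_param(lro_max_pkts, uint, 0);
    /* loaded e.g. as:  modprobe s2io intr_type=2 lro=1 lro_max_pkts=20 */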
@@ -70,7 +77,7 @@ | |||
70 | #include "s2io.h" | 77 | #include "s2io.h" |
71 | #include "s2io-regs.h" | 78 | #include "s2io-regs.h" |
72 | 79 | ||
73 | #define DRV_VERSION "2.0.11.2" | 80 | #define DRV_VERSION "2.0.14.2" |
74 | 81 | ||
75 | /* S2io Driver name & version. */ | 82 | /* S2io Driver name & version. */ |
76 | static char s2io_driver_name[] = "Neterion"; | 83 | static char s2io_driver_name[] = "Neterion"; |
@@ -106,18 +113,14 @@ static inline int RXD_IS_UP2DT(RxD_t *rxdp) | |||
106 | #define LOW 2 | 113 | #define LOW 2 |
107 | static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring) | 114 | static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring) |
108 | { | 115 | { |
109 | int level = 0; | ||
110 | mac_info_t *mac_control; | 116 | mac_info_t *mac_control; |
111 | 117 | ||
112 | mac_control = &sp->mac_control; | 118 | mac_control = &sp->mac_control; |
113 | if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) { | 119 | if (rxb_size <= rxd_count[sp->rxd_mode]) |
114 | level = LOW; | 120 | return PANIC; |
115 | if (rxb_size <= rxd_count[sp->rxd_mode]) { | 121 | else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) |
116 | level = PANIC; | 122 | return LOW; |
117 | } | 123 | return 0; |
118 | } | ||
119 | |||
120 | return level; | ||
121 | } | 124 | } |
122 | 125 | ||
123 | /* Ethtool related variables and Macros. */ | 126 | /* Ethtool related variables and Macros. */ |
@@ -136,7 +139,11 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = { | |||
136 | {"tmac_mcst_frms"}, | 139 | {"tmac_mcst_frms"}, |
137 | {"tmac_bcst_frms"}, | 140 | {"tmac_bcst_frms"}, |
138 | {"tmac_pause_ctrl_frms"}, | 141 | {"tmac_pause_ctrl_frms"}, |
142 | {"tmac_ttl_octets"}, | ||
143 | {"tmac_ucst_frms"}, | ||
144 | {"tmac_nucst_frms"}, | ||
139 | {"tmac_any_err_frms"}, | 145 | {"tmac_any_err_frms"}, |
146 | {"tmac_ttl_less_fb_octets"}, | ||
140 | {"tmac_vld_ip_octets"}, | 147 | {"tmac_vld_ip_octets"}, |
141 | {"tmac_vld_ip"}, | 148 | {"tmac_vld_ip"}, |
142 | {"tmac_drop_ip"}, | 149 | {"tmac_drop_ip"}, |
@@ -151,13 +158,27 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = { | |||
151 | {"rmac_vld_mcst_frms"}, | 158 | {"rmac_vld_mcst_frms"}, |
152 | {"rmac_vld_bcst_frms"}, | 159 | {"rmac_vld_bcst_frms"}, |
153 | {"rmac_in_rng_len_err_frms"}, | 160 | {"rmac_in_rng_len_err_frms"}, |
161 | {"rmac_out_rng_len_err_frms"}, | ||
154 | {"rmac_long_frms"}, | 162 | {"rmac_long_frms"}, |
155 | {"rmac_pause_ctrl_frms"}, | 163 | {"rmac_pause_ctrl_frms"}, |
164 | {"rmac_unsup_ctrl_frms"}, | ||
165 | {"rmac_ttl_octets"}, | ||
166 | {"rmac_accepted_ucst_frms"}, | ||
167 | {"rmac_accepted_nucst_frms"}, | ||
156 | {"rmac_discarded_frms"}, | 168 | {"rmac_discarded_frms"}, |
169 | {"rmac_drop_events"}, | ||
170 | {"rmac_ttl_less_fb_octets"}, | ||
171 | {"rmac_ttl_frms"}, | ||
157 | {"rmac_usized_frms"}, | 172 | {"rmac_usized_frms"}, |
158 | {"rmac_osized_frms"}, | 173 | {"rmac_osized_frms"}, |
159 | {"rmac_frag_frms"}, | 174 | {"rmac_frag_frms"}, |
160 | {"rmac_jabber_frms"}, | 175 | {"rmac_jabber_frms"}, |
176 | {"rmac_ttl_64_frms"}, | ||
177 | {"rmac_ttl_65_127_frms"}, | ||
178 | {"rmac_ttl_128_255_frms"}, | ||
179 | {"rmac_ttl_256_511_frms"}, | ||
180 | {"rmac_ttl_512_1023_frms"}, | ||
181 | {"rmac_ttl_1024_1518_frms"}, | ||
161 | {"rmac_ip"}, | 182 | {"rmac_ip"}, |
162 | {"rmac_ip_octets"}, | 183 | {"rmac_ip_octets"}, |
163 | {"rmac_hdr_err_ip"}, | 184 | {"rmac_hdr_err_ip"}, |
@@ -166,12 +187,82 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = { | |||
166 | {"rmac_tcp"}, | 187 | {"rmac_tcp"}, |
167 | {"rmac_udp"}, | 188 | {"rmac_udp"}, |
168 | {"rmac_err_drp_udp"}, | 189 | {"rmac_err_drp_udp"}, |
190 | {"rmac_xgmii_err_sym"}, | ||
191 | {"rmac_frms_q0"}, | ||
192 | {"rmac_frms_q1"}, | ||
193 | {"rmac_frms_q2"}, | ||
194 | {"rmac_frms_q3"}, | ||
195 | {"rmac_frms_q4"}, | ||
196 | {"rmac_frms_q5"}, | ||
197 | {"rmac_frms_q6"}, | ||
198 | {"rmac_frms_q7"}, | ||
199 | {"rmac_full_q0"}, | ||
200 | {"rmac_full_q1"}, | ||
201 | {"rmac_full_q2"}, | ||
202 | {"rmac_full_q3"}, | ||
203 | {"rmac_full_q4"}, | ||
204 | {"rmac_full_q5"}, | ||
205 | {"rmac_full_q6"}, | ||
206 | {"rmac_full_q7"}, | ||
169 | {"rmac_pause_cnt"}, | 207 | {"rmac_pause_cnt"}, |
208 | {"rmac_xgmii_data_err_cnt"}, | ||
209 | {"rmac_xgmii_ctrl_err_cnt"}, | ||
170 | {"rmac_accepted_ip"}, | 210 | {"rmac_accepted_ip"}, |
171 | {"rmac_err_tcp"}, | 211 | {"rmac_err_tcp"}, |
212 | {"rd_req_cnt"}, | ||
213 | {"new_rd_req_cnt"}, | ||
214 | {"new_rd_req_rtry_cnt"}, | ||
215 | {"rd_rtry_cnt"}, | ||
216 | {"wr_rtry_rd_ack_cnt"}, | ||
217 | {"wr_req_cnt"}, | ||
218 | {"new_wr_req_cnt"}, | ||
219 | {"new_wr_req_rtry_cnt"}, | ||
220 | {"wr_rtry_cnt"}, | ||
221 | {"wr_disc_cnt"}, | ||
222 | {"rd_rtry_wr_ack_cnt"}, | ||
223 | {"txp_wr_cnt"}, | ||
224 | {"txd_rd_cnt"}, | ||
225 | {"txd_wr_cnt"}, | ||
226 | {"rxd_rd_cnt"}, | ||
227 | {"rxd_wr_cnt"}, | ||
228 | {"txf_rd_cnt"}, | ||
229 | {"rxf_wr_cnt"}, | ||
230 | {"rmac_ttl_1519_4095_frms"}, | ||
231 | {"rmac_ttl_4096_8191_frms"}, | ||
232 | {"rmac_ttl_8192_max_frms"}, | ||
233 | {"rmac_ttl_gt_max_frms"}, | ||
234 | {"rmac_osized_alt_frms"}, | ||
235 | {"rmac_jabber_alt_frms"}, | ||
236 | {"rmac_gt_max_alt_frms"}, | ||
237 | {"rmac_vlan_frms"}, | ||
238 | {"rmac_len_discard"}, | ||
239 | {"rmac_fcs_discard"}, | ||
240 | {"rmac_pf_discard"}, | ||
241 | {"rmac_da_discard"}, | ||
242 | {"rmac_red_discard"}, | ||
243 | {"rmac_rts_discard"}, | ||
244 | {"rmac_ingm_full_discard"}, | ||
245 | {"link_fault_cnt"}, | ||
172 | {"\n DRIVER STATISTICS"}, | 246 | {"\n DRIVER STATISTICS"}, |
173 | {"single_bit_ecc_errs"}, | 247 | {"single_bit_ecc_errs"}, |
174 | {"double_bit_ecc_errs"}, | 248 | {"double_bit_ecc_errs"}, |
249 | {"parity_err_cnt"}, | ||
250 | {"serious_err_cnt"}, | ||
251 | {"soft_reset_cnt"}, | ||
252 | {"fifo_full_cnt"}, | ||
253 | {"ring_full_cnt"}, | ||
254 | ("alarm_transceiver_temp_high"), | ||
255 | ("alarm_transceiver_temp_low"), | ||
256 | ("alarm_laser_bias_current_high"), | ||
257 | ("alarm_laser_bias_current_low"), | ||
258 | ("alarm_laser_output_power_high"), | ||
259 | ("alarm_laser_output_power_low"), | ||
260 | ("warn_transceiver_temp_high"), | ||
261 | ("warn_transceiver_temp_low"), | ||
262 | ("warn_laser_bias_current_high"), | ||
263 | ("warn_laser_bias_current_low"), | ||
264 | ("warn_laser_output_power_high"), | ||
265 | ("warn_laser_output_power_low"), | ||
175 | ("lro_aggregated_pkts"), | 266 | ("lro_aggregated_pkts"), |
176 | ("lro_flush_both_count"), | 267 | ("lro_flush_both_count"), |
177 | ("lro_out_of_sequence_pkts"), | 268 | ("lro_out_of_sequence_pkts"), |
@@ -220,9 +311,7 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) | |||
220 | * the XAUI. | 311 | * the XAUI. |
221 | */ | 312 | */ |
222 | 313 | ||
223 | #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL | ||
224 | #define END_SIGN 0x0 | 314 | #define END_SIGN 0x0 |
225 | |||
226 | static const u64 herc_act_dtx_cfg[] = { | 315 | static const u64 herc_act_dtx_cfg[] = { |
227 | /* Set address */ | 316 | /* Set address */ |
228 | 0x8000051536750000ULL, 0x80000515367500E0ULL, | 317 | 0x8000051536750000ULL, 0x80000515367500E0ULL, |
@@ -244,37 +333,19 @@ static const u64 herc_act_dtx_cfg[] = { | |||
244 | END_SIGN | 333 | END_SIGN |
245 | }; | 334 | }; |
246 | 335 | ||
247 | static const u64 xena_mdio_cfg[] = { | ||
248 | /* Reset PMA PLL */ | ||
249 | 0xC001010000000000ULL, 0xC0010100000000E0ULL, | ||
250 | 0xC0010100008000E4ULL, | ||
251 | /* Remove Reset from PMA PLL */ | ||
252 | 0xC001010000000000ULL, 0xC0010100000000E0ULL, | ||
253 | 0xC0010100000000E4ULL, | ||
254 | END_SIGN | ||
255 | }; | ||
256 | |||
257 | static const u64 xena_dtx_cfg[] = { | 336 | static const u64 xena_dtx_cfg[] = { |
337 | /* Set address */ | ||
258 | 0x8000051500000000ULL, 0x80000515000000E0ULL, | 338 | 0x8000051500000000ULL, 0x80000515000000E0ULL, |
259 | 0x80000515D93500E4ULL, 0x8001051500000000ULL, | 339 | /* Write data */ |
260 | 0x80010515000000E0ULL, 0x80010515001E00E4ULL, | 340 | 0x80000515D9350004ULL, 0x80000515D93500E4ULL, |
261 | 0x8002051500000000ULL, 0x80020515000000E0ULL, | 341 | /* Set address */ |
262 | 0x80020515F21000E4ULL, | 342 | 0x8001051500000000ULL, 0x80010515000000E0ULL, |
263 | /* Set PADLOOPBACKN */ | 343 | /* Write data */ |
264 | 0x8002051500000000ULL, 0x80020515000000E0ULL, | 344 | 0x80010515001E0004ULL, 0x80010515001E00E4ULL, |
265 | 0x80020515B20000E4ULL, 0x8003051500000000ULL, | 345 | /* Set address */ |
266 | 0x80030515000000E0ULL, 0x80030515B20000E4ULL, | ||
267 | 0x8004051500000000ULL, 0x80040515000000E0ULL, | ||
268 | 0x80040515B20000E4ULL, 0x8005051500000000ULL, | ||
269 | 0x80050515000000E0ULL, 0x80050515B20000E4ULL, | ||
270 | SWITCH_SIGN, | ||
271 | /* Remove PADLOOPBACKN */ | ||
272 | 0x8002051500000000ULL, 0x80020515000000E0ULL, | 346 | 0x8002051500000000ULL, 0x80020515000000E0ULL, |
273 | 0x80020515F20000E4ULL, 0x8003051500000000ULL, | 347 | /* Write data */ |
274 | 0x80030515000000E0ULL, 0x80030515F20000E4ULL, | 348 | 0x80020515F2100004ULL, 0x80020515F21000E4ULL, |
275 | 0x8004051500000000ULL, 0x80040515000000E0ULL, | ||
276 | 0x80040515F20000E4ULL, 0x8005051500000000ULL, | ||
277 | 0x80050515000000E0ULL, 0x80050515F20000E4ULL, | ||
278 | END_SIGN | 349 | END_SIGN |
279 | }; | 350 | }; |
280 | 351 | ||
@@ -303,15 +374,15 @@ static const u64 fix_mac[] = { | |||
303 | /* Module Loadable parameters. */ | 374 | /* Module Loadable parameters. */ |
304 | static unsigned int tx_fifo_num = 1; | 375 | static unsigned int tx_fifo_num = 1; |
305 | static unsigned int tx_fifo_len[MAX_TX_FIFOS] = | 376 | static unsigned int tx_fifo_len[MAX_TX_FIFOS] = |
306 | {[0 ...(MAX_TX_FIFOS - 1)] = 0 }; | 377 | {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; |
307 | static unsigned int rx_ring_num = 1; | 378 | static unsigned int rx_ring_num = 1; |
308 | static unsigned int rx_ring_sz[MAX_RX_RINGS] = | 379 | static unsigned int rx_ring_sz[MAX_RX_RINGS] = |
309 | {[0 ...(MAX_RX_RINGS - 1)] = 0 }; | 380 | {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; |
310 | static unsigned int rts_frm_len[MAX_RX_RINGS] = | 381 | static unsigned int rts_frm_len[MAX_RX_RINGS] = |
311 | {[0 ...(MAX_RX_RINGS - 1)] = 0 }; | 382 | {[0 ...(MAX_RX_RINGS - 1)] = 0 }; |
312 | static unsigned int rx_ring_mode = 1; | 383 | static unsigned int rx_ring_mode = 1; |
313 | static unsigned int use_continuous_tx_intrs = 1; | 384 | static unsigned int use_continuous_tx_intrs = 1; |
314 | static unsigned int rmac_pause_time = 65535; | 385 | static unsigned int rmac_pause_time = 0x100; |
315 | static unsigned int mc_pause_threshold_q0q3 = 187; | 386 | static unsigned int mc_pause_threshold_q0q3 = 187; |
316 | static unsigned int mc_pause_threshold_q4q7 = 187; | 387 | static unsigned int mc_pause_threshold_q4q7 = 187; |
317 | static unsigned int shared_splits; | 388 | static unsigned int shared_splits; |
@@ -549,11 +620,6 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
549 | rx_blocks->block_dma_addr + | 620 | rx_blocks->block_dma_addr + |
550 | (rxd_size[nic->rxd_mode] * l); | 621 | (rxd_size[nic->rxd_mode] * l); |
551 | } | 622 | } |
552 | |||
553 | mac_control->rings[i].rx_blocks[j].block_virt_addr = | ||
554 | tmp_v_addr; | ||
555 | mac_control->rings[i].rx_blocks[j].block_dma_addr = | ||
556 | tmp_p_addr; | ||
557 | } | 623 | } |
558 | /* Interlinking all Rx Blocks */ | 624 | /* Interlinking all Rx Blocks */ |
559 | for (j = 0; j < blk_cnt; j++) { | 625 | for (j = 0; j < blk_cnt; j++) { |
@@ -772,7 +838,21 @@ static int s2io_verify_pci_mode(nic_t *nic) | |||
772 | return mode; | 838 | return mode; |
773 | } | 839 | } |
774 | 840 | ||
841 | #define NEC_VENID 0x1033 | ||
842 | #define NEC_DEVID 0x0125 | ||
843 | static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev) | ||
844 | { | ||
845 | struct pci_dev *tdev = NULL; | ||
846 | while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { | ||
847 | if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)){ | ||
848 | if (tdev->bus == s2io_pdev->bus->parent) | ||
849 | return 1; | ||
850 | } | ||
851 | } | ||
852 | return 0; | ||
853 | } | ||
775 | 854 | ||
855 | static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266}; | ||
776 | /** | 856 | /** |
777 | * s2io_print_pci_mode - | 857 | * s2io_print_pci_mode - |
778 | */ | 858 | */ |
@@ -789,6 +869,14 @@ static int s2io_print_pci_mode(nic_t *nic) | |||
789 | if ( val64 & PCI_MODE_UNKNOWN_MODE) | 869 | if ( val64 & PCI_MODE_UNKNOWN_MODE) |
790 | return -1; /* Unknown PCI mode */ | 870 | return -1; /* Unknown PCI mode */ |
791 | 871 | ||
872 | config->bus_speed = bus_speed[mode]; | ||
873 | |||
874 | if (s2io_on_nec_bridge(nic->pdev)) { | ||
875 | DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n", | ||
876 | nic->dev->name); | ||
877 | return mode; | ||
878 | } | ||
879 | |||
792 | if (val64 & PCI_MODE_32_BITS) { | 880 | if (val64 & PCI_MODE_32_BITS) { |
793 | DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name); | 881 | DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name); |
794 | } else { | 882 | } else { |
@@ -798,35 +886,27 @@ static int s2io_print_pci_mode(nic_t *nic) | |||
798 | switch(mode) { | 886 | switch(mode) { |
799 | case PCI_MODE_PCI_33: | 887 | case PCI_MODE_PCI_33: |
800 | DBG_PRINT(ERR_DBG, "33MHz PCI bus\n"); | 888 | DBG_PRINT(ERR_DBG, "33MHz PCI bus\n"); |
801 | config->bus_speed = 33; | ||
802 | break; | 889 | break; |
803 | case PCI_MODE_PCI_66: | 890 | case PCI_MODE_PCI_66: |
804 | DBG_PRINT(ERR_DBG, "66MHz PCI bus\n"); | 891 | DBG_PRINT(ERR_DBG, "66MHz PCI bus\n"); |
805 | config->bus_speed = 133; | ||
806 | break; | 892 | break; |
807 | case PCI_MODE_PCIX_M1_66: | 893 | case PCI_MODE_PCIX_M1_66: |
808 | DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n"); | 894 | DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n"); |
809 | config->bus_speed = 133; /* Herc doubles the clock rate */ | ||
810 | break; | 895 | break; |
811 | case PCI_MODE_PCIX_M1_100: | 896 | case PCI_MODE_PCIX_M1_100: |
812 | DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n"); | 897 | DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n"); |
813 | config->bus_speed = 200; | ||
814 | break; | 898 | break; |
815 | case PCI_MODE_PCIX_M1_133: | 899 | case PCI_MODE_PCIX_M1_133: |
816 | DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n"); | 900 | DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n"); |
817 | config->bus_speed = 266; | ||
818 | break; | 901 | break; |
819 | case PCI_MODE_PCIX_M2_66: | 902 | case PCI_MODE_PCIX_M2_66: |
820 | DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n"); | 903 | DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n"); |
821 | config->bus_speed = 133; | ||
822 | break; | 904 | break; |
823 | case PCI_MODE_PCIX_M2_100: | 905 | case PCI_MODE_PCIX_M2_100: |
824 | DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n"); | 906 | DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n"); |
825 | config->bus_speed = 200; | ||
826 | break; | 907 | break; |
827 | case PCI_MODE_PCIX_M2_133: | 908 | case PCI_MODE_PCIX_M2_133: |
828 | DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n"); | 909 | DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n"); |
829 | config->bus_speed = 266; | ||
830 | break; | 910 | break; |
831 | default: | 911 | default: |
832 | return -1; /* Unsupported bus speed */ | 912 | return -1; /* Unsupported bus speed */ |
@@ -854,7 +934,7 @@ static int init_nic(struct s2io_nic *nic) | |||
854 | int i, j; | 934 | int i, j; |
855 | mac_info_t *mac_control; | 935 | mac_info_t *mac_control; |
856 | struct config_param *config; | 936 | struct config_param *config; |
857 | int mdio_cnt = 0, dtx_cnt = 0; | 937 | int dtx_cnt = 0; |
858 | unsigned long long mem_share; | 938 | unsigned long long mem_share; |
859 | int mem_size; | 939 | int mem_size; |
860 | 940 | ||
@@ -901,20 +981,6 @@ static int init_nic(struct s2io_nic *nic) | |||
901 | val64 = dev->mtu; | 981 | val64 = dev->mtu; |
902 | writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); | 982 | writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); |
903 | 983 | ||
904 | /* | ||
905 | * Configuring the XAUI Interface of Xena. | ||
906 | * *************************************** | ||
907 | * To Configure the Xena's XAUI, one has to write a series | ||
908 | * of 64 bit values into two registers in a particular | ||
909 | * sequence. Hence a macro 'SWITCH_SIGN' has been defined | ||
910 | * which will be defined in the array of configuration values | ||
911 | * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places | ||
912 | * to switch writing from one regsiter to another. We continue | ||
913 | * writing these values until we encounter the 'END_SIGN' macro. | ||
914 | * For example, After making a series of 21 writes into | ||
915 | * dtx_control register the 'SWITCH_SIGN' appears and hence we | ||
916 | * start writing into mdio_control until we encounter END_SIGN. | ||
917 | */ | ||
918 | if (nic->device_type & XFRAME_II_DEVICE) { | 984 | if (nic->device_type & XFRAME_II_DEVICE) { |
919 | while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) { | 985 | while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) { |
920 | SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt], | 986 | SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt], |
@@ -924,35 +990,11 @@ static int init_nic(struct s2io_nic *nic) | |||
924 | dtx_cnt++; | 990 | dtx_cnt++; |
925 | } | 991 | } |
926 | } else { | 992 | } else { |
927 | while (1) { | 993 | while (xena_dtx_cfg[dtx_cnt] != END_SIGN) { |
928 | dtx_cfg: | 994 | SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt], |
929 | while (xena_dtx_cfg[dtx_cnt] != END_SIGN) { | 995 | &bar0->dtx_control, UF); |
930 | if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) { | 996 | val64 = readq(&bar0->dtx_control); |
931 | dtx_cnt++; | 997 | dtx_cnt++; |
932 | goto mdio_cfg; | ||
933 | } | ||
934 | SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt], | ||
935 | &bar0->dtx_control, UF); | ||
936 | val64 = readq(&bar0->dtx_control); | ||
937 | dtx_cnt++; | ||
938 | } | ||
939 | mdio_cfg: | ||
940 | while (xena_mdio_cfg[mdio_cnt] != END_SIGN) { | ||
941 | if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) { | ||
942 | mdio_cnt++; | ||
943 | goto dtx_cfg; | ||
944 | } | ||
945 | SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt], | ||
946 | &bar0->mdio_control, UF); | ||
947 | val64 = readq(&bar0->mdio_control); | ||
948 | mdio_cnt++; | ||
949 | } | ||
950 | if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) && | ||
951 | (xena_mdio_cfg[mdio_cnt] == END_SIGN)) { | ||
952 | break; | ||
953 | } else { | ||
954 | goto dtx_cfg; | ||
955 | } | ||
956 | } | 998 | } |
957 | } | 999 | } |
958 | 1000 | ||
@@ -994,11 +1036,6 @@ static int init_nic(struct s2io_nic *nic) | |||
994 | } | 1036 | } |
995 | } | 1037 | } |
996 | 1038 | ||
997 | /* Enable Tx FIFO partition 0. */ | ||
998 | val64 = readq(&bar0->tx_fifo_partition_0); | ||
999 | val64 |= BIT(0); /* To enable the FIFO partition. */ | ||
1000 | writeq(val64, &bar0->tx_fifo_partition_0); | ||
1001 | |||
1002 | /* | 1039 | /* |
1003 | * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug | 1040 | * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug |
1004 | * SXE-008 TRANSMIT DMA ARBITRATION ISSUE. | 1041 | * SXE-008 TRANSMIT DMA ARBITRATION ISSUE. |
@@ -1177,6 +1214,11 @@ static int init_nic(struct s2io_nic *nic) | |||
1177 | break; | 1214 | break; |
1178 | } | 1215 | } |
1179 | 1216 | ||
1217 | /* Enable Tx FIFO partition 0. */ | ||
1218 | val64 = readq(&bar0->tx_fifo_partition_0); | ||
1219 | val64 |= (TX_FIFO_PARTITION_EN); | ||
1220 | writeq(val64, &bar0->tx_fifo_partition_0); | ||
1221 | |||
1180 | /* Filling the Rx round robin registers as per the | 1222 | /* Filling the Rx round robin registers as per the |
1181 | * number of Rings and steering based on QoS. | 1223 | * number of Rings and steering based on QoS. |
1182 | */ | 1224 | */ |
@@ -1545,19 +1587,26 @@ static int init_nic(struct s2io_nic *nic) | |||
1545 | val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits); | 1587 | val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits); |
1546 | writeq(val64, &bar0->pic_control); | 1588 | writeq(val64, &bar0->pic_control); |
1547 | 1589 | ||
1590 | if (nic->config.bus_speed == 266) { | ||
1591 | writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout); | ||
1592 | writeq(0x0, &bar0->read_retry_delay); | ||
1593 | writeq(0x0, &bar0->write_retry_delay); | ||
1594 | } | ||
1595 | |||
1548 | /* | 1596 | /* |
1549 | * Programming the Herc to split every write transaction | 1597 | * Programming the Herc to split every write transaction |
1550 | * that does not start on an ADB to reduce disconnects. | 1598 | * that does not start on an ADB to reduce disconnects. |
1551 | */ | 1599 | */ |
1552 | if (nic->device_type == XFRAME_II_DEVICE) { | 1600 | if (nic->device_type == XFRAME_II_DEVICE) { |
1553 | val64 = WREQ_SPLIT_MASK_SET_MASK(255); | 1601 | val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3); |
1554 | writeq(val64, &bar0->wreq_split_mask); | ||
1555 | } | ||
1556 | |||
1557 | /* Setting Link stability period to 64 ms */ | ||
1558 | if (nic->device_type == XFRAME_II_DEVICE) { | ||
1559 | val64 = MISC_LINK_STABILITY_PRD(3); | ||
1560 | writeq(val64, &bar0->misc_control); | 1602 | writeq(val64, &bar0->misc_control); |
1603 | val64 = readq(&bar0->pic_control2); | ||
1604 | val64 &= ~(BIT(13)|BIT(14)|BIT(15)); | ||
1605 | writeq(val64, &bar0->pic_control2); | ||
1606 | } | ||
1607 | if (strstr(nic->product_name, "CX4")) { | ||
1608 | val64 = TMAC_AVG_IPG(0x17); | ||
1609 | writeq(val64, &bar0->tmac_avg_ipg); | ||
1561 | } | 1610 | } |
1562 | 1611 | ||
1563 | return SUCCESS; | 1612 | return SUCCESS; |
@@ -1948,6 +1997,10 @@ static int start_nic(struct s2io_nic *nic) | |||
1948 | val64 |= PRC_CTRL_RC_ENABLED; | 1997 | val64 |= PRC_CTRL_RC_ENABLED; |
1949 | else | 1998 | else |
1950 | val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3; | 1999 | val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3; |
2000 | if (nic->device_type == XFRAME_II_DEVICE) | ||
2001 | val64 |= PRC_CTRL_GROUP_READS; | ||
2002 | val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF); | ||
2003 | val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000); | ||
1951 | writeq(val64, &bar0->prc_ctrl_n[i]); | 2004 | writeq(val64, &bar0->prc_ctrl_n[i]); |
1952 | } | 2005 | } |
1953 | 2006 | ||
@@ -2018,6 +2071,13 @@ static int start_nic(struct s2io_nic *nic) | |||
2018 | val64 |= ADAPTER_EOI_TX_ON; | 2071 | val64 |= ADAPTER_EOI_TX_ON; |
2019 | writeq(val64, &bar0->adapter_control); | 2072 | writeq(val64, &bar0->adapter_control); |
2020 | 2073 | ||
2074 | if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) { | ||
2075 | /* | ||
2076 | * Don't see link state interrupts initially on some switches, | ||
2077 | * so directly scheduling the link state task here. | ||
2078 | */ | ||
2079 | schedule_work(&nic->set_link_task); | ||
2080 | } | ||
2021 | /* SXE-002: Initialize link and activity LED */ | 2081 | /* SXE-002: Initialize link and activity LED */ |
2022 | subid = nic->pdev->subsystem_device; | 2082 | subid = nic->pdev->subsystem_device; |
2023 | if (((subid & 0xFF) >= 0x07) && | 2083 | if (((subid & 0xFF) >= 0x07) && |
@@ -2029,12 +2089,6 @@ static int start_nic(struct s2io_nic *nic) | |||
2029 | writeq(val64, (void __iomem *)bar0 + 0x2700); | 2089 | writeq(val64, (void __iomem *)bar0 + 0x2700); |
2030 | } | 2090 | } |
2031 | 2091 | ||
2032 | /* | ||
2033 | * Don't see link state interrupts on certain switches, so | ||
2034 | * directly scheduling a link state task from here. | ||
2035 | */ | ||
2036 | schedule_work(&nic->set_link_task); | ||
2037 | |||
2038 | return SUCCESS; | 2092 | return SUCCESS; |
2039 | } | 2093 | } |
2040 | /** | 2094 | /** |
@@ -2134,7 +2188,7 @@ static void stop_nic(struct s2io_nic *nic) | |||
2134 | { | 2188 | { |
2135 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 2189 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
2136 | register u64 val64 = 0; | 2190 | register u64 val64 = 0; |
2137 | u16 interruptible, i; | 2191 | u16 interruptible; |
2138 | mac_info_t *mac_control; | 2192 | mac_info_t *mac_control; |
2139 | struct config_param *config; | 2193 | struct config_param *config; |
2140 | 2194 | ||
@@ -2147,12 +2201,10 @@ static void stop_nic(struct s2io_nic *nic) | |||
2147 | interruptible |= TX_MAC_INTR | RX_MAC_INTR; | 2201 | interruptible |= TX_MAC_INTR | RX_MAC_INTR; |
2148 | en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS); | 2202 | en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS); |
2149 | 2203 | ||
2150 | /* Disable PRCs */ | 2204 | /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */ |
2151 | for (i = 0; i < config->rx_ring_num; i++) { | 2205 | val64 = readq(&bar0->adapter_control); |
2152 | val64 = readq(&bar0->prc_ctrl_n[i]); | 2206 | val64 &= ~(ADAPTER_CNTL_EN); |
2153 | val64 &= ~((u64) PRC_CTRL_RC_ENABLED); | 2207 | writeq(val64, &bar0->adapter_control); |
2154 | writeq(val64, &bar0->prc_ctrl_n[i]); | ||
2155 | } | ||
2156 | } | 2208 | } |
2157 | 2209 | ||
2158 | static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) | 2210 | static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) |
@@ -2231,13 +2283,12 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2231 | alloc_cnt = mac_control->rings[ring_no].pkt_cnt - | 2283 | alloc_cnt = mac_control->rings[ring_no].pkt_cnt - |
2232 | atomic_read(&nic->rx_bufs_left[ring_no]); | 2284 | atomic_read(&nic->rx_bufs_left[ring_no]); |
2233 | 2285 | ||
2286 | block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index; | ||
2287 | off1 = mac_control->rings[ring_no].rx_curr_get_info.offset; | ||
2234 | while (alloc_tab < alloc_cnt) { | 2288 | while (alloc_tab < alloc_cnt) { |
2235 | block_no = mac_control->rings[ring_no].rx_curr_put_info. | 2289 | block_no = mac_control->rings[ring_no].rx_curr_put_info. |
2236 | block_index; | 2290 | block_index; |
2237 | block_no1 = mac_control->rings[ring_no].rx_curr_get_info. | ||
2238 | block_index; | ||
2239 | off = mac_control->rings[ring_no].rx_curr_put_info.offset; | 2291 | off = mac_control->rings[ring_no].rx_curr_put_info.offset; |
2240 | off1 = mac_control->rings[ring_no].rx_curr_get_info.offset; | ||
2241 | 2292 | ||
2242 | rxdp = mac_control->rings[ring_no]. | 2293 | rxdp = mac_control->rings[ring_no]. |
2243 | rx_blocks[block_no].rxds[off].virt_addr; | 2294 | rx_blocks[block_no].rxds[off].virt_addr; |
@@ -2307,9 +2358,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2307 | memset(rxdp, 0, sizeof(RxD1_t)); | 2358 | memset(rxdp, 0, sizeof(RxD1_t)); |
2308 | skb_reserve(skb, NET_IP_ALIGN); | 2359 | skb_reserve(skb, NET_IP_ALIGN); |
2309 | ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single | 2360 | ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single |
2310 | (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE); | 2361 | (nic->pdev, skb->data, size - NET_IP_ALIGN, |
2311 | rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1); | 2362 | PCI_DMA_FROMDEVICE); |
2312 | rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size); | 2363 | rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); |
2313 | 2364 | ||
2314 | } else if (nic->rxd_mode >= RXD_MODE_3A) { | 2365 | } else if (nic->rxd_mode >= RXD_MODE_3A) { |
2315 | /* | 2366 | /* |
@@ -2516,7 +2567,7 @@ static int s2io_poll(struct net_device *dev, int *budget) | |||
2516 | mac_info_t *mac_control; | 2567 | mac_info_t *mac_control; |
2517 | struct config_param *config; | 2568 | struct config_param *config; |
2518 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 2569 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
2519 | u64 val64; | 2570 | u64 val64 = 0xFFFFFFFFFFFFFFFFULL; |
2520 | int i; | 2571 | int i; |
2521 | 2572 | ||
2522 | atomic_inc(&nic->isr_cnt); | 2573 | atomic_inc(&nic->isr_cnt); |
@@ -2528,8 +2579,8 @@ static int s2io_poll(struct net_device *dev, int *budget) | |||
2528 | nic->pkts_to_process = dev->quota; | 2579 | nic->pkts_to_process = dev->quota; |
2529 | org_pkts_to_process = nic->pkts_to_process; | 2580 | org_pkts_to_process = nic->pkts_to_process; |
2530 | 2581 | ||
2531 | val64 = readq(&bar0->rx_traffic_int); | ||
2532 | writeq(val64, &bar0->rx_traffic_int); | 2582 | writeq(val64, &bar0->rx_traffic_int); |
2583 | val64 = readl(&bar0->rx_traffic_int); | ||
2533 | 2584 | ||
2534 | for (i = 0; i < config->rx_ring_num; i++) { | 2585 | for (i = 0; i < config->rx_ring_num; i++) { |
2535 | rx_intr_handler(&mac_control->rings[i]); | 2586 | rx_intr_handler(&mac_control->rings[i]); |
@@ -2554,7 +2605,8 @@ static int s2io_poll(struct net_device *dev, int *budget) | |||
2554 | } | 2605 | } |
2555 | } | 2606 | } |
2556 | /* Re enable the Rx interrupts. */ | 2607 | /* Re enable the Rx interrupts. */ |
2557 | en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS); | 2608 | writeq(0x0, &bar0->rx_traffic_mask); |
2609 | val64 = readl(&bar0->rx_traffic_mask); | ||
2558 | atomic_dec(&nic->isr_cnt); | 2610 | atomic_dec(&nic->isr_cnt); |
2559 | return 0; | 2611 | return 0; |
2560 | 2612 | ||
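Both s2io_poll hunks above switch to the usual posted-write flush idiom: the MMIO write that acks or unmasks the Rx interrupt is followed by a dummy read of the same register, so the write is actually pushed to the device before the driver continues. A condensed sketch of the pattern, using the same register names as the patch:

    /* Sketch of the flush idiom used in s2io_poll above, not new driver code. */
    writeq(0xFFFFFFFFFFFFFFFFULL, &bar0->rx_traffic_int); /* ack all Rx ints    */
    (void) readl(&bar0->rx_traffic_int);                  /* flush posted write */
    /* ... Rx rings processed ... */
    writeq(0x0, &bar0->rx_traffic_mask);                  /* unmask Rx ints     */
    (void) readl(&bar0->rx_traffic_mask);                 /* flush posted write */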
@@ -2666,6 +2718,7 @@ static void rx_intr_handler(ring_info_t *ring_data) | |||
2666 | ((RxD3_t*)rxdp)->Buffer2_ptr, | 2718 | ((RxD3_t*)rxdp)->Buffer2_ptr, |
2667 | dev->mtu, PCI_DMA_FROMDEVICE); | 2719 | dev->mtu, PCI_DMA_FROMDEVICE); |
2668 | } | 2720 | } |
2721 | prefetch(skb->data); | ||
2669 | rx_osm_handler(ring_data, rxdp); | 2722 | rx_osm_handler(ring_data, rxdp); |
2670 | get_info.offset++; | 2723 | get_info.offset++; |
2671 | ring_data->rx_curr_get_info.offset = get_info.offset; | 2724 | ring_data->rx_curr_get_info.offset = get_info.offset; |
@@ -2737,6 +2790,10 @@ static void tx_intr_handler(fifo_info_t *fifo_data) | |||
2737 | if (txdlp->Control_1 & TXD_T_CODE) { | 2790 | if (txdlp->Control_1 & TXD_T_CODE) { |
2738 | unsigned long long err; | 2791 | unsigned long long err; |
2739 | err = txdlp->Control_1 & TXD_T_CODE; | 2792 | err = txdlp->Control_1 & TXD_T_CODE; |
2793 | if (err & 0x1) { | ||
2794 | nic->mac_control.stats_info->sw_stat. | ||
2795 | parity_err_cnt++; | ||
2796 | } | ||
2740 | if ((err >> 48) == 0xA) { | 2797 | if ((err >> 48) == 0xA) { |
2741 | DBG_PRINT(TX_DBG, "TxD returned due \ | 2798 | DBG_PRINT(TX_DBG, "TxD returned due \ |
2742 | to loss of link\n"); | 2799 | to loss of link\n"); |
@@ -2760,7 +2817,8 @@ to loss of link\n"); | |||
2760 | dev_kfree_skb_irq(skb); | 2817 | dev_kfree_skb_irq(skb); |
2761 | 2818 | ||
2762 | get_info.offset++; | 2819 | get_info.offset++; |
2763 | get_info.offset %= get_info.fifo_len + 1; | 2820 | if (get_info.offset == get_info.fifo_len + 1) |
2821 | get_info.offset = 0; | ||
2764 | txdlp = (TxD_t *) fifo_data->list_info | 2822 | txdlp = (TxD_t *) fifo_data->list_info |
2765 | [get_info.offset].list_virt_addr; | 2823 | [get_info.offset].list_virt_addr; |
2766 | fifo_data->tx_curr_get_info.offset = | 2824 | fifo_data->tx_curr_get_info.offset = |
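The Tx completion change above (and the matching put-pointer change in s2io_xmit later in this patch) drops the modulo wrap of the ring offset in favour of an explicit compare, avoiding a division per descriptor in the hot path. Written out with hypothetical locals for clarity (the patch open-codes the conditional):

    /* Wrap a ring index without '%': equivalent to 'off %= fifo_len + 1'. */
    off++;
    if (off == fifo_len + 1)
            off = 0;
    /* Same idea for the full-FIFO test in s2io_xmit: full when advancing
     * 'put' (with wrap) would land on 'get'. */
    next_put = (put_off + 1 == queue_len) ? 0 : put_off + 1;
    fifo_full = (txdp->Host_Control != 0) || (next_put == get_off);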
@@ -2774,6 +2832,256 @@ to loss of link\n"); | |||
2774 | } | 2832 | } |
2775 | 2833 | ||
2776 | /** | 2834 | /** |
2835 | * s2io_mdio_write - Function to write in to MDIO registers | ||
2836 | * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS) | ||
2837 | * @addr : address value | ||
2838 | * @value : data value | ||
2839 | * @dev : pointer to net_device structure | ||
2840 | * Description: | ||
2841 | * This function is used to write values to the MDIO registers | ||
2842 | * NONE | ||
2843 | */ | ||
2844 | static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev) | ||
2845 | { | ||
2846 | u64 val64 = 0x0; | ||
2847 | nic_t *sp = dev->priv; | ||
2848 | XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0; | ||
2849 | |||
2850 | //address transaction | ||
2851 | val64 = val64 | MDIO_MMD_INDX_ADDR(addr) | ||
2852 | | MDIO_MMD_DEV_ADDR(mmd_type) | ||
2853 | | MDIO_MMS_PRT_ADDR(0x0); | ||
2854 | writeq(val64, &bar0->mdio_control); | ||
2855 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); | ||
2856 | writeq(val64, &bar0->mdio_control); | ||
2857 | udelay(100); | ||
2858 | |||
2859 | //Data transaction | ||
2860 | val64 = 0x0; | ||
2861 | val64 = val64 | MDIO_MMD_INDX_ADDR(addr) | ||
2862 | | MDIO_MMD_DEV_ADDR(mmd_type) | ||
2863 | | MDIO_MMS_PRT_ADDR(0x0) | ||
2864 | | MDIO_MDIO_DATA(value) | ||
2865 | | MDIO_OP(MDIO_OP_WRITE_TRANS); | ||
2866 | writeq(val64, &bar0->mdio_control); | ||
2867 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); | ||
2868 | writeq(val64, &bar0->mdio_control); | ||
2869 | udelay(100); | ||
2870 | |||
2871 | val64 = 0x0; | ||
2872 | val64 = val64 | MDIO_MMD_INDX_ADDR(addr) | ||
2873 | | MDIO_MMD_DEV_ADDR(mmd_type) | ||
2874 | | MDIO_MMS_PRT_ADDR(0x0) | ||
2875 | | MDIO_OP(MDIO_OP_READ_TRANS); | ||
2876 | writeq(val64, &bar0->mdio_control); | ||
2877 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); | ||
2878 | writeq(val64, &bar0->mdio_control); | ||
2879 | udelay(100); | ||
2880 | |||
2881 | } | ||
2882 | |||
2883 | /** | ||
2884 | * s2io_mdio_read - Function to read from MDIO registers | ||
2885 | * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS) | ||
2886 | * @addr : address value | ||
2887 | * @dev : pointer to net_device structure | ||
2888 | * Description: | ||
2889 | * This function is used to read values from the MDIO registers | ||
2890 | * NONE | ||
2891 | */ | ||
2892 | static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev) | ||
2893 | { | ||
2894 | u64 val64 = 0x0; | ||
2895 | u64 rval64 = 0x0; | ||
2896 | nic_t *sp = dev->priv; | ||
2897 | XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0; | ||
2898 | |||
2899 | /* address transaction */ | ||
2900 | val64 = val64 | MDIO_MMD_INDX_ADDR(addr) | ||
2901 | | MDIO_MMD_DEV_ADDR(mmd_type) | ||
2902 | | MDIO_MMS_PRT_ADDR(0x0); | ||
2903 | writeq(val64, &bar0->mdio_control); | ||
2904 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); | ||
2905 | writeq(val64, &bar0->mdio_control); | ||
2906 | udelay(100); | ||
2907 | |||
2908 | /* Data transaction */ | ||
2909 | val64 = 0x0; | ||
2910 | val64 = val64 | MDIO_MMD_INDX_ADDR(addr) | ||
2911 | | MDIO_MMD_DEV_ADDR(mmd_type) | ||
2912 | | MDIO_MMS_PRT_ADDR(0x0) | ||
2913 | | MDIO_OP(MDIO_OP_READ_TRANS); | ||
2914 | writeq(val64, &bar0->mdio_control); | ||
2915 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); | ||
2916 | writeq(val64, &bar0->mdio_control); | ||
2917 | udelay(100); | ||
2918 | |||
2919 | /* Read the value from regs */ | ||
2920 | rval64 = readq(&bar0->mdio_control); | ||
2921 | rval64 = rval64 & 0xFFFF0000; | ||
2922 | rval64 = rval64 >> 16; | ||
2923 | return rval64; | ||
2924 | } | ||
2925 | /** | ||
2926 | * s2io_chk_xpak_counter - Function to check the status of the xpak counters | ||
2927 | * @counter : counter value to be updated | ||
2928 | * @flag : flag to indicate the status | ||
2929 | * @type : counter type | ||
2930 | * Description: | ||
2931 | * This function is to check the status of the xpak counters value | ||
2932 | * NONE | ||
2933 | */ | ||
2934 | |||
2935 | static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type) | ||
2936 | { | ||
2937 | u64 mask = 0x3; | ||
2938 | u64 val64; | ||
2939 | int i; | ||
2940 | for(i = 0; i <index; i++) | ||
2941 | mask = mask << 0x2; | ||
2942 | |||
2943 | if(flag > 0) | ||
2944 | { | ||
2945 | *counter = *counter + 1; | ||
2946 | val64 = *regs_stat & mask; | ||
2947 | val64 = val64 >> (index * 0x2); | ||
2948 | val64 = val64 + 1; | ||
2949 | if(val64 == 3) | ||
2950 | { | ||
2951 | switch(type) | ||
2952 | { | ||
2953 | case 1: | ||
2954 | DBG_PRINT(ERR_DBG, "Take Xframe NIC out of " | ||
2955 | "service. Excessive temperatures may " | ||
2956 | "result in premature transceiver " | ||
2957 | "failure \n"); | ||
2958 | break; | ||
2959 | case 2: | ||
2960 | DBG_PRINT(ERR_DBG, "Take Xframe NIC out of " | ||
2961 | "service Excessive bias currents may " | ||
2962 | "indicate imminent laser diode " | ||
2963 | "failure \n"); | ||
2964 | break; | ||
2965 | case 3: | ||
2966 | DBG_PRINT(ERR_DBG, "Take Xframe NIC out of " | ||
2967 | "service Excessive laser output " | ||
2968 | "power may saturate far-end " | ||
2969 | "receiver\n"); | ||
2970 | break; | ||
2971 | default: | ||
2972 | DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm " | ||
2973 | "type \n"); | ||
2974 | } | ||
2975 | val64 = 0x0; | ||
2976 | } | ||
2977 | val64 = val64 << (index * 0x2); | ||
2978 | *regs_stat = (*regs_stat & (~mask)) | (val64); | ||
2979 | |||
2980 | } else { | ||
2981 | *regs_stat = *regs_stat & (~mask); | ||
2982 | } | ||
2983 | } | ||
2984 | |||
2985 | /** | ||
2986 | * s2io_updt_xpak_counter - Function to update the xpak counters | ||
2987 | * @dev : pointer to net_device struct | ||
2988 | * Description: | ||
2989 | * This function is to update the status of the xpak counter values | ||
2990 | * NONE | ||
2991 | */ | ||
2992 | static void s2io_updt_xpak_counter(struct net_device *dev) | ||
2993 | { | ||
2994 | u16 flag = 0x0; | ||
2995 | u16 type = 0x0; | ||
2996 | u16 val16 = 0x0; | ||
2997 | u64 val64 = 0x0; | ||
2998 | u64 addr = 0x0; | ||
2999 | |||
3000 | nic_t *sp = dev->priv; | ||
3001 | StatInfo_t *stat_info = sp->mac_control.stats_info; | ||
3002 | |||
3003 | /* Check the communication with the MDIO slave */ | ||
3004 | addr = 0x0000; | ||
3005 | val64 = 0x0; | ||
3006 | val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev); | ||
3007 | if((val64 == 0xFFFF) || (val64 == 0x0000)) | ||
3008 | { | ||
3009 | DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - " | ||
3010 | "Returned %llx\n", (unsigned long long)val64); | ||
3011 | return; | ||
3012 | } | ||
3013 | |||
3014 | /* Check for the expected value of 2040 at PMA address 0x0000 */ | ||
3015 | if(val64 != 0x2040) | ||
3016 | { | ||
3017 | DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "); | ||
3018 | DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n", | ||
3019 | (unsigned long long)val64); | ||
3020 | return; | ||
3021 | } | ||
3022 | |||
3023 | /* Loading the DOM register to MDIO register */ | ||
3024 | addr = 0xA100; | ||
3025 | s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev); | ||
3026 | val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev); | ||
3027 | |||
3028 | /* Reading the Alarm flags */ | ||
3029 | addr = 0xA070; | ||
3030 | val64 = 0x0; | ||
3031 | val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev); | ||
3032 | |||
3033 | flag = CHECKBIT(val64, 0x7); | ||
3034 | type = 1; | ||
3035 | s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high, | ||
3036 | &stat_info->xpak_stat.xpak_regs_stat, | ||
3037 | 0x0, flag, type); | ||
3038 | |||
3039 | if(CHECKBIT(val64, 0x6)) | ||
3040 | stat_info->xpak_stat.alarm_transceiver_temp_low++; | ||
3041 | |||
3042 | flag = CHECKBIT(val64, 0x3); | ||
3043 | type = 2; | ||
3044 | s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high, | ||
3045 | &stat_info->xpak_stat.xpak_regs_stat, | ||
3046 | 0x2, flag, type); | ||
3047 | |||
3048 | if(CHECKBIT(val64, 0x2)) | ||
3049 | stat_info->xpak_stat.alarm_laser_bias_current_low++; | ||
3050 | |||
3051 | flag = CHECKBIT(val64, 0x1); | ||
3052 | type = 3; | ||
3053 | s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high, | ||
3054 | &stat_info->xpak_stat.xpak_regs_stat, | ||
3055 | 0x4, flag, type); | ||
3056 | |||
3057 | if(CHECKBIT(val64, 0x0)) | ||
3058 | stat_info->xpak_stat.alarm_laser_output_power_low++; | ||
3059 | |||
3060 | /* Reading the Warning flags */ | ||
3061 | addr = 0xA074; | ||
3062 | val64 = 0x0; | ||
3063 | val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev); | ||
3064 | |||
3065 | if(CHECKBIT(val64, 0x7)) | ||
3066 | stat_info->xpak_stat.warn_transceiver_temp_high++; | ||
3067 | |||
3068 | if(CHECKBIT(val64, 0x6)) | ||
3069 | stat_info->xpak_stat.warn_transceiver_temp_low++; | ||
3070 | |||
3071 | if(CHECKBIT(val64, 0x3)) | ||
3072 | stat_info->xpak_stat.warn_laser_bias_current_high++; | ||
3073 | |||
3074 | if(CHECKBIT(val64, 0x2)) | ||
3075 | stat_info->xpak_stat.warn_laser_bias_current_low++; | ||
3076 | |||
3077 | if(CHECKBIT(val64, 0x1)) | ||
3078 | stat_info->xpak_stat.warn_laser_output_power_high++; | ||
3079 | |||
3080 | if(CHECKBIT(val64, 0x0)) | ||
3081 | stat_info->xpak_stat.warn_laser_output_power_low++; | ||
3082 | } | ||
3083 | |||
3084 | /** | ||
2777 | * alarm_intr_handler - Alarm Interrupt handler | 3085 | * alarm_intr_handler - Alarm Interrupt handler |
2778 | * @nic: device private variable | 3086 | * @nic: device private variable |
2779 | * Description: If the interrupt was neither because of Rx packet or Tx | 3087 | * Description: If the interrupt was neither because of Rx packet or Tx |
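The MDIO helpers added above follow the two-phase, Clause-45 style access that the new MDIO_* register macros encode: an address transaction selects the register within the MMD, then a separate read or write data transaction moves the value, each strobed with MDIO_CTRL_START_TRANS and followed by a short udelay(). A condensed usage sketch, reusing the constants the patch itself checks in s2io_updt_xpak_counter():

    /* Illustrative only: identify the XPAK PMA device via the new helpers. */
    u64 id = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, 0x0000, dev);
    if (id == 0x0000 || id == 0xFFFF)
            DBG_PRINT(ERR_DBG, "MDIO slave not responding\n");
    else if (id != 0x2040)          /* value the patch expects at PMA 0x0000 */
            DBG_PRINT(ERR_DBG, "unexpected PMA id %llx\n",
                      (unsigned long long)id);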
@@ -2790,6 +3098,18 @@ static void alarm_intr_handler(struct s2io_nic *nic) | |||
2790 | struct net_device *dev = (struct net_device *) nic->dev; | 3098 | struct net_device *dev = (struct net_device *) nic->dev; |
2791 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 3099 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
2792 | register u64 val64 = 0, err_reg = 0; | 3100 | register u64 val64 = 0, err_reg = 0; |
3101 | u64 cnt; | ||
3102 | int i; | ||
3103 | nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0; | ||
3104 | /* Handling the XPAK counters update */ | ||
3105 | if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) { | ||
3106 | /* waiting for an hour */ | ||
3107 | nic->mac_control.stats_info->xpak_stat.xpak_timer_count++; | ||
3108 | } else { | ||
3109 | s2io_updt_xpak_counter(dev); | ||
3110 | /* reset the count to zero */ | ||
3111 | nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0; | ||
3112 | } | ||
2793 | 3113 | ||
2794 | /* Handling link status change error Intr */ | 3114 | /* Handling link status change error Intr */ |
2795 | if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) { | 3115 | if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) { |
@@ -2816,6 +3136,8 @@ static void alarm_intr_handler(struct s2io_nic *nic) | |||
2816 | MC_ERR_REG_MIRI_ECC_DB_ERR_1)) { | 3136 | MC_ERR_REG_MIRI_ECC_DB_ERR_1)) { |
2817 | netif_stop_queue(dev); | 3137 | netif_stop_queue(dev); |
2818 | schedule_work(&nic->rst_timer_task); | 3138 | schedule_work(&nic->rst_timer_task); |
3139 | nic->mac_control.stats_info->sw_stat. | ||
3140 | soft_reset_cnt++; | ||
2819 | } | 3141 | } |
2820 | } | 3142 | } |
2821 | } else { | 3143 | } else { |
@@ -2827,11 +3149,13 @@ static void alarm_intr_handler(struct s2io_nic *nic) | |||
2827 | /* In case of a serious error, the device will be Reset. */ | 3149 | /* In case of a serious error, the device will be Reset. */ |
2828 | val64 = readq(&bar0->serr_source); | 3150 | val64 = readq(&bar0->serr_source); |
2829 | if (val64 & SERR_SOURCE_ANY) { | 3151 | if (val64 & SERR_SOURCE_ANY) { |
3152 | nic->mac_control.stats_info->sw_stat.serious_err_cnt++; | ||
2830 | DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name); | 3153 | DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name); |
2831 | DBG_PRINT(ERR_DBG, "serious error %llx!!\n", | 3154 | DBG_PRINT(ERR_DBG, "serious error %llx!!\n", |
2832 | (unsigned long long)val64); | 3155 | (unsigned long long)val64); |
2833 | netif_stop_queue(dev); | 3156 | netif_stop_queue(dev); |
2834 | schedule_work(&nic->rst_timer_task); | 3157 | schedule_work(&nic->rst_timer_task); |
3158 | nic->mac_control.stats_info->sw_stat.soft_reset_cnt++; | ||
2835 | } | 3159 | } |
2836 | 3160 | ||
2837 | /* | 3161 | /* |
@@ -2849,6 +3173,35 @@ static void alarm_intr_handler(struct s2io_nic *nic) | |||
2849 | ac = readq(&bar0->adapter_control); | 3173 | ac = readq(&bar0->adapter_control); |
2850 | schedule_work(&nic->set_link_task); | 3174 | schedule_work(&nic->set_link_task); |
2851 | } | 3175 | } |
3176 | /* Check for data parity error */ | ||
3177 | val64 = readq(&bar0->pic_int_status); | ||
3178 | if (val64 & PIC_INT_GPIO) { | ||
3179 | val64 = readq(&bar0->gpio_int_reg); | ||
3180 | if (val64 & GPIO_INT_REG_DP_ERR_INT) { | ||
3181 | nic->mac_control.stats_info->sw_stat.parity_err_cnt++; | ||
3182 | schedule_work(&nic->rst_timer_task); | ||
3183 | nic->mac_control.stats_info->sw_stat.soft_reset_cnt++; | ||
3184 | } | ||
3185 | } | ||
3186 | |||
3187 | /* Check for ring full counter */ | ||
3188 | if (nic->device_type & XFRAME_II_DEVICE) { | ||
3189 | val64 = readq(&bar0->ring_bump_counter1); | ||
3190 | for (i=0; i<4; i++) { | ||
3191 | cnt = ( val64 & vBIT(0xFFFF,(i*16),16)); | ||
3192 | cnt >>= 64 - ((i+1)*16); | ||
3193 | nic->mac_control.stats_info->sw_stat.ring_full_cnt | ||
3194 | += cnt; | ||
3195 | } | ||
3196 | |||
3197 | val64 = readq(&bar0->ring_bump_counter2); | ||
3198 | for (i=0; i<4; i++) { | ||
3199 | cnt = ( val64 & vBIT(0xFFFF,(i*16),16)); | ||
3200 | cnt >>= 64 - ((i+1)*16); | ||
3201 | nic->mac_control.stats_info->sw_stat.ring_full_cnt | ||
3202 | += cnt; | ||
3203 | } | ||
3204 | } | ||
2852 | 3205 | ||
2853 | /* Other type of interrupts are not being handled now, TODO */ | 3206 | /* Other type of interrupts are not being handled now, TODO */ |
2854 | } | 3207 | } |
@@ -2864,23 +3217,26 @@ static void alarm_intr_handler(struct s2io_nic *nic) | |||
2864 | * SUCCESS on success and FAILURE on failure. | 3217 | * SUCCESS on success and FAILURE on failure. |
2865 | */ | 3218 | */ |
2866 | 3219 | ||
2867 | static int wait_for_cmd_complete(nic_t * sp) | 3220 | static int wait_for_cmd_complete(void *addr, u64 busy_bit) |
2868 | { | 3221 | { |
2869 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | ||
2870 | int ret = FAILURE, cnt = 0; | 3222 | int ret = FAILURE, cnt = 0; |
2871 | u64 val64; | 3223 | u64 val64; |
2872 | 3224 | ||
2873 | while (TRUE) { | 3225 | while (TRUE) { |
2874 | val64 = readq(&bar0->rmac_addr_cmd_mem); | 3226 | val64 = readq(addr); |
2875 | if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) { | 3227 | if (!(val64 & busy_bit)) { |
2876 | ret = SUCCESS; | 3228 | ret = SUCCESS; |
2877 | break; | 3229 | break; |
2878 | } | 3230 | } |
2879 | msleep(50); | 3231 | |
3232 | if(in_interrupt()) | ||
3233 | mdelay(50); | ||
3234 | else | ||
3235 | msleep(50); | ||
3236 | |||
2880 | if (cnt++ > 10) | 3237 | if (cnt++ > 10) |
2881 | break; | 3238 | break; |
2882 | } | 3239 | } |
2883 | |||
2884 | return ret; | 3240 | return ret; |
2885 | } | 3241 | } |
2886 | 3242 | ||
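wait_for_cmd_complete() is generalised above from polling one hard-coded register to polling any (register, busy-bit) pair, and it now uses mdelay() when called from interrupt context so atomic callers do not sleep. A hypothetical call site, using the register and bit names from the code it replaces:

    /* Sketch of a caller of the generalised helper (names from the old code). */
    if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                              RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING) == FAILURE)
            return FAILURE;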
@@ -2919,6 +3275,9 @@ static void s2io_reset(nic_t * sp) | |||
2919 | * PCI write to sw_reset register is done by this time. | 3275 | * PCI write to sw_reset register is done by this time. |
2920 | */ | 3276 | */ |
2921 | msleep(250); | 3277 | msleep(250); |
3278 | if (strstr(sp->product_name, "CX4")) { | ||
3279 | msleep(750); | ||
3280 | } | ||
2922 | 3281 | ||
2923 | /* Restore the PCI state saved during initialization. */ | 3282 | /* Restore the PCI state saved during initialization. */ |
2924 | pci_restore_state(sp->pdev); | 3283 | pci_restore_state(sp->pdev); |
@@ -3137,7 +3496,7 @@ static void restore_xmsi_data(nic_t *nic) | |||
3137 | u64 val64; | 3496 | u64 val64; |
3138 | int i; | 3497 | int i; |
3139 | 3498 | ||
3140 | for (i=0; i< MAX_REQUESTED_MSI_X; i++) { | 3499 | for (i=0; i< nic->avail_msix_vectors; i++) { |
3141 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); | 3500 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); |
3142 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); | 3501 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); |
3143 | val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); | 3502 | val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); |
@@ -3156,7 +3515,7 @@ static void store_xmsi_data(nic_t *nic) | |||
3156 | int i; | 3515 | int i; |
3157 | 3516 | ||
3158 | /* Store and display */ | 3517 | /* Store and display */ |
3159 | for (i=0; i< MAX_REQUESTED_MSI_X; i++) { | 3518 | for (i=0; i< nic->avail_msix_vectors; i++) { |
3160 | val64 = (BIT(15) | vBIT(i, 26, 6)); | 3519 | val64 = (BIT(15) | vBIT(i, 26, 6)); |
3161 | writeq(val64, &bar0->xmsi_access); | 3520 | writeq(val64, &bar0->xmsi_access); |
3162 | if (wait_for_msix_trans(nic, i)) { | 3521 | if (wait_for_msix_trans(nic, i)) { |
@@ -3284,15 +3643,24 @@ static int s2io_enable_msi_x(nic_t *nic) | |||
3284 | writeq(tx_mat, &bar0->tx_mat0_n[7]); | 3643 | writeq(tx_mat, &bar0->tx_mat0_n[7]); |
3285 | } | 3644 | } |
3286 | 3645 | ||
3646 | nic->avail_msix_vectors = 0; | ||
3287 | ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); | 3647 | ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); |
3648 | /* We fail init if error or we get less vectors than min required */ | ||
3649 | if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) { | ||
3650 | nic->avail_msix_vectors = ret; | ||
3651 | ret = pci_enable_msix(nic->pdev, nic->entries, ret); | ||
3652 | } | ||
3288 | if (ret) { | 3653 | if (ret) { |
3289 | DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); | 3654 | DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); |
3290 | kfree(nic->entries); | 3655 | kfree(nic->entries); |
3291 | kfree(nic->s2io_entries); | 3656 | kfree(nic->s2io_entries); |
3292 | nic->entries = NULL; | 3657 | nic->entries = NULL; |
3293 | nic->s2io_entries = NULL; | 3658 | nic->s2io_entries = NULL; |
3659 | nic->avail_msix_vectors = 0; | ||
3294 | return -ENOMEM; | 3660 | return -ENOMEM; |
3295 | } | 3661 | } |
3662 | if (!nic->avail_msix_vectors) | ||
3663 | nic->avail_msix_vectors = MAX_REQUESTED_MSI_X; | ||
3296 | 3664 | ||
3297 | /* | 3665 | /* |
3298 | * To enable MSI-X, MSI also needs to be enabled, due to a bug | 3666 | * To enable MSI-X, MSI also needs to be enabled, due to a bug |
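The MSI-X hunk above adds the standard fallback for pci_enable_msix(): a positive return value means the request could not be met but reports how many vectors the device can provide, so the driver retries with that count as long as it still covers one vector per Tx FIFO, one per Rx ring and one extra. Sketched below; min_vectors is a hypothetical name standing in for the tx_fifo_num + rx_ring_num + 1 expression the patch uses:

    /* Sketch of the MSI-X fallback pattern used above. */
    ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
    if (ret > 0 && ret >= min_vectors) {        /* fewer than asked, but enough */
            nic->avail_msix_vectors = ret;
            ret = pci_enable_msix(nic->pdev, nic->entries, ret);
    }
    if (ret)
            goto msix_failed;                   /* clean up as in the patch */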
@@ -3325,8 +3693,6 @@ static int s2io_open(struct net_device *dev) | |||
3325 | { | 3693 | { |
3326 | nic_t *sp = dev->priv; | 3694 | nic_t *sp = dev->priv; |
3327 | int err = 0; | 3695 | int err = 0; |
3328 | int i; | ||
3329 | u16 msi_control; /* Temp variable */ | ||
3330 | 3696 | ||
3331 | /* | 3697 | /* |
3332 | * Make sure you have link off by default every time | 3698 | * Make sure you have link off by default every time |
@@ -3336,11 +3702,14 @@ static int s2io_open(struct net_device *dev) | |||
3336 | sp->last_link_state = 0; | 3702 | sp->last_link_state = 0; |
3337 | 3703 | ||
3338 | /* Initialize H/W and enable interrupts */ | 3704 | /* Initialize H/W and enable interrupts */ |
3339 | if (s2io_card_up(sp)) { | 3705 | err = s2io_card_up(sp); |
3706 | if (err) { | ||
3340 | DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", | 3707 | DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", |
3341 | dev->name); | 3708 | dev->name); |
3342 | err = -ENODEV; | 3709 | if (err == -ENODEV) |
3343 | goto hw_init_failed; | 3710 | goto hw_init_failed; |
3711 | else | ||
3712 | goto hw_enable_failed; | ||
3344 | } | 3713 | } |
3345 | 3714 | ||
3346 | /* Store the values of the MSIX table in the nic_t structure */ | 3715 | /* Store the values of the MSIX table in the nic_t structure */ |
@@ -3357,6 +3726,8 @@ failed\n", dev->name); | |||
3357 | } | 3726 | } |
3358 | } | 3727 | } |
3359 | if (sp->intr_type == MSI_X) { | 3728 | if (sp->intr_type == MSI_X) { |
3729 | int i; | ||
3730 | |||
3360 | for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { | 3731 | for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { |
3361 | if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { | 3732 | if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { |
3362 | sprintf(sp->desc1, "%s:MSI-X-%d-TX", | 3733 | sprintf(sp->desc1, "%s:MSI-X-%d-TX", |
@@ -3409,24 +3780,26 @@ setting_mac_address_failed: | |||
3409 | isr_registration_failed: | 3780 | isr_registration_failed: |
3410 | del_timer_sync(&sp->alarm_timer); | 3781 | del_timer_sync(&sp->alarm_timer); |
3411 | if (sp->intr_type == MSI_X) { | 3782 | if (sp->intr_type == MSI_X) { |
3412 | if (sp->device_type == XFRAME_II_DEVICE) { | 3783 | int i; |
3413 | for (i=1; (sp->s2io_entries[i].in_use == | 3784 | u16 msi_control; /* Temp variable */ |
3414 | MSIX_REGISTERED_SUCCESS); i++) { | ||
3415 | int vector = sp->entries[i].vector; | ||
3416 | void *arg = sp->s2io_entries[i].arg; | ||
3417 | 3785 | ||
3418 | free_irq(vector, arg); | 3786 | for (i=1; (sp->s2io_entries[i].in_use == |
3419 | } | 3787 | MSIX_REGISTERED_SUCCESS); i++) { |
3420 | pci_disable_msix(sp->pdev); | 3788 | int vector = sp->entries[i].vector; |
3789 | void *arg = sp->s2io_entries[i].arg; | ||
3421 | 3790 | ||
3422 | /* Temp */ | 3791 | free_irq(vector, arg); |
3423 | pci_read_config_word(sp->pdev, 0x42, &msi_control); | ||
3424 | msi_control &= 0xFFFE; /* Disable MSI */ | ||
3425 | pci_write_config_word(sp->pdev, 0x42, msi_control); | ||
3426 | } | 3792 | } |
3793 | pci_disable_msix(sp->pdev); | ||
3794 | |||
3795 | /* Temp */ | ||
3796 | pci_read_config_word(sp->pdev, 0x42, &msi_control); | ||
3797 | msi_control &= 0xFFFE; /* Disable MSI */ | ||
3798 | pci_write_config_word(sp->pdev, 0x42, msi_control); | ||
3427 | } | 3799 | } |
3428 | else if (sp->intr_type == MSI) | 3800 | else if (sp->intr_type == MSI) |
3429 | pci_disable_msi(sp->pdev); | 3801 | pci_disable_msi(sp->pdev); |
3802 | hw_enable_failed: | ||
3430 | s2io_reset(sp); | 3803 | s2io_reset(sp); |
3431 | hw_init_failed: | 3804 | hw_init_failed: |
3432 | if (sp->intr_type == MSI_X) { | 3805 | if (sp->intr_type == MSI_X) { |
@@ -3454,35 +3827,12 @@ hw_init_failed: | |||
3454 | static int s2io_close(struct net_device *dev) | 3827 | static int s2io_close(struct net_device *dev) |
3455 | { | 3828 | { |
3456 | nic_t *sp = dev->priv; | 3829 | nic_t *sp = dev->priv; |
3457 | int i; | ||
3458 | u16 msi_control; | ||
3459 | 3830 | ||
3460 | flush_scheduled_work(); | 3831 | flush_scheduled_work(); |
3461 | netif_stop_queue(dev); | 3832 | netif_stop_queue(dev); |
3462 | /* Reset card, kill tasklet and free Tx and Rx buffers. */ | 3833 | /* Reset card, kill tasklet and free Tx and Rx buffers. */ |
3463 | s2io_card_down(sp); | 3834 | s2io_card_down(sp, 1); |
3464 | |||
3465 | if (sp->intr_type == MSI_X) { | ||
3466 | if (sp->device_type == XFRAME_II_DEVICE) { | ||
3467 | for (i=1; (sp->s2io_entries[i].in_use == | ||
3468 | MSIX_REGISTERED_SUCCESS); i++) { | ||
3469 | int vector = sp->entries[i].vector; | ||
3470 | void *arg = sp->s2io_entries[i].arg; | ||
3471 | 3835 | ||
3472 | free_irq(vector, arg); | ||
3473 | } | ||
3474 | pci_read_config_word(sp->pdev, 0x42, &msi_control); | ||
3475 | msi_control &= 0xFFFE; /* Disable MSI */ | ||
3476 | pci_write_config_word(sp->pdev, 0x42, msi_control); | ||
3477 | |||
3478 | pci_disable_msix(sp->pdev); | ||
3479 | } | ||
3480 | } | ||
3481 | else { | ||
3482 | free_irq(sp->pdev->irq, dev); | ||
3483 | if (sp->intr_type == MSI) | ||
3484 | pci_disable_msi(sp->pdev); | ||
3485 | } | ||
3486 | sp->device_close_flag = TRUE; /* Device is shut down. */ | 3836 | sp->device_close_flag = TRUE; /* Device is shut down. */ |
3487 | return 0; | 3837 | return 0; |
3488 | } | 3838 | } |
@@ -3545,7 +3895,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3545 | 3895 | ||
3546 | queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1; | 3896 | queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1; |
3547 | /* Avoid "put" pointer going beyond "get" pointer */ | 3897 | /* Avoid "put" pointer going beyond "get" pointer */ |
3548 | if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) { | 3898 | if (txdp->Host_Control || |
3899 | ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) { | ||
3549 | DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n"); | 3900 | DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n"); |
3550 | netif_stop_queue(dev); | 3901 | netif_stop_queue(dev); |
3551 | dev_kfree_skb(skb); | 3902 | dev_kfree_skb(skb); |
@@ -3655,11 +4006,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3655 | mmiowb(); | 4006 | mmiowb(); |
3656 | 4007 | ||
3657 | put_off++; | 4008 | put_off++; |
3658 | put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1; | 4009 | if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1) |
4010 | put_off = 0; | ||
3659 | mac_control->fifos[queue].tx_curr_put_info.offset = put_off; | 4011 | mac_control->fifos[queue].tx_curr_put_info.offset = put_off; |
3660 | 4012 | ||
3661 | /* Avoid "put" pointer going beyond "get" pointer */ | 4013 | /* Avoid "put" pointer going beyond "get" pointer */ |
3662 | if (((put_off + 1) % queue_len) == get_off) { | 4014 | if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) { |
4015 | sp->mac_control.stats_info->sw_stat.fifo_full_cnt++; | ||
3663 | DBG_PRINT(TX_DBG, | 4016 | DBG_PRINT(TX_DBG, |
3664 | "No free TxDs for xmit, Put: 0x%x Get:0x%x\n", | 4017 | "No free TxDs for xmit, Put: 0x%x Get:0x%x\n", |
3665 | put_off, get_off); | 4018 | put_off, get_off); |
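
The two hunks above replace the modulo-based TxD ring wraparound with an explicit compare-and-reset, avoiding a '%' in the hot transmit path. A minimal standalone sketch of the same technique (hypothetical names, not the driver's structures):

#include <stdio.h>

/* Advance a ring offset, wrapping with a compare instead of '%'. */
static unsigned int ring_advance(unsigned int off, unsigned int ring_len)
{
        off++;
        if (off == ring_len)
                off = 0;
        return off;
}

/* The ring is full when the next put position would collide with get. */
static int ring_is_full(unsigned int put_off, unsigned int get_off,
                        unsigned int ring_len)
{
        return ring_advance(put_off, ring_len) == get_off;
}

int main(void)
{
        unsigned int put = 7, get = 0, len = 8;

        printf("full=%d\n", ring_is_full(put, get, len)); /* 1: 7+1 wraps to 0 */
        return 0;
}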
@@ -3795,7 +4148,6 @@ s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs) | |||
3795 | atomic_dec(&sp->isr_cnt); | 4148 | atomic_dec(&sp->isr_cnt); |
3796 | return IRQ_HANDLED; | 4149 | return IRQ_HANDLED; |
3797 | } | 4150 | } |
3798 | |||
3799 | static void s2io_txpic_intr_handle(nic_t *sp) | 4151 | static void s2io_txpic_intr_handle(nic_t *sp) |
3800 | { | 4152 | { |
3801 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | 4153 | XENA_dev_config_t __iomem *bar0 = sp->bar0; |
@@ -3806,41 +4158,56 @@ static void s2io_txpic_intr_handle(nic_t *sp) | |||
3806 | val64 = readq(&bar0->gpio_int_reg); | 4158 | val64 = readq(&bar0->gpio_int_reg); |
3807 | if ((val64 & GPIO_INT_REG_LINK_DOWN) && | 4159 | if ((val64 & GPIO_INT_REG_LINK_DOWN) && |
3808 | (val64 & GPIO_INT_REG_LINK_UP)) { | 4160 | (val64 & GPIO_INT_REG_LINK_UP)) { |
4161 | /* | ||
4162 | * This is an unstable state, so clear both the up and down | ||
4163 | * interrupts and let the adapter re-evaluate the link state. | ||
4164 | */ | ||
3809 | val64 |= GPIO_INT_REG_LINK_DOWN; | 4165 | val64 |= GPIO_INT_REG_LINK_DOWN; |
3810 | val64 |= GPIO_INT_REG_LINK_UP; | 4166 | val64 |= GPIO_INT_REG_LINK_UP; |
3811 | writeq(val64, &bar0->gpio_int_reg); | 4167 | writeq(val64, &bar0->gpio_int_reg); |
3812 | goto masking; | ||
3813 | } | ||
3814 | |||
3815 | if (((sp->last_link_state == LINK_UP) && | ||
3816 | (val64 & GPIO_INT_REG_LINK_DOWN)) || | ||
3817 | ((sp->last_link_state == LINK_DOWN) && | ||
3818 | (val64 & GPIO_INT_REG_LINK_UP))) { | ||
3819 | val64 = readq(&bar0->gpio_int_mask); | 4168 | val64 = readq(&bar0->gpio_int_mask); |
3820 | val64 |= GPIO_INT_MASK_LINK_DOWN; | 4169 | val64 &= ~(GPIO_INT_MASK_LINK_UP | |
3821 | val64 |= GPIO_INT_MASK_LINK_UP; | 4170 | GPIO_INT_MASK_LINK_DOWN); |
3822 | writeq(val64, &bar0->gpio_int_mask); | 4171 | writeq(val64, &bar0->gpio_int_mask); |
3823 | s2io_set_link((unsigned long)sp); | ||
3824 | } | 4172 | } |
3825 | masking: | 4173 | else if (val64 & GPIO_INT_REG_LINK_UP) { |
3826 | if (sp->last_link_state == LINK_UP) { | 4174 | val64 = readq(&bar0->adapter_status); |
3827 | /*enable down interrupt */ | 4175 | if (verify_xena_quiescence(sp, val64, |
3828 | val64 = readq(&bar0->gpio_int_mask); | 4176 | sp->device_enabled_once)) { |
3829 | /* unmasks link down intr */ | 4177 | /* Enable Adapter */ |
3830 | val64 &= ~GPIO_INT_MASK_LINK_DOWN; | 4178 | val64 = readq(&bar0->adapter_control); |
3831 | /* masks link up intr */ | 4179 | val64 |= ADAPTER_CNTL_EN; |
3832 | val64 |= GPIO_INT_MASK_LINK_UP; | 4180 | writeq(val64, &bar0->adapter_control); |
3833 | writeq(val64, &bar0->gpio_int_mask); | 4181 | val64 |= ADAPTER_LED_ON; |
3834 | } else { | 4182 | writeq(val64, &bar0->adapter_control); |
3835 | /*enable UP Interrupt */ | 4183 | if (!sp->device_enabled_once) |
3836 | val64 = readq(&bar0->gpio_int_mask); | 4184 | sp->device_enabled_once = 1; |
3837 | /* unmasks link up interrupt */ | 4185 | |
3838 | val64 &= ~GPIO_INT_MASK_LINK_UP; | 4186 | s2io_link(sp, LINK_UP); |
3839 | /* masks link down interrupt */ | 4187 | /* |
3840 | val64 |= GPIO_INT_MASK_LINK_DOWN; | 4188 | * unmask link down interrupt and mask link-up |
3841 | writeq(val64, &bar0->gpio_int_mask); | 4189 | * intr |
4190 | */ | ||
4191 | val64 = readq(&bar0->gpio_int_mask); | ||
4192 | val64 &= ~GPIO_INT_MASK_LINK_DOWN; | ||
4193 | val64 |= GPIO_INT_MASK_LINK_UP; | ||
4194 | writeq(val64, &bar0->gpio_int_mask); | ||
4195 | |||
4196 | } | ||
4197 | } else if (val64 & GPIO_INT_REG_LINK_DOWN) { | ||
4198 | val64 = readq(&bar0->adapter_status); | ||
4199 | if (verify_xena_quiescence(sp, val64, | ||
4200 | sp->device_enabled_once)) { | ||
4201 | s2io_link(sp, LINK_DOWN); | ||
4202 | /* Link is down so unmask the link up interrupt */ | ||
4203 | val64 = readq(&bar0->gpio_int_mask); | ||
4204 | val64 &= ~GPIO_INT_MASK_LINK_UP; | ||
4205 | val64 |= GPIO_INT_MASK_LINK_DOWN; | ||
4206 | writeq(val64, &bar0->gpio_int_mask); | ||
4207 | } | ||
3842 | } | 4208 | } |
3843 | } | 4209 | } |
4210 | val64 = readq(&bar0->gpio_int_mask); | ||
3844 | } | 4211 | } |
3845 | 4212 | ||
3846 | /** | 4213 | /** |
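
The rewritten handler above follows an edge-arming pattern: after servicing a link-up event it masks further link-up interrupts and unmasks link-down, and the reverse after a link-down, so only the next transition raises an interrupt. A simplified, standalone sketch of that masking step (the bit values and mask variable are illustrative, not the real gpio_int_mask layout):

#include <stdint.h>
#include <stdio.h>

#define INT_MASK_LINK_UP   (1u << 0)    /* 1 = source masked */
#define INT_MASK_LINK_DOWN (1u << 1)

static uint32_t gpio_int_mask;

static void arm_for_next_transition(int link_is_up)
{
        if (link_is_up) {
                gpio_int_mask &= ~INT_MASK_LINK_DOWN;   /* watch for the drop */
                gpio_int_mask |= INT_MASK_LINK_UP;      /* ignore repeat ups */
        } else {
                gpio_int_mask &= ~INT_MASK_LINK_UP;     /* watch for recovery */
                gpio_int_mask |= INT_MASK_LINK_DOWN;    /* ignore repeat downs */
        }
}

int main(void)
{
        arm_for_next_transition(1);
        printf("mask=%#x\n", (unsigned)gpio_int_mask);  /* only LINK_DOWN unmasked */
        return 0;
}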
@@ -3863,7 +4230,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) | |||
3863 | nic_t *sp = dev->priv; | 4230 | nic_t *sp = dev->priv; |
3864 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | 4231 | XENA_dev_config_t __iomem *bar0 = sp->bar0; |
3865 | int i; | 4232 | int i; |
3866 | u64 reason = 0, val64; | 4233 | u64 reason = 0, val64, org_mask; |
3867 | mac_info_t *mac_control; | 4234 | mac_info_t *mac_control; |
3868 | struct config_param *config; | 4235 | struct config_param *config; |
3869 | 4236 | ||
@@ -3887,43 +4254,41 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) | |||
3887 | return IRQ_NONE; | 4254 | return IRQ_NONE; |
3888 | } | 4255 | } |
3889 | 4256 | ||
4257 | val64 = 0xFFFFFFFFFFFFFFFFULL; | ||
4258 | /* Store current mask before masking all interrupts */ | ||
4259 | org_mask = readq(&bar0->general_int_mask); | ||
4260 | writeq(val64, &bar0->general_int_mask); | ||
4261 | |||
3890 | #ifdef CONFIG_S2IO_NAPI | 4262 | #ifdef CONFIG_S2IO_NAPI |
3891 | if (reason & GEN_INTR_RXTRAFFIC) { | 4263 | if (reason & GEN_INTR_RXTRAFFIC) { |
3892 | if (netif_rx_schedule_prep(dev)) { | 4264 | if (netif_rx_schedule_prep(dev)) { |
3893 | en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR, | 4265 | writeq(val64, &bar0->rx_traffic_mask); |
3894 | DISABLE_INTRS); | ||
3895 | __netif_rx_schedule(dev); | 4266 | __netif_rx_schedule(dev); |
3896 | } | 4267 | } |
3897 | } | 4268 | } |
3898 | #else | 4269 | #else |
3899 | /* If Intr is because of Rx Traffic */ | 4270 | /* |
3900 | if (reason & GEN_INTR_RXTRAFFIC) { | 4271 | * Rx handler is called by default, without checking for the |
3901 | /* | 4272 | * cause of interrupt. |
3902 | * rx_traffic_int reg is an R1 register, writing all 1's | 4273 | * rx_traffic_int reg is an R1 register, writing all 1's |
3903 | * will ensure that the actual interrupt causing bit gets | 4274 | * will ensure that the actual interrupt causing bit gets |
3904 | * cleared and hence a read can be avoided. | 4275 | * cleared and hence a read can be avoided. |
3905 | */ | 4276 | */ |
3906 | val64 = 0xFFFFFFFFFFFFFFFFULL; | 4277 | writeq(val64, &bar0->rx_traffic_int); |
3907 | writeq(val64, &bar0->rx_traffic_int); | 4278 | for (i = 0; i < config->rx_ring_num; i++) { |
3908 | for (i = 0; i < config->rx_ring_num; i++) { | 4279 | rx_intr_handler(&mac_control->rings[i]); |
3909 | rx_intr_handler(&mac_control->rings[i]); | ||
3910 | } | ||
3911 | } | 4280 | } |
3912 | #endif | 4281 | #endif |
3913 | 4282 | ||
3914 | /* If Intr is because of Tx Traffic */ | 4283 | /* |
3915 | if (reason & GEN_INTR_TXTRAFFIC) { | 4284 | * tx_traffic_int reg is an R1 register, writing all 1's |
3916 | /* | 4285 | * will ensure that the actual interrupt causing bit get's |
3917 | * tx_traffic_int reg is an R1 register, writing all 1's | 4285 | * will ensure that the actual interrupt causing bit gets |
3918 | * will ensure that the actual interrupt causing bit gets | 4286 | * cleared and hence a read can be avoided. |
3919 | * cleared and hence a read can be avoided. | 4288 | writeq(val64, &bar0->tx_traffic_int); |
3920 | */ | ||
3921 | val64 = 0xFFFFFFFFFFFFFFFFULL; | ||
3922 | writeq(val64, &bar0->tx_traffic_int); | ||
3923 | 4289 | ||
3924 | for (i = 0; i < config->tx_fifo_num; i++) | 4290 | for (i = 0; i < config->tx_fifo_num; i++) |
3925 | tx_intr_handler(&mac_control->fifos[i]); | 4291 | tx_intr_handler(&mac_control->fifos[i]); |
3926 | } | ||
3927 | 4292 | ||
3928 | if (reason & GEN_INTR_TXPIC) | 4293 | if (reason & GEN_INTR_TXPIC) |
3929 | s2io_txpic_intr_handle(sp); | 4294 | s2io_txpic_intr_handle(sp); |
@@ -3949,6 +4314,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) | |||
3949 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | 4314 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); |
3950 | clear_bit(0, (&sp->tasklet_status)); | 4315 | clear_bit(0, (&sp->tasklet_status)); |
3951 | atomic_dec(&sp->isr_cnt); | 4316 | atomic_dec(&sp->isr_cnt); |
4317 | writeq(org_mask, &bar0->general_int_mask); | ||
3952 | return IRQ_HANDLED; | 4318 | return IRQ_HANDLED; |
3953 | } | 4319 | } |
3954 | clear_bit(0, (&sp->tasklet_status)); | 4320 | clear_bit(0, (&sp->tasklet_status)); |
@@ -3964,7 +4330,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) | |||
3964 | } | 4330 | } |
3965 | } | 4331 | } |
3966 | #endif | 4332 | #endif |
3967 | 4333 | writeq(org_mask, &bar0->general_int_mask); | |
3968 | atomic_dec(&sp->isr_cnt); | 4334 | atomic_dec(&sp->isr_cnt); |
3969 | return IRQ_HANDLED; | 4335 | return IRQ_HANDLED; |
3970 | } | 4336 | } |
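
The ISR changes above adopt a save/mask-all/restore discipline: the current general interrupt mask is read once, every source is masked while the Tx/Rx causes are serviced, and the original mask is written back on every return path. A userspace sketch of that shape, with a plain variable standing in for readq()/writeq() on the mask register:

#include <stdint.h>
#include <stdio.h>

static uint64_t general_int_mask;       /* stand-in for bar0->general_int_mask */

static void service_tx_rx(void)
{
        /* Tx/Rx cause handling would run here with all sources masked. */
}

static void isr(void)
{
        uint64_t org_mask = general_int_mask;   /* remember caller's mask */

        general_int_mask = ~0ULL;               /* mask everything while we work */
        service_tx_rx();
        general_int_mask = org_mask;            /* restore on every exit path */
}

int main(void)
{
        general_int_mask = 0x00ff;
        isr();
        printf("mask restored to %#llx\n",
               (unsigned long long)general_int_mask);
        return 0;
}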
@@ -4067,7 +4433,8 @@ static void s2io_set_multicast(struct net_device *dev) | |||
4067 | RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET); | 4433 | RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET); |
4068 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 4434 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
4069 | /* Wait till command completes */ | 4435 | /* Wait till command completes */ |
4070 | wait_for_cmd_complete(sp); | 4436 | wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
4437 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING); | ||
4071 | 4438 | ||
4072 | sp->m_cast_flg = 1; | 4439 | sp->m_cast_flg = 1; |
4073 | sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET; | 4440 | sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET; |
@@ -4082,7 +4449,8 @@ static void s2io_set_multicast(struct net_device *dev) | |||
4082 | RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); | 4449 | RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); |
4083 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 4450 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
4084 | /* Wait till command completes */ | 4451 | /* Wait till command completes */ |
4085 | wait_for_cmd_complete(sp); | 4452 | wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
4453 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING); | ||
4086 | 4454 | ||
4087 | sp->m_cast_flg = 0; | 4455 | sp->m_cast_flg = 0; |
4088 | sp->all_multi_pos = 0; | 4456 | sp->all_multi_pos = 0; |
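
wait_for_cmd_complete() now takes the register address and the busy bit to poll instead of the nic_t pointer, so the same helper can watch any command register. A rough standalone sketch of that polling contract (the simulated register, retry count and delay are assumptions, not the driver's constants):

#include <stdint.h>
#include <stdio.h>

static uint64_t cmd_reg;        /* stands in for e.g. bar0->rmac_addr_cmd_mem */

static uint64_t readq_sim(void)
{
        return cmd_reg;
}

/* Poll until busy_bit clears, giving up after max_tries iterations. */
static int wait_for_cmd_complete(uint64_t busy_bit, int max_tries)
{
        int i;

        for (i = 0; i < max_tries; i++) {
                if (!(readq_sim() & busy_bit))
                        return 0;       /* command finished */
                /* the driver would msleep()/udelay() between polls */
        }
        return -1;                      /* still busy after the bounded wait */
}

int main(void)
{
        cmd_reg = 0;    /* pretend the strobe already cleared */
        printf("%d\n", wait_for_cmd_complete(1ULL << 48, 5));
        return 0;
}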
@@ -4147,7 +4515,8 @@ static void s2io_set_multicast(struct net_device *dev) | |||
4147 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 4515 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
4148 | 4516 | ||
4149 | /* Wait for command completes */ | 4517 | /* Wait for command completes */ |
4150 | if (wait_for_cmd_complete(sp)) { | 4518 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
4519 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) { | ||
4151 | DBG_PRINT(ERR_DBG, "%s: Adding ", | 4520 | DBG_PRINT(ERR_DBG, "%s: Adding ", |
4152 | dev->name); | 4521 | dev->name); |
4153 | DBG_PRINT(ERR_DBG, "Multicasts failed\n"); | 4522 | DBG_PRINT(ERR_DBG, "Multicasts failed\n"); |
@@ -4177,7 +4546,8 @@ static void s2io_set_multicast(struct net_device *dev) | |||
4177 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 4546 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
4178 | 4547 | ||
4179 | /* Wait for command completes */ | 4548 | /* Wait for command completes */ |
4180 | if (wait_for_cmd_complete(sp)) { | 4549 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
4550 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) { | ||
4181 | DBG_PRINT(ERR_DBG, "%s: Adding ", | 4551 | DBG_PRINT(ERR_DBG, "%s: Adding ", |
4182 | dev->name); | 4552 | dev->name); |
4183 | DBG_PRINT(ERR_DBG, "Multicasts failed\n"); | 4553 | DBG_PRINT(ERR_DBG, "Multicasts failed\n"); |
@@ -4222,7 +4592,8 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr) | |||
4222 | RMAC_ADDR_CMD_MEM_OFFSET(0); | 4592 | RMAC_ADDR_CMD_MEM_OFFSET(0); |
4223 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 4593 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
4224 | /* Wait till command completes */ | 4594 | /* Wait till command completes */ |
4225 | if (wait_for_cmd_complete(sp)) { | 4595 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
4596 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) { | ||
4226 | DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name); | 4597 | DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name); |
4227 | return FAILURE; | 4598 | return FAILURE; |
4228 | } | 4599 | } |
@@ -4619,6 +4990,44 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt) | |||
4619 | } | 4990 | } |
4620 | return ret; | 4991 | return ret; |
4621 | } | 4992 | } |
4993 | static void s2io_vpd_read(nic_t *nic) | ||
4994 | { | ||
4995 | u8 vpd_data[256], data; | ||
4996 | int i=0, cnt, fail = 0; | ||
4997 | int vpd_addr = 0x80; | ||
4998 | |||
4999 | if (nic->device_type == XFRAME_II_DEVICE) { | ||
5000 | strcpy(nic->product_name, "Xframe II 10GbE network adapter"); | ||
5001 | vpd_addr = 0x80; | ||
5002 | } | ||
5003 | else { | ||
5004 | strcpy(nic->product_name, "Xframe I 10GbE network adapter"); | ||
5005 | vpd_addr = 0x50; | ||
5006 | } | ||
5007 | |||
5008 | for (i = 0; i < 256; i += 4) { | ||
5009 | pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); | ||
5010 | pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); | ||
5011 | pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0); | ||
5012 | for (cnt = 0; cnt < 5; cnt++) { | ||
5013 | msleep(2); | ||
5014 | pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data); | ||
5015 | if (data == 0x80) | ||
5016 | break; | ||
5017 | } | ||
5018 | if (cnt >= 5) { | ||
5019 | DBG_PRINT(ERR_DBG, "Read of VPD data failed\n"); | ||
5020 | fail = 1; | ||
5021 | break; | ||
5022 | } | ||
5023 | pci_read_config_dword(nic->pdev, (vpd_addr + 4), | ||
5024 | (u32 *)&vpd_data[i]); | ||
5025 | } | ||
5026 | if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) { | ||
5027 | memset(nic->product_name, 0, vpd_data[1]); | ||
5028 | memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); | ||
5029 | } | ||
5030 | } | ||
4622 | 5031 | ||
4623 | /** | 5032 | /** |
4624 | * s2io_ethtool_geeprom - reads the value stored in the Eeprom. | 5033 | * s2io_ethtool_geeprom - reads the value stored in the Eeprom. |
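
s2io_vpd_read() above uses the PCI VPD handshake: write the word address into the capability, poll the flag byte until the device sets 0x80, then read the 32-bit data register. A self-contained sketch of that loop with a trivially fake device in place of the pci_*_config_* accessors (everything here is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fake "device": the flag is ready immediately and every data dword is 0. */
static uint8_t  vpd_read_flag(void)           { return 0x80; }
static uint32_t vpd_read_dword(uint8_t off)   { (void)off; return 0; }
static void     vpd_write_addr(uint8_t off)   { (void)off; }

static int vpd_read_block(uint8_t *buf, int len)
{
        int i, cnt;

        for (i = 0; i < len; i += 4) {
                vpd_write_addr(i);
                for (cnt = 0; cnt < 5; cnt++) {         /* bounded poll, as above */
                        /* the driver sleeps 2 ms per try here */
                        if (vpd_read_flag() & 0x80)
                                break;
                }
                if (cnt >= 5)
                        return -1;      /* device never flagged completion */
                uint32_t d = vpd_read_dword(i);
                memcpy(&buf[i], &d, 4); /* copy the ready dword */
        }
        return 0;
}

int main(void)
{
        uint8_t vpd[256];

        printf("vpd read %s\n", vpd_read_block(vpd, sizeof(vpd)) ? "failed" : "ok");
        return 0;
}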
@@ -4931,8 +5340,10 @@ static int s2io_link_test(nic_t * sp, uint64_t * data) | |||
4931 | u64 val64; | 5340 | u64 val64; |
4932 | 5341 | ||
4933 | val64 = readq(&bar0->adapter_status); | 5342 | val64 = readq(&bar0->adapter_status); |
4934 | if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT) | 5343 | if (!(LINK_IS_UP(val64))) |
4935 | *data = 1; | 5344 | *data = 1; |
5345 | else | ||
5346 | *data = 0; | ||
4936 | 5347 | ||
4937 | return 0; | 5348 | return 0; |
4938 | } | 5349 | } |
@@ -5112,7 +5523,6 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
5112 | int i = 0; | 5523 | int i = 0; |
5113 | nic_t *sp = dev->priv; | 5524 | nic_t *sp = dev->priv; |
5114 | StatInfo_t *stat_info = sp->mac_control.stats_info; | 5525 | StatInfo_t *stat_info = sp->mac_control.stats_info; |
5115 | u64 tmp; | ||
5116 | 5526 | ||
5117 | s2io_updt_stats(sp); | 5527 | s2io_updt_stats(sp); |
5118 | tmp_stats[i++] = | 5528 | tmp_stats[i++] = |
@@ -5129,9 +5539,19 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
5129 | (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 | | 5539 | (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 | |
5130 | le32_to_cpu(stat_info->tmac_bcst_frms); | 5540 | le32_to_cpu(stat_info->tmac_bcst_frms); |
5131 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms); | 5541 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms); |
5542 | tmp_stats[i++] = | ||
5543 | (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 | | ||
5544 | le32_to_cpu(stat_info->tmac_ttl_octets); | ||
5545 | tmp_stats[i++] = | ||
5546 | (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 | | ||
5547 | le32_to_cpu(stat_info->tmac_ucst_frms); | ||
5548 | tmp_stats[i++] = | ||
5549 | (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 | | ||
5550 | le32_to_cpu(stat_info->tmac_nucst_frms); | ||
5132 | tmp_stats[i++] = | 5551 | tmp_stats[i++] = |
5133 | (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 | | 5552 | (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 | |
5134 | le32_to_cpu(stat_info->tmac_any_err_frms); | 5553 | le32_to_cpu(stat_info->tmac_any_err_frms); |
5554 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets); | ||
5135 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets); | 5555 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets); |
5136 | tmp_stats[i++] = | 5556 | tmp_stats[i++] = |
5137 | (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 | | 5557 | (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 | |
@@ -5163,11 +5583,27 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
5163 | (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 | | 5583 | (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 | |
5164 | le32_to_cpu(stat_info->rmac_vld_bcst_frms); | 5584 | le32_to_cpu(stat_info->rmac_vld_bcst_frms); |
5165 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms); | 5585 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms); |
5586 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms); | ||
5166 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms); | 5587 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms); |
5167 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms); | 5588 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms); |
5589 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms); | ||
5590 | tmp_stats[i++] = | ||
5591 | (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 | | ||
5592 | le32_to_cpu(stat_info->rmac_ttl_octets); | ||
5593 | tmp_stats[i++] = | ||
5594 | (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow) | ||
5595 | << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms); | ||
5596 | tmp_stats[i++] = | ||
5597 | (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow) | ||
5598 | << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms); | ||
5168 | tmp_stats[i++] = | 5599 | tmp_stats[i++] = |
5169 | (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 | | 5600 | (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 | |
5170 | le32_to_cpu(stat_info->rmac_discarded_frms); | 5601 | le32_to_cpu(stat_info->rmac_discarded_frms); |
5602 | tmp_stats[i++] = | ||
5603 | (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow) | ||
5604 | << 32 | le32_to_cpu(stat_info->rmac_drop_events); | ||
5605 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets); | ||
5606 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms); | ||
5171 | tmp_stats[i++] = | 5607 | tmp_stats[i++] = |
5172 | (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 | | 5608 | (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 | |
5173 | le32_to_cpu(stat_info->rmac_usized_frms); | 5609 | le32_to_cpu(stat_info->rmac_usized_frms); |
@@ -5180,40 +5616,129 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
5180 | tmp_stats[i++] = | 5616 | tmp_stats[i++] = |
5181 | (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 | | 5617 | (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 | |
5182 | le32_to_cpu(stat_info->rmac_jabber_frms); | 5618 | le32_to_cpu(stat_info->rmac_jabber_frms); |
5183 | tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 | | 5619 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms); |
5620 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms); | ||
5621 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms); | ||
5622 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms); | ||
5623 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms); | ||
5624 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms); | ||
5625 | tmp_stats[i++] = | ||
5626 | (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 | | ||
5184 | le32_to_cpu(stat_info->rmac_ip); | 5627 | le32_to_cpu(stat_info->rmac_ip); |
5185 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets); | 5628 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets); |
5186 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip); | 5629 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip); |
5187 | tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 | | 5630 | tmp_stats[i++] = |
5631 | (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 | | ||
5188 | le32_to_cpu(stat_info->rmac_drop_ip); | 5632 | le32_to_cpu(stat_info->rmac_drop_ip); |
5189 | tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 | | 5633 | tmp_stats[i++] = |
5634 | (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 | | ||
5190 | le32_to_cpu(stat_info->rmac_icmp); | 5635 | le32_to_cpu(stat_info->rmac_icmp); |
5191 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp); | 5636 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp); |
5192 | tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 | | 5637 | tmp_stats[i++] = |
5638 | (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 | | ||
5193 | le32_to_cpu(stat_info->rmac_udp); | 5639 | le32_to_cpu(stat_info->rmac_udp); |
5194 | tmp_stats[i++] = | 5640 | tmp_stats[i++] = |
5195 | (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 | | 5641 | (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 | |
5196 | le32_to_cpu(stat_info->rmac_err_drp_udp); | 5642 | le32_to_cpu(stat_info->rmac_err_drp_udp); |
5643 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym); | ||
5644 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0); | ||
5645 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1); | ||
5646 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2); | ||
5647 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3); | ||
5648 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4); | ||
5649 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5); | ||
5650 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6); | ||
5651 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7); | ||
5652 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0); | ||
5653 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1); | ||
5654 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2); | ||
5655 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3); | ||
5656 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4); | ||
5657 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5); | ||
5658 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6); | ||
5659 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7); | ||
5197 | tmp_stats[i++] = | 5660 | tmp_stats[i++] = |
5198 | (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 | | 5661 | (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 | |
5199 | le32_to_cpu(stat_info->rmac_pause_cnt); | 5662 | le32_to_cpu(stat_info->rmac_pause_cnt); |
5663 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt); | ||
5664 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt); | ||
5200 | tmp_stats[i++] = | 5665 | tmp_stats[i++] = |
5201 | (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 | | 5666 | (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 | |
5202 | le32_to_cpu(stat_info->rmac_accepted_ip); | 5667 | le32_to_cpu(stat_info->rmac_accepted_ip); |
5203 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp); | 5668 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp); |
5669 | tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt); | ||
5670 | tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt); | ||
5671 | tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt); | ||
5672 | tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt); | ||
5673 | tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt); | ||
5674 | tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt); | ||
5675 | tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt); | ||
5676 | tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt); | ||
5677 | tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt); | ||
5678 | tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt); | ||
5679 | tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt); | ||
5680 | tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt); | ||
5681 | tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt); | ||
5682 | tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt); | ||
5683 | tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt); | ||
5684 | tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt); | ||
5685 | tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt); | ||
5686 | tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt); | ||
5687 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms); | ||
5688 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms); | ||
5689 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms); | ||
5690 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms); | ||
5691 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms); | ||
5692 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms); | ||
5693 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms); | ||
5694 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms); | ||
5695 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard); | ||
5696 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard); | ||
5697 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard); | ||
5698 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard); | ||
5699 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard); | ||
5700 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard); | ||
5701 | tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard); | ||
5702 | tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt); | ||
5204 | tmp_stats[i++] = 0; | 5703 | tmp_stats[i++] = 0; |
5205 | tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs; | 5704 | tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs; |
5206 | tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs; | 5705 | tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs; |
5706 | tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt; | ||
5707 | tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt; | ||
5708 | tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt; | ||
5709 | tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt; | ||
5710 | tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt; | ||
5711 | tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high; | ||
5712 | tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low; | ||
5713 | tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high; | ||
5714 | tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low; | ||
5715 | tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high; | ||
5716 | tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low; | ||
5717 | tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high; | ||
5718 | tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low; | ||
5719 | tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high; | ||
5720 | tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low; | ||
5721 | tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high; | ||
5722 | tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low; | ||
5207 | tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt; | 5723 | tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt; |
5208 | tmp_stats[i++] = stat_info->sw_stat.sending_both; | 5724 | tmp_stats[i++] = stat_info->sw_stat.sending_both; |
5209 | tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts; | 5725 | tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts; |
5210 | tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts; | 5726 | tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts; |
5211 | tmp = 0; | ||
5212 | if (stat_info->sw_stat.num_aggregations) { | 5727 | if (stat_info->sw_stat.num_aggregations) { |
5213 | tmp = stat_info->sw_stat.sum_avg_pkts_aggregated; | 5728 | u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated; |
5214 | do_div(tmp, stat_info->sw_stat.num_aggregations); | 5729 | int count = 0; |
5730 | /* | ||
5731 | * Since 64-bit divide does not work on all platforms, | ||
5732 | * do repeated subtraction. | ||
5733 | */ | ||
5734 | while (tmp >= stat_info->sw_stat.num_aggregations) { | ||
5735 | tmp -= stat_info->sw_stat.num_aggregations; | ||
5736 | count++; | ||
5737 | } | ||
5738 | tmp_stats[i++] = count; | ||
5215 | } | 5739 | } |
5216 | tmp_stats[i++] = tmp; | 5740 | else |
5741 | tmp_stats[i++] = 0; | ||
5217 | } | 5742 | } |
5218 | 5743 | ||
5219 | static int s2io_ethtool_get_regs_len(struct net_device *dev) | 5744 | static int s2io_ethtool_get_regs_len(struct net_device *dev) |
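
The last hunk replaces the do_div() average with repeated subtraction because, as the comment notes, the 64-bit divide is not usable on every platform here. A standalone sketch of that substitute (fine for the small quotients seen in this statistic, but O(quotient), so not a general replacement for do_div):

#include <stdint.h>
#include <stdio.h>

/* Integer average sum/n computed without a 64-bit divide. */
static uint64_t avg_by_subtraction(uint64_t sum, uint64_t n)
{
        uint64_t count = 0;

        if (!n)
                return 0;               /* no aggregations recorded */
        while (sum >= n) {              /* one iteration per quotient unit */
                sum -= n;
                count++;
        }
        return count;
}

int main(void)
{
        printf("%llu\n",
               (unsigned long long)avg_by_subtraction(1000, 7)); /* prints 142 */
        return 0;
}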
@@ -5351,7 +5876,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) | |||
5351 | 5876 | ||
5352 | dev->mtu = new_mtu; | 5877 | dev->mtu = new_mtu; |
5353 | if (netif_running(dev)) { | 5878 | if (netif_running(dev)) { |
5354 | s2io_card_down(sp); | 5879 | s2io_card_down(sp, 0); |
5355 | netif_stop_queue(dev); | 5880 | netif_stop_queue(dev); |
5356 | if (s2io_card_up(sp)) { | 5881 | if (s2io_card_up(sp)) { |
5357 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", | 5882 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", |
@@ -5489,12 +6014,172 @@ static void s2io_set_link(unsigned long data) | |||
5489 | clear_bit(0, &(nic->link_state)); | 6014 | clear_bit(0, &(nic->link_state)); |
5490 | } | 6015 | } |
5491 | 6016 | ||
5492 | static void s2io_card_down(nic_t * sp) | 6017 | static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba, |
6018 | struct sk_buff **skb, u64 *temp0, u64 *temp1, | ||
6019 | u64 *temp2, int size) | ||
6020 | { | ||
6021 | struct net_device *dev = sp->dev; | ||
6022 | struct sk_buff *frag_list; | ||
6023 | |||
6024 | if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) { | ||
6025 | /* allocate skb */ | ||
6026 | if (*skb) { | ||
6027 | DBG_PRINT(INFO_DBG, "SKB is not NULL\n"); | ||
6028 | /* | ||
6029 | * As Rx frame are not going to be processed, | ||
6030 | * using same mapped address for the Rxd | ||
6031 | * buffer pointer | ||
6032 | */ | ||
6033 | ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0; | ||
6034 | } else { | ||
6035 | *skb = dev_alloc_skb(size); | ||
6036 | if (!(*skb)) { | ||
6037 | DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name); | ||
6038 | DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n"); | ||
6039 | return -ENOMEM ; | ||
6040 | } | ||
6041 | /* store the mapped addr in a temp variable | ||
6042 | * so it can be reused for the next rxd whose | ||
6043 | * Host_Control is NULL | ||
6044 | */ | ||
6045 | ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 = | ||
6046 | pci_map_single( sp->pdev, (*skb)->data, | ||
6047 | size - NET_IP_ALIGN, | ||
6048 | PCI_DMA_FROMDEVICE); | ||
6049 | rxdp->Host_Control = (unsigned long) (*skb); | ||
6050 | } | ||
6051 | } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { | ||
6052 | /* Two buffer Mode */ | ||
6053 | if (*skb) { | ||
6054 | ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2; | ||
6055 | ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0; | ||
6056 | ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1; | ||
6057 | } else { | ||
6058 | *skb = dev_alloc_skb(size); | ||
6059 | ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 = | ||
6060 | pci_map_single(sp->pdev, (*skb)->data, | ||
6061 | dev->mtu + 4, | ||
6062 | PCI_DMA_FROMDEVICE); | ||
6063 | ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 = | ||
6064 | pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, | ||
6065 | PCI_DMA_FROMDEVICE); | ||
6066 | rxdp->Host_Control = (unsigned long) (*skb); | ||
6067 | |||
6068 | /* Buffer-1 will be dummy buffer not used */ | ||
6069 | ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 = | ||
6070 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, | ||
6071 | PCI_DMA_FROMDEVICE); | ||
6072 | } | ||
6073 | } else if ((rxdp->Host_Control == 0)) { | ||
6074 | /* Three buffer mode */ | ||
6075 | if (*skb) { | ||
6076 | ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0; | ||
6077 | ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1; | ||
6078 | ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2; | ||
6079 | } else { | ||
6080 | *skb = dev_alloc_skb(size); | ||
6081 | |||
6082 | ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 = | ||
6083 | pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN, | ||
6084 | PCI_DMA_FROMDEVICE); | ||
6085 | /* Buffer-1 receives L3/L4 headers */ | ||
6086 | ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 = | ||
6087 | pci_map_single( sp->pdev, (*skb)->data, | ||
6088 | l3l4hdr_size + 4, | ||
6089 | PCI_DMA_FROMDEVICE); | ||
6090 | /* | ||
6091 | * skb_shinfo(skb)->frag_list will have L4 | ||
6092 | * data payload | ||
6093 | */ | ||
6094 | skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu + | ||
6095 | ALIGN_SIZE); | ||
6096 | if (skb_shinfo(*skb)->frag_list == NULL) { | ||
6097 | DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb " | ||
6098 | "failed\n", dev->name); | ||
6099 | return -ENOMEM ; | ||
6100 | } | ||
6101 | frag_list = skb_shinfo(*skb)->frag_list; | ||
6102 | frag_list->next = NULL; | ||
6103 | /* | ||
6104 | * Buffer-2 receives L4 data payload | ||
6105 | */ | ||
6106 | ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 = | ||
6107 | pci_map_single( sp->pdev, frag_list->data, | ||
6108 | dev->mtu, PCI_DMA_FROMDEVICE); | ||
6109 | } | ||
6110 | } | ||
6111 | return 0; | ||
6112 | } | ||
6113 | static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size) | ||
6114 | { | ||
6115 | struct net_device *dev = sp->dev; | ||
6116 | if (sp->rxd_mode == RXD_MODE_1) { | ||
6117 | rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN); | ||
6118 | } else if (sp->rxd_mode == RXD_MODE_3B) { | ||
6119 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); | ||
6120 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); | ||
6121 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4); | ||
6122 | } else { | ||
6123 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); | ||
6124 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4); | ||
6125 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu); | ||
6126 | } | ||
6127 | } | ||
6128 | |||
6129 | static int rxd_owner_bit_reset(nic_t *sp) | ||
6130 | { | ||
6131 | int i, j, k, blk_cnt = 0, size; | ||
6132 | mac_info_t * mac_control = &sp->mac_control; | ||
6133 | struct config_param *config = &sp->config; | ||
6134 | struct net_device *dev = sp->dev; | ||
6135 | RxD_t *rxdp = NULL; | ||
6136 | struct sk_buff *skb = NULL; | ||
6137 | buffAdd_t *ba = NULL; | ||
6138 | u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0; | ||
6139 | |||
6140 | /* Calculate the size based on ring mode */ | ||
6141 | size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + | ||
6142 | HEADER_802_2_SIZE + HEADER_SNAP_SIZE; | ||
6143 | if (sp->rxd_mode == RXD_MODE_1) | ||
6144 | size += NET_IP_ALIGN; | ||
6145 | else if (sp->rxd_mode == RXD_MODE_3B) | ||
6146 | size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; | ||
6147 | else | ||
6148 | size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4; | ||
6149 | |||
6150 | for (i = 0; i < config->rx_ring_num; i++) { | ||
6151 | blk_cnt = config->rx_cfg[i].num_rxd / | ||
6152 | (rxd_count[sp->rxd_mode] +1); | ||
6153 | |||
6154 | for (j = 0; j < blk_cnt; j++) { | ||
6155 | for (k = 0; k < rxd_count[sp->rxd_mode]; k++) { | ||
6156 | rxdp = mac_control->rings[i]. | ||
6157 | rx_blocks[j].rxds[k].virt_addr; | ||
6158 | if (sp->rxd_mode >= RXD_MODE_3A) | ||
6159 | ba = &mac_control->rings[i].ba[j][k]; | ||
6160 | set_rxd_buffer_pointer(sp, rxdp, ba, | ||
6161 | &skb,(u64 *)&temp0_64, | ||
6162 | (u64 *)&temp1_64, | ||
6163 | (u64 *)&temp2_64, size); | ||
6164 | |||
6165 | set_rxd_buffer_size(sp, rxdp, size); | ||
6166 | wmb(); | ||
6167 | /* flip the Ownership bit to Hardware */ | ||
6168 | rxdp->Control_1 |= RXD_OWN_XENA; | ||
6169 | } | ||
6170 | } | ||
6171 | } | ||
6172 | return 0; | ||
6173 | |||
6174 | } | ||
6175 | |||
6176 | static void s2io_card_down(nic_t * sp, int flag) | ||
5493 | { | 6177 | { |
5494 | int cnt = 0; | 6178 | int cnt = 0; |
5495 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | 6179 | XENA_dev_config_t __iomem *bar0 = sp->bar0; |
5496 | unsigned long flags; | 6180 | unsigned long flags; |
5497 | register u64 val64 = 0; | 6181 | register u64 val64 = 0; |
6182 | struct net_device *dev = sp->dev; | ||
5498 | 6183 | ||
5499 | del_timer_sync(&sp->alarm_timer); | 6184 | del_timer_sync(&sp->alarm_timer); |
5500 | /* If s2io_set_link task is executing, wait till it completes. */ | 6185 | /* If s2io_set_link task is executing, wait till it completes. */ |
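
set_rxd_buffer_pointer(), set_rxd_buffer_size() and rxd_owner_bit_reset() above all follow the same descriptor handoff order: program the buffer pointers and sizes first, issue a write barrier, and only then flip the ownership bit so the hardware never sees a half-initialized RxD. A minimal sketch of that ordering (the descriptor layout, bit position and the compiler fence are stand-ins for the real RxD_t and wmb()):

#include <stdint.h>
#include <stdio.h>

#define RXD_OWN (1ULL << 56)    /* hypothetical ownership bit */

struct rxd {
        uint64_t control_1;
        uint64_t control_2;
        uint64_t buffer0_ptr;
};

static void give_rxd_to_hw(struct rxd *rxdp, uint64_t dma_addr, uint64_t size_bits)
{
        rxdp->buffer0_ptr = dma_addr;   /* program the buffer pointer first */
        rxdp->control_2 = size_bits;    /* then the buffer sizes */
        __atomic_thread_fence(__ATOMIC_RELEASE);        /* stand-in for wmb() */
        rxdp->control_1 |= RXD_OWN;     /* hand the descriptor to hardware last */
}

int main(void)
{
        struct rxd d = { 0 };

        give_rxd_to_hw(&d, 0x1000, 0x600);
        printf("own=%d\n", !!(d.control_1 & RXD_OWN));
        return 0;
}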
@@ -5505,12 +6190,51 @@ static void s2io_card_down(nic_t * sp) | |||
5505 | 6190 | ||
5506 | /* disable Tx and Rx traffic on the NIC */ | 6191 | /* disable Tx and Rx traffic on the NIC */ |
5507 | stop_nic(sp); | 6192 | stop_nic(sp); |
6193 | if (flag) { | ||
6194 | if (sp->intr_type == MSI_X) { | ||
6195 | int i; | ||
6196 | u16 msi_control; | ||
6197 | |||
6198 | for (i=1; (sp->s2io_entries[i].in_use == | ||
6199 | MSIX_REGISTERED_SUCCESS); i++) { | ||
6200 | int vector = sp->entries[i].vector; | ||
6201 | void *arg = sp->s2io_entries[i].arg; | ||
6202 | |||
6203 | free_irq(vector, arg); | ||
6204 | } | ||
6205 | pci_read_config_word(sp->pdev, 0x42, &msi_control); | ||
6206 | msi_control &= 0xFFFE; /* Disable MSI */ | ||
6207 | pci_write_config_word(sp->pdev, 0x42, msi_control); | ||
6208 | pci_disable_msix(sp->pdev); | ||
6209 | } else { | ||
6210 | free_irq(sp->pdev->irq, dev); | ||
6211 | if (sp->intr_type == MSI) | ||
6212 | pci_disable_msi(sp->pdev); | ||
6213 | } | ||
6214 | } | ||
6215 | /* Waiting till all Interrupt handlers are complete */ | ||
6216 | cnt = 0; | ||
6217 | do { | ||
6218 | msleep(10); | ||
6219 | if (!atomic_read(&sp->isr_cnt)) | ||
6220 | break; | ||
6221 | cnt++; | ||
6222 | } while(cnt < 5); | ||
5508 | 6223 | ||
5509 | /* Kill tasklet. */ | 6224 | /* Kill tasklet. */ |
5510 | tasklet_kill(&sp->task); | 6225 | tasklet_kill(&sp->task); |
5511 | 6226 | ||
5512 | /* Check if the device is Quiescent and then Reset the NIC */ | 6227 | /* Check if the device is Quiescent and then Reset the NIC */ |
5513 | do { | 6228 | do { |
6229 | /* As per the HW requirement we need to replenish the | ||
6230 | * receive buffers to avoid a ring bump. Since there is | ||
6231 | * no intention of processing the Rx frames at this point, we | ||
6232 | * just set the ownership bit of the RxDs in each Rx | ||
6233 | * ring back to the HW and set the appropriate buffer size | ||
6234 | * based on the ring mode. | ||
6235 | */ | ||
6236 | rxd_owner_bit_reset(sp); | ||
6237 | |||
5514 | val64 = readq(&bar0->adapter_status); | 6238 | val64 = readq(&bar0->adapter_status); |
5515 | if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) { | 6239 | if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) { |
5516 | break; | 6240 | break; |
@@ -5528,15 +6252,6 @@ static void s2io_card_down(nic_t * sp) | |||
5528 | } while (1); | 6252 | } while (1); |
5529 | s2io_reset(sp); | 6253 | s2io_reset(sp); |
5530 | 6254 | ||
5531 | /* Waiting till all Interrupt handlers are complete */ | ||
5532 | cnt = 0; | ||
5533 | do { | ||
5534 | msleep(10); | ||
5535 | if (!atomic_read(&sp->isr_cnt)) | ||
5536 | break; | ||
5537 | cnt++; | ||
5538 | } while(cnt < 5); | ||
5539 | |||
5540 | spin_lock_irqsave(&sp->tx_lock, flags); | 6255 | spin_lock_irqsave(&sp->tx_lock, flags); |
5541 | /* Free all Tx buffers */ | 6256 | /* Free all Tx buffers */ |
5542 | free_tx_buffers(sp); | 6257 | free_tx_buffers(sp); |
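
s2io_card_down() now frees the interrupt vectors itself (when asked to by the caller) and then drains in-flight handlers with a bounded poll of the atomic in-service count, rather than waiting after the reset as before. A C11 userspace sketch of that bounded drain (atomic_load and the omitted msleep() stand in for the kernel primitives):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int isr_cnt;      /* incremented on ISR entry, decremented on exit */

/* Returns 0 once no handler is running, -1 if we gave up after 5 tries. */
static int wait_for_isrs(void)
{
        int cnt = 0;

        do {
                /* msleep(10) would go here in the driver */
                if (!atomic_load(&isr_cnt))
                        return 0;
                cnt++;
        } while (cnt < 5);
        return -1;
}

int main(void)
{
        atomic_store(&isr_cnt, 0);
        printf("drained=%d\n", wait_for_isrs() == 0);
        return 0;
}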
@@ -5637,7 +6352,7 @@ static void s2io_restart_nic(unsigned long data) | |||
5637 | struct net_device *dev = (struct net_device *) data; | 6352 | struct net_device *dev = (struct net_device *) data; |
5638 | nic_t *sp = dev->priv; | 6353 | nic_t *sp = dev->priv; |
5639 | 6354 | ||
5640 | s2io_card_down(sp); | 6355 | s2io_card_down(sp, 0); |
5641 | if (s2io_card_up(sp)) { | 6356 | if (s2io_card_up(sp)) { |
5642 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", | 6357 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", |
5643 | dev->name); | 6358 | dev->name); |
@@ -5667,6 +6382,7 @@ static void s2io_tx_watchdog(struct net_device *dev) | |||
5667 | 6382 | ||
5668 | if (netif_carrier_ok(dev)) { | 6383 | if (netif_carrier_ok(dev)) { |
5669 | schedule_work(&sp->rst_timer_task); | 6384 | schedule_work(&sp->rst_timer_task); |
6385 | sp->mac_control.stats_info->sw_stat.soft_reset_cnt++; | ||
5670 | } | 6386 | } |
5671 | } | 6387 | } |
5672 | 6388 | ||
@@ -5695,18 +6411,33 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5695 | ((unsigned long) rxdp->Host_Control); | 6411 | ((unsigned long) rxdp->Host_Control); |
5696 | int ring_no = ring_data->ring_no; | 6412 | int ring_no = ring_data->ring_no; |
5697 | u16 l3_csum, l4_csum; | 6413 | u16 l3_csum, l4_csum; |
6414 | unsigned long long err = rxdp->Control_1 & RXD_T_CODE; | ||
5698 | lro_t *lro; | 6415 | lro_t *lro; |
5699 | 6416 | ||
5700 | skb->dev = dev; | 6417 | skb->dev = dev; |
5701 | if (rxdp->Control_1 & RXD_T_CODE) { | 6418 | |
5702 | unsigned long long err = rxdp->Control_1 & RXD_T_CODE; | 6419 | if (err) { |
5703 | DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n", | 6420 | /* Check for parity error */ |
5704 | dev->name, err); | 6421 | if (err & 0x1) { |
5705 | dev_kfree_skb(skb); | 6422 | sp->mac_control.stats_info->sw_stat.parity_err_cnt++; |
5706 | sp->stats.rx_crc_errors++; | 6423 | } |
5707 | atomic_dec(&sp->rx_bufs_left[ring_no]); | 6424 | |
5708 | rxdp->Host_Control = 0; | 6425 | /* |
5709 | return 0; | 6426 | * Drop the packet if bad transfer code. Exception being |
6427 | * 0x5, which could be due to unsupported IPv6 extension header. | ||
6428 | * In this case, we let stack handle the packet. | ||
6429 | * Note that in this case, since checksum will be incorrect, | ||
6430 | * stack will validate the same. | ||
6431 | */ | ||
6432 | if (err && ((err >> 48) != 0x5)) { | ||
6433 | DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n", | ||
6434 | dev->name, err); | ||
6435 | sp->stats.rx_crc_errors++; | ||
6436 | dev_kfree_skb(skb); | ||
6437 | atomic_dec(&sp->rx_bufs_left[ring_no]); | ||
6438 | rxdp->Host_Control = 0; | ||
6439 | return 0; | ||
6440 | } | ||
5710 | } | 6441 | } |
5711 | 6442 | ||
5712 | /* Updating statistics */ | 6443 | /* Updating statistics */ |
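
The Rx error handling above counts parity hits and drops frames with a bad transfer code, except code 0x5 (possibly an unsupported IPv6 extension header), which is passed up so the stack can validate the checksum itself. A small sketch of that decision, using the same bit positions as the hunk but a toy stats structure:

#include <stdint.h>
#include <stdio.h>

struct sw_stat {
        unsigned long parity_err_cnt;
};

/* Returns 1 if the frame should be delivered, 0 if it must be dropped. */
static int rx_frame_ok(uint64_t t_code, struct sw_stat *st)
{
        if (!t_code)
                return 1;               /* no error reported */
        if (t_code & 0x1)
                st->parity_err_cnt++;   /* count parity errors either way */
        if ((t_code >> 48) == 0x5)
                return 1;               /* let the stack re-check the checksum */
        return 0;                       /* any other code: drop the skb */
}

int main(void)
{
        struct sw_stat st = { 0 };

        printf("%d %d\n", rx_frame_ok(0, &st),
               rx_frame_ok(0x2ULL << 48, &st));         /* 1 0 */
        return 0;
}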
@@ -5792,6 +6523,9 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) | |||
5792 | clear_lro_session(lro); | 6523 | clear_lro_session(lro); |
5793 | goto send_up; | 6524 | goto send_up; |
5794 | case 0: /* sessions exceeded */ | 6525 | case 0: /* sessions exceeded */ |
6526 | case -1: /* non-TCP or not | ||
6527 | * L2 aggregatable | ||
6528 | */ | ||
5795 | case 5: /* | 6529 | case 5: /* |
5796 | * First pkt in session not | 6530 | * First pkt in session not |
5797 | * L3/L4 aggregatable | 6531 | * L3/L4 aggregatable |
@@ -5918,13 +6652,6 @@ static void s2io_init_pci(nic_t * sp) | |||
5918 | pci_write_config_word(sp->pdev, PCI_COMMAND, | 6652 | pci_write_config_word(sp->pdev, PCI_COMMAND, |
5919 | (pci_cmd | PCI_COMMAND_PARITY)); | 6653 | (pci_cmd | PCI_COMMAND_PARITY)); |
5920 | pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); | 6654 | pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); |
5921 | |||
5922 | /* Forcibly disabling relaxed ordering capability of the card. */ | ||
5923 | pcix_cmd &= 0xfffd; | ||
5924 | pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, | ||
5925 | pcix_cmd); | ||
5926 | pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, | ||
5927 | &(pcix_cmd)); | ||
5928 | } | 6655 | } |
5929 | 6656 | ||
5930 | MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); | 6657 | MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); |
@@ -5954,6 +6681,55 @@ module_param(intr_type, int, 0); | |||
5954 | module_param(lro, int, 0); | 6681 | module_param(lro, int, 0); |
5955 | module_param(lro_max_pkts, int, 0); | 6682 | module_param(lro_max_pkts, int, 0); |
5956 | 6683 | ||
6684 | static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) | ||
6685 | { | ||
6686 | if ( tx_fifo_num > 8) { | ||
6687 | DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not " | ||
6688 | "supported\n"); | ||
6689 | DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n"); | ||
6690 | tx_fifo_num = 8; | ||
6691 | } | ||
6692 | if ( rx_ring_num > 8) { | ||
6693 | DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not " | ||
6694 | "supported\n"); | ||
6695 | DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n"); | ||
6696 | rx_ring_num = 8; | ||
6697 | } | ||
6698 | #ifdef CONFIG_S2IO_NAPI | ||
6699 | if (*dev_intr_type != INTA) { | ||
6700 | DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when " | ||
6701 | "MSI/MSI-X is enabled. Defaulting to INTA\n"); | ||
6702 | *dev_intr_type = INTA; | ||
6703 | } | ||
6704 | #endif | ||
6705 | #ifndef CONFIG_PCI_MSI | ||
6706 | if (*dev_intr_type != INTA) { | ||
6707 | DBG_PRINT(ERR_DBG, "s2io: This kernel does not support " | ||
6708 | "MSI/MSI-X. Defaulting to INTA\n"); | ||
6709 | *dev_intr_type = INTA; | ||
6710 | } | ||
6711 | #else | ||
6712 | if (*dev_intr_type > MSI_X) { | ||
6713 | DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " | ||
6714 | "Defaulting to INTA\n"); | ||
6715 | *dev_intr_type = INTA; | ||
6716 | } | ||
6717 | #endif | ||
6718 | if ((*dev_intr_type == MSI_X) && | ||
6719 | ((pdev->device != PCI_DEVICE_ID_HERC_WIN) && | ||
6720 | (pdev->device != PCI_DEVICE_ID_HERC_UNI))) { | ||
6721 | DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. " | ||
6722 | "Defaulting to INTA\n"); | ||
6723 | *dev_intr_type = INTA; | ||
6724 | } | ||
6725 | if (rx_ring_mode > 3) { | ||
6726 | DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n"); | ||
6727 | DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n"); | ||
6728 | rx_ring_mode = 3; | ||
6729 | } | ||
6730 | return SUCCESS; | ||
6731 | } | ||
6732 | |||
5957 | /** | 6733 | /** |
5958 | * s2io_init_nic - Initialization of the adapter . | 6734 | * s2io_init_nic - Initialization of the adapter . |
5959 | * @pdev : structure containing the PCI related information of the device. | 6735 | * @pdev : structure containing the PCI related information of the device. |
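
s2io_verify_parm() gathers the module-parameter sanity checks that used to be scattered through probe: out-of-range requests are not rejected, they are pulled back to a supported value with a warning. A toy sketch of that clamp-and-warn style (names and limits here are illustrative):

#include <stdio.h>

static int clamp_param(const char *name, int val, int max, int dflt)
{
        if (val > max) {
                fprintf(stderr, "s2io: requested %s=%d not supported, using %d\n",
                        name, val, dflt);
                return dflt;
        }
        return val;
}

int main(void)
{
        int tx_fifo_num = clamp_param("tx_fifo_num", 12, 8, 8);
        int rx_ring_num = clamp_param("rx_ring_num", 4, 8, 8);

        printf("%d %d\n", tx_fifo_num, rx_ring_num);    /* 8 4 */
        return 0;
}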
@@ -5984,15 +6760,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
5984 | int mode; | 6760 | int mode; |
5985 | u8 dev_intr_type = intr_type; | 6761 | u8 dev_intr_type = intr_type; |
5986 | 6762 | ||
5987 | #ifdef CONFIG_S2IO_NAPI | 6763 | if ((ret = s2io_verify_parm(pdev, &dev_intr_type))) |
5988 | if (dev_intr_type != INTA) { | 6764 | return ret; |
5989 | DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \ | ||
5990 | is enabled. Defaulting to INTA\n"); | ||
5991 | dev_intr_type = INTA; | ||
5992 | } | ||
5993 | else | ||
5994 | DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n"); | ||
5995 | #endif | ||
5996 | 6765 | ||
5997 | if ((ret = pci_enable_device(pdev))) { | 6766 | if ((ret = pci_enable_device(pdev))) { |
5998 | DBG_PRINT(ERR_DBG, | 6767 | DBG_PRINT(ERR_DBG, |
@@ -6017,14 +6786,6 @@ is enabled. Defaulting to INTA\n"); | |||
6017 | pci_disable_device(pdev); | 6786 | pci_disable_device(pdev); |
6018 | return -ENOMEM; | 6787 | return -ENOMEM; |
6019 | } | 6788 | } |
6020 | |||
6021 | if ((dev_intr_type == MSI_X) && | ||
6022 | ((pdev->device != PCI_DEVICE_ID_HERC_WIN) && | ||
6023 | (pdev->device != PCI_DEVICE_ID_HERC_UNI))) { | ||
6024 | DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \ | ||
6025 | Defaulting to INTA\n"); | ||
6026 | dev_intr_type = INTA; | ||
6027 | } | ||
6028 | if (dev_intr_type != MSI_X) { | 6789 | if (dev_intr_type != MSI_X) { |
6029 | if (pci_request_regions(pdev, s2io_driver_name)) { | 6790 | if (pci_request_regions(pdev, s2io_driver_name)) { |
6030 | DBG_PRINT(ERR_DBG, "Request Regions failed\n"), | 6791 | DBG_PRINT(ERR_DBG, "Request Regions failed\n"), |
@@ -6100,8 +6861,6 @@ Defaulting to INTA\n"); | |||
6100 | config = &sp->config; | 6861 | config = &sp->config; |
6101 | 6862 | ||
6102 | /* Tx side parameters. */ | 6863 | /* Tx side parameters. */ |
6103 | if (tx_fifo_len[0] == 0) | ||
6104 | tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */ | ||
6105 | config->tx_fifo_num = tx_fifo_num; | 6864 | config->tx_fifo_num = tx_fifo_num; |
6106 | for (i = 0; i < MAX_TX_FIFOS; i++) { | 6865 | for (i = 0; i < MAX_TX_FIFOS; i++) { |
6107 | config->tx_cfg[i].fifo_len = tx_fifo_len[i]; | 6866 | config->tx_cfg[i].fifo_len = tx_fifo_len[i]; |
@@ -6125,8 +6884,6 @@ Defaulting to INTA\n"); | |||
6125 | config->max_txds = MAX_SKB_FRAGS + 2; | 6884 | config->max_txds = MAX_SKB_FRAGS + 2; |
6126 | 6885 | ||
6127 | /* Rx side parameters. */ | 6886 | /* Rx side parameters. */ |
6128 | if (rx_ring_sz[0] == 0) | ||
6129 | rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */ | ||
6130 | config->rx_ring_num = rx_ring_num; | 6887 | config->rx_ring_num = rx_ring_num; |
6131 | for (i = 0; i < MAX_RX_RINGS; i++) { | 6888 | for (i = 0; i < MAX_RX_RINGS; i++) { |
6132 | config->rx_cfg[i].num_rxd = rx_ring_sz[i] * | 6889 | config->rx_cfg[i].num_rxd = rx_ring_sz[i] * |
@@ -6267,8 +7024,8 @@ Defaulting to INTA\n"); | |||
6267 | val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | | 7024 | val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | |
6268 | RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET); | 7025 | RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET); |
6269 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 7026 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
6270 | wait_for_cmd_complete(sp); | 7027 | wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
6271 | 7028 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING); | |
6272 | tmp64 = readq(&bar0->rmac_addr_data0_mem); | 7029 | tmp64 = readq(&bar0->rmac_addr_data0_mem); |
6273 | mac_down = (u32) tmp64; | 7030 | mac_down = (u32) tmp64; |
6274 | mac_up = (u32) (tmp64 >> 32); | 7031 | mac_up = (u32) (tmp64 >> 32); |
@@ -6322,82 +7079,63 @@ Defaulting to INTA\n"); | |||
6322 | ret = -ENODEV; | 7079 | ret = -ENODEV; |
6323 | goto register_failed; | 7080 | goto register_failed; |
6324 | } | 7081 | } |
6325 | 7082 | s2io_vpd_read(sp); | |
6326 | if (sp->device_type & XFRAME_II_DEVICE) { | 7083 | DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name); |
6327 | DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ", | 7084 | DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n", |
6328 | dev->name); | ||
6329 | DBG_PRINT(ERR_DBG, "(rev %d), Version %s", | ||
6330 | get_xena_rev_id(sp->pdev), | 7085 | get_xena_rev_id(sp->pdev), |
6331 | s2io_driver_version); | 7086 | s2io_driver_version); |
6332 | switch(sp->intr_type) { | 7087 | DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); |
6333 | case INTA: | 7088 | DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " |
6334 | DBG_PRINT(ERR_DBG, ", Intr type INTA"); | 7089 | "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, |
6335 | break; | ||
6336 | case MSI: | ||
6337 | DBG_PRINT(ERR_DBG, ", Intr type MSI"); | ||
6338 | break; | ||
6339 | case MSI_X: | ||
6340 | DBG_PRINT(ERR_DBG, ", Intr type MSI-X"); | ||
6341 | break; | ||
6342 | } | ||
6343 | |||
6344 | DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n"); | ||
6345 | DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
6346 | sp->def_mac_addr[0].mac_addr[0], | 7090 | sp->def_mac_addr[0].mac_addr[0], |
6347 | sp->def_mac_addr[0].mac_addr[1], | 7091 | sp->def_mac_addr[0].mac_addr[1], |
6348 | sp->def_mac_addr[0].mac_addr[2], | 7092 | sp->def_mac_addr[0].mac_addr[2], |
6349 | sp->def_mac_addr[0].mac_addr[3], | 7093 | sp->def_mac_addr[0].mac_addr[3], |
6350 | sp->def_mac_addr[0].mac_addr[4], | 7094 | sp->def_mac_addr[0].mac_addr[4], |
6351 | sp->def_mac_addr[0].mac_addr[5]); | 7095 | sp->def_mac_addr[0].mac_addr[5]); |
7096 | if (sp->device_type & XFRAME_II_DEVICE) { | ||
6352 | mode = s2io_print_pci_mode(sp); | 7097 | mode = s2io_print_pci_mode(sp); |
6353 | if (mode < 0) { | 7098 | if (mode < 0) { |
6354 | DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode "); | 7099 | DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n"); |
6355 | ret = -EBADSLT; | 7100 | ret = -EBADSLT; |
7101 | unregister_netdev(dev); | ||
6356 | goto set_swap_failed; | 7102 | goto set_swap_failed; |
6357 | } | 7103 | } |
6358 | } else { | ||
6359 | DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ", | ||
6360 | dev->name); | ||
6361 | DBG_PRINT(ERR_DBG, "(rev %d), Version %s", | ||
6362 | get_xena_rev_id(sp->pdev), | ||
6363 | s2io_driver_version); | ||
6364 | switch(sp->intr_type) { | ||
6365 | case INTA: | ||
6366 | DBG_PRINT(ERR_DBG, ", Intr type INTA"); | ||
6367 | break; | ||
6368 | case MSI: | ||
6369 | DBG_PRINT(ERR_DBG, ", Intr type MSI"); | ||
6370 | break; | ||
6371 | case MSI_X: | ||
6372 | DBG_PRINT(ERR_DBG, ", Intr type MSI-X"); | ||
6373 | break; | ||
6374 | } | ||
6375 | DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n"); | ||
6376 | DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
6377 | sp->def_mac_addr[0].mac_addr[0], | ||
6378 | sp->def_mac_addr[0].mac_addr[1], | ||
6379 | sp->def_mac_addr[0].mac_addr[2], | ||
6380 | sp->def_mac_addr[0].mac_addr[3], | ||
6381 | sp->def_mac_addr[0].mac_addr[4], | ||
6382 | sp->def_mac_addr[0].mac_addr[5]); | ||
6383 | } | 7104 | } |
6384 | if (sp->rxd_mode == RXD_MODE_3B) | 7105 | switch(sp->rxd_mode) { |
6385 | DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been " | 7106 | case RXD_MODE_1: |
6386 | "enabled\n",dev->name); | 7107 | DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n", |
6387 | if (sp->rxd_mode == RXD_MODE_3A) | 7108 | dev->name); |
6388 | DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been " | 7109 | break; |
6389 | "enabled\n",dev->name); | 7110 | case RXD_MODE_3B: |
6390 | 7111 | DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n", | |
7112 | dev->name); | ||
7113 | break; | ||
7114 | case RXD_MODE_3A: | ||
7115 | DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n", | ||
7116 | dev->name); | ||
7117 | break; | ||
7118 | } | ||
7119 | #ifdef CONFIG_S2IO_NAPI | ||
7120 | DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); | ||
7121 | #endif | ||
7122 | switch(sp->intr_type) { | ||
7123 | case INTA: | ||
7124 | DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); | ||
7125 | break; | ||
7126 | case MSI: | ||
7127 | DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name); | ||
7128 | break; | ||
7129 | case MSI_X: | ||
7130 | DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name); | ||
7131 | break; | ||
7132 | } | ||
6391 | if (sp->lro) | 7133 | if (sp->lro) |
6392 | DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", | 7134 | DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", |
6393 | dev->name); | 7135 | dev->name); |
6394 | 7136 | ||
6395 | /* Initialize device name */ | 7137 | /* Initialize device name */ |
6396 | strcpy(sp->name, dev->name); | 7138 | sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); |
6397 | if (sp->device_type & XFRAME_II_DEVICE) | ||
6398 | strcat(sp->name, ": Neterion Xframe II 10GbE adapter"); | ||
6399 | else | ||
6400 | strcat(sp->name, ": Neterion Xframe I 10GbE adapter"); | ||
6401 | 7139 | ||
6402 | /* Initialize bimodal Interrupts */ | 7140 | /* Initialize bimodal Interrupts */ |
6403 | sp->config.bimodal = bimodal; | 7141 | sp->config.bimodal = bimodal; |
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 0a0b5b29d81e..3203732a668d 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -31,6 +31,8 @@ | |||
31 | #define SUCCESS 0 | 31 | #define SUCCESS 0 |
32 | #define FAILURE -1 | 32 | #define FAILURE -1 |
33 | 33 | ||
34 | #define CHECKBIT(value, nbit) (value & (1 << nbit)) | ||
35 | |||
34 | /* Maximum time to flicker LED when asked to identify NIC using ethtool */ | 36 | /* Maximum time to flicker LED when asked to identify NIC using ethtool */ |
35 | #define MAX_FLICKER_TIME 60000 /* 60 Secs */ | 37 | #define MAX_FLICKER_TIME 60000 /* 60 Secs */ |
36 | 38 | ||
@@ -78,6 +80,11 @@ static int debug_level = ERR_DBG; | |||
78 | typedef struct { | 80 | typedef struct { |
79 | unsigned long long single_ecc_errs; | 81 | unsigned long long single_ecc_errs; |
80 | unsigned long long double_ecc_errs; | 82 | unsigned long long double_ecc_errs; |
83 | unsigned long long parity_err_cnt; | ||
84 | unsigned long long serious_err_cnt; | ||
85 | unsigned long long soft_reset_cnt; | ||
86 | unsigned long long fifo_full_cnt; | ||
87 | unsigned long long ring_full_cnt; | ||
81 | /* LRO statistics */ | 88 | /* LRO statistics */ |
82 | unsigned long long clubbed_frms_cnt; | 89 | unsigned long long clubbed_frms_cnt; |
83 | unsigned long long sending_both; | 90 | unsigned long long sending_both; |
@@ -87,6 +94,25 @@ typedef struct { | |||
87 | unsigned long long num_aggregations; | 94 | unsigned long long num_aggregations; |
88 | } swStat_t; | 95 | } swStat_t; |
89 | 96 | ||
97 | /* Xpak related alarms and warnings */ | ||
98 | typedef struct { | ||
99 | u64 alarm_transceiver_temp_high; | ||
100 | u64 alarm_transceiver_temp_low; | ||
101 | u64 alarm_laser_bias_current_high; | ||
102 | u64 alarm_laser_bias_current_low; | ||
103 | u64 alarm_laser_output_power_high; | ||
104 | u64 alarm_laser_output_power_low; | ||
105 | u64 warn_transceiver_temp_high; | ||
106 | u64 warn_transceiver_temp_low; | ||
107 | u64 warn_laser_bias_current_high; | ||
108 | u64 warn_laser_bias_current_low; | ||
109 | u64 warn_laser_output_power_high; | ||
110 | u64 warn_laser_output_power_low; | ||
111 | u64 xpak_regs_stat; | ||
112 | u32 xpak_timer_count; | ||
113 | } xpakStat_t; | ||
114 | |||
115 | |||
90 | /* The statistics block of Xena */ | 116 | /* The statistics block of Xena */ |
91 | typedef struct stat_block { | 117 | typedef struct stat_block { |
92 | /* Tx MAC statistics counters. */ | 118 | /* Tx MAC statistics counters. */ |
@@ -263,7 +289,9 @@ typedef struct stat_block { | |||
263 | u32 rmac_accepted_ip_oflow; | 289 | u32 rmac_accepted_ip_oflow; |
264 | u32 reserved_14; | 290 | u32 reserved_14; |
265 | u32 link_fault_cnt; | 291 | u32 link_fault_cnt; |
292 | u8 buffer[20]; | ||
266 | swStat_t sw_stat; | 293 | swStat_t sw_stat; |
294 | xpakStat_t xpak_stat; | ||
267 | } StatInfo_t; | 295 | } StatInfo_t; |
268 | 296 | ||
269 | /* | 297 | /* |
@@ -659,7 +687,8 @@ typedef struct { | |||
659 | } usr_addr_t; | 687 | } usr_addr_t; |
660 | 688 | ||
661 | /* Default Tunable parameters of the NIC. */ | 689 | /* Default Tunable parameters of the NIC. */ |
662 | #define DEFAULT_FIFO_LEN 4096 | 690 | #define DEFAULT_FIFO_0_LEN 4096 |
691 | #define DEFAULT_FIFO_1_7_LEN 512 | ||
663 | #define SMALL_BLK_CNT 30 | 692 | #define SMALL_BLK_CNT 30 |
664 | #define LARGE_BLK_CNT 100 | 693 | #define LARGE_BLK_CNT 100 |
665 | 694 | ||
@@ -732,7 +761,7 @@ struct s2io_nic { | |||
732 | int device_close_flag; | 761 | int device_close_flag; |
733 | int device_enabled_once; | 762 | int device_enabled_once; |
734 | 763 | ||
735 | char name[50]; | 764 | char name[60]; |
736 | struct tasklet_struct task; | 765 | struct tasklet_struct task; |
737 | volatile unsigned long tasklet_status; | 766 | volatile unsigned long tasklet_status; |
738 | 767 | ||
@@ -803,6 +832,8 @@ struct s2io_nic { | |||
803 | char desc1[35]; | 832 | char desc1[35]; |
804 | char desc2[35]; | 833 | char desc2[35]; |
805 | 834 | ||
835 | int avail_msix_vectors; /* No. of MSI-X vectors granted by system */ | ||
836 | |||
806 | struct msix_info_st msix_info[0x3f]; | 837 | struct msix_info_st msix_info[0x3f]; |
807 | 838 | ||
808 | #define XFRAME_I_DEVICE 1 | 839 | #define XFRAME_I_DEVICE 1 |
@@ -824,6 +855,8 @@ struct s2io_nic { | |||
824 | spinlock_t rx_lock; | 855 | spinlock_t rx_lock; |
825 | atomic_t isr_cnt; | 856 | atomic_t isr_cnt; |
826 | u64 *ufo_in_band_v; | 857 | u64 *ufo_in_band_v; |
858 | #define VPD_PRODUCT_NAME_LEN 50 | ||
859 | u8 product_name[VPD_PRODUCT_NAME_LEN]; | ||
827 | }; | 860 | }; |
828 | 861 | ||
829 | #define RESET_ERROR 1; | 862 | #define RESET_ERROR 1; |
@@ -848,28 +881,32 @@ static inline void writeq(u64 val, void __iomem *addr) | |||
848 | writel((u32) (val), addr); | 881 | writel((u32) (val), addr); |
849 | writel((u32) (val >> 32), (addr + 4)); | 882 | writel((u32) (val >> 32), (addr + 4)); |
850 | } | 883 | } |
884 | #endif | ||
851 | 885 | ||
852 | /* In 32 bit modes, some registers have to be written in a | 886 | /* |
853 | * particular order to expect correct hardware operation. The | 887 | * Some registers have to be written in a particular order to |
854 | * macro SPECIAL_REG_WRITE is used to perform such ordered | 888 | * expect correct hardware operation. The macro SPECIAL_REG_WRITE |
855 | * writes. Defines UF (Upper First) and LF (Lower First) will | 889 | * is used to perform such ordered writes. Defines UF (Upper First) |
856 | * be used to specify the required write order. | 890 | * and LF (Lower First) will be used to specify the required write order. |
857 | */ | 891 | */ |
858 | #define UF 1 | 892 | #define UF 1 |
859 | #define LF 2 | 893 | #define LF 2 |
860 | static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order) | 894 | static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order) |
861 | { | 895 | { |
896 | u32 ret; | ||
897 | |||
862 | if (order == LF) { | 898 | if (order == LF) { |
863 | writel((u32) (val), addr); | 899 | writel((u32) (val), addr); |
900 | ret = readl(addr); | ||
864 | writel((u32) (val >> 32), (addr + 4)); | 901 | writel((u32) (val >> 32), (addr + 4)); |
902 | ret = readl(addr + 4); | ||
865 | } else { | 903 | } else { |
866 | writel((u32) (val >> 32), (addr + 4)); | 904 | writel((u32) (val >> 32), (addr + 4)); |
905 | ret = readl(addr + 4); | ||
867 | writel((u32) (val), addr); | 906 | writel((u32) (val), addr); |
907 | ret = readl(addr); | ||
868 | } | 908 | } |
869 | } | 909 | } |
870 | #else | ||
871 | #define SPECIAL_REG_WRITE(val, addr, dummy) writeq(val, addr) | ||
872 | #endif | ||
873 | 910 | ||
874 | /* Interrupt related values of Xena */ | 911 | /* Interrupt related values of Xena */ |
875 | 912 | ||
@@ -965,7 +1002,7 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); | |||
965 | static struct ethtool_ops netdev_ethtool_ops; | 1002 | static struct ethtool_ops netdev_ethtool_ops; |
966 | static void s2io_set_link(unsigned long data); | 1003 | static void s2io_set_link(unsigned long data); |
967 | static int s2io_set_swapper(nic_t * sp); | 1004 | static int s2io_set_swapper(nic_t * sp); |
968 | static void s2io_card_down(nic_t *nic); | 1005 | static void s2io_card_down(nic_t *nic, int flag); |
969 | static int s2io_card_up(nic_t *nic); | 1006 | static int s2io_card_up(nic_t *nic); |
970 | static int get_xena_rev_id(struct pci_dev *pdev); | 1007 | static int get_xena_rev_id(struct pci_dev *pdev); |
971 | static void restore_xmsi_data(nic_t *nic); | 1008 | static void restore_xmsi_data(nic_t *nic); |
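For reference, the reworked SPECIAL_REG_WRITE() above now follows each 32-bit writel() with a readl() of the same half, flushing the posted write so the two halves reach the adapter in the requested order. A minimal usage sketch is below; the register name is illustrative only and is not taken from this patch:

	u64 val64 = 0x0000123400005678ULL;
	/* hypothetical register that requires the upper 32 bits to land first */
	SPECIAL_REG_WRITE(val64, &bar0->some_ordered_reg, UF);
	/* and one that requires the lower 32 bits first */
	SPECIAL_REG_WRITE(val64, &bar0->some_ordered_reg, LF);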
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index f5a3bf4d959a..d05874172209 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux. | 1 | /* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux. |
2 | Copyright 1999 Silicon Integrated System Corporation | 2 | Copyright 1999 Silicon Integrated System Corporation |
3 | Revision: 1.08.09 Sep. 19 2005 | 3 | Revision: 1.08.10 Apr. 2 2006 |
4 | 4 | ||
5 | Modified from the driver which is originally written by Donald Becker. | 5 | Modified from the driver which is originally written by Donald Becker. |
6 | 6 | ||
@@ -17,9 +17,10 @@ | |||
17 | SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution, | 17 | SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution, |
18 | preliminary Rev. 1.0 Jan. 18, 1998 | 18 | preliminary Rev. 1.0 Jan. 18, 1998 |
19 | 19 | ||
20 | Rev 1.08.10 Apr. 2 2006 Daniele Venzano add vlan (jumbo packets) support | ||
20 | Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support | 21 | Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support |
21 | Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages | 22 | Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages |
22 | Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support | 23 | Rev 1.08.07 Nov. 2 2003 Daniele Venzano <venza@brownhat.org> add suspend/resume support |
23 | Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support | 24 | Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support |
24 | Rev 1.08.05 Jun. 6 2002 Mufasa Yang bug fix for read_eeprom & Tx descriptor over-boundary | 25 | Rev 1.08.05 Jun. 6 2002 Mufasa Yang bug fix for read_eeprom & Tx descriptor over-boundary |
25 | Rev 1.08.04 Apr. 25 2002 Mufasa Yang <mufasa@sis.com.tw> added SiS962 support | 26 | Rev 1.08.04 Apr. 25 2002 Mufasa Yang <mufasa@sis.com.tw> added SiS962 support |
@@ -77,7 +78,7 @@ | |||
77 | #include "sis900.h" | 78 | #include "sis900.h" |
78 | 79 | ||
79 | #define SIS900_MODULE_NAME "sis900" | 80 | #define SIS900_MODULE_NAME "sis900" |
80 | #define SIS900_DRV_VERSION "v1.08.09 Sep. 19 2005" | 81 | #define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006" |
81 | 82 | ||
82 | static char version[] __devinitdata = | 83 | static char version[] __devinitdata = |
83 | KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n"; | 84 | KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n"; |
@@ -1402,6 +1403,11 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex) | |||
1402 | rx_flags |= RxATX; | 1403 | rx_flags |= RxATX; |
1403 | } | 1404 | } |
1404 | 1405 | ||
1406 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
1407 | /* Can accept Jumbo packet */ | ||
1408 | rx_flags |= RxAJAB; | ||
1409 | #endif | ||
1410 | |||
1405 | outl (tx_flags, ioaddr + txcfg); | 1411 | outl (tx_flags, ioaddr + txcfg); |
1406 | outl (rx_flags, ioaddr + rxcfg); | 1412 | outl (rx_flags, ioaddr + rxcfg); |
1407 | } | 1413 | } |
@@ -1714,18 +1720,26 @@ static int sis900_rx(struct net_device *net_dev) | |||
1714 | 1720 | ||
1715 | while (rx_status & OWN) { | 1721 | while (rx_status & OWN) { |
1716 | unsigned int rx_size; | 1722 | unsigned int rx_size; |
1723 | unsigned int data_size; | ||
1717 | 1724 | ||
1718 | if (--rx_work_limit < 0) | 1725 | if (--rx_work_limit < 0) |
1719 | break; | 1726 | break; |
1720 | 1727 | ||
1721 | rx_size = (rx_status & DSIZE) - CRC_SIZE; | 1728 | data_size = rx_status & DSIZE; |
1729 | rx_size = data_size - CRC_SIZE; | ||
1730 | |||
1731 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
1732 | /* ``TOOLONG'' flag means a jumbo packet was received. */ | ||
1733 | if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE) | ||
1734 | rx_status &= (~ ((unsigned int)TOOLONG)); | ||
1735 | #endif | ||
1722 | 1736 | ||
1723 | if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) { | 1737 | if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) { |
1724 | /* corrupted packet received */ | 1738 | /* corrupted packet received */ |
1725 | if (netif_msg_rx_err(sis_priv)) | 1739 | if (netif_msg_rx_err(sis_priv)) |
1726 | printk(KERN_DEBUG "%s: Corrupted packet " | 1740 | printk(KERN_DEBUG "%s: Corrupted packet " |
1727 | "received, buffer status = 0x%8.8x.\n", | 1741 | "received, buffer status = 0x%8.8x/%d.\n", |
1728 | net_dev->name, rx_status); | 1742 | net_dev->name, rx_status, data_size); |
1729 | sis_priv->stats.rx_errors++; | 1743 | sis_priv->stats.rx_errors++; |
1730 | if (rx_status & OVERRUN) | 1744 | if (rx_status & OVERRUN) |
1731 | sis_priv->stats.rx_over_errors++; | 1745 | sis_priv->stats.rx_over_errors++; |
diff --git a/drivers/net/sis900.h b/drivers/net/sis900.h index 50323941e3c0..4834e3a15694 100644 --- a/drivers/net/sis900.h +++ b/drivers/net/sis900.h | |||
@@ -310,8 +310,14 @@ enum sis630_revision_id { | |||
310 | #define CRC_SIZE 4 | 310 | #define CRC_SIZE 4 |
311 | #define MAC_HEADER_SIZE 14 | 311 | #define MAC_HEADER_SIZE 14 |
312 | 312 | ||
313 | #define TX_BUF_SIZE 1536 | 313 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
314 | #define RX_BUF_SIZE 1536 | 314 | #define MAX_FRAME_SIZE (1518 + 4) |
315 | #else | ||
316 | #define MAX_FRAME_SIZE 1518 | ||
317 | #endif /* CONFIG_VLAN_802_1Q */ | ||
318 | |||
319 | #define TX_BUF_SIZE (MAX_FRAME_SIZE+18) | ||
320 | #define RX_BUF_SIZE (MAX_FRAME_SIZE+18) | ||
315 | 321 | ||
316 | #define NUM_TX_DESC 16 /* Number of Tx descriptor registers. */ | 322 | #define NUM_TX_DESC 16 /* Number of Tx descriptor registers. */ |
317 | #define NUM_RX_DESC 16 /* Number of Rx descriptor registers. */ | 323 | #define NUM_RX_DESC 16 /* Number of Rx descriptor registers. */ |
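To make the buffer sizing above concrete, these are simply the expanded values of the new macros (nothing beyond this hunk is assumed):

	/* With CONFIG_VLAN_8021Q[_MODULE]: MAX_FRAME_SIZE = 1518 + 4 = 1522, TX/RX_BUF_SIZE = 1522 + 18 = 1540 */
	/* Without VLAN support:            MAX_FRAME_SIZE = 1518,            TX/RX_BUF_SIZE = 1518 + 18 = 1536 */

So the non-VLAN case keeps the old fixed 1536-byte buffers, while the VLAN case grows them just enough to carry a 4-byte 802.1Q tag.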
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c new file mode 100644 index 000000000000..bdd8702ead54 --- /dev/null +++ b/drivers/net/smc911x.c | |||
@@ -0,0 +1,2307 @@ | |||
1 | /* | ||
2 | * smc911x.c | ||
3 | * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices. | ||
4 | * | ||
5 | * Copyright (C) 2005 Sensoria Corp | ||
6 | * Derived from the unified SMC91x driver by Nicolas Pitre | ||
7 | * and the smsc911x.c reference driver by SMSC | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | * | ||
23 | * Arguments: | ||
24 | * watchdog = TX watchdog timeout | ||
25 | * tx_fifo_kb = Size of TX FIFO in KB | ||
26 | * | ||
27 | * History: | ||
28 | * 04/16/05 Dustin McIntire Initial version | ||
29 | */ | ||
30 | static const char version[] = | ||
31 | "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n"; | ||
32 | |||
33 | /* Debugging options */ | ||
34 | #define ENABLE_SMC_DEBUG_RX 0 | ||
35 | #define ENABLE_SMC_DEBUG_TX 0 | ||
36 | #define ENABLE_SMC_DEBUG_DMA 0 | ||
37 | #define ENABLE_SMC_DEBUG_PKTS 0 | ||
38 | #define ENABLE_SMC_DEBUG_MISC 0 | ||
39 | #define ENABLE_SMC_DEBUG_FUNC 0 | ||
40 | |||
41 | #define SMC_DEBUG_RX ((ENABLE_SMC_DEBUG_RX ? 1 : 0) << 0) | ||
42 | #define SMC_DEBUG_TX ((ENABLE_SMC_DEBUG_TX ? 1 : 0) << 1) | ||
43 | #define SMC_DEBUG_DMA ((ENABLE_SMC_DEBUG_DMA ? 1 : 0) << 2) | ||
44 | #define SMC_DEBUG_PKTS ((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3) | ||
45 | #define SMC_DEBUG_MISC ((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4) | ||
46 | #define SMC_DEBUG_FUNC ((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5) | ||
47 | |||
48 | #ifndef SMC_DEBUG | ||
49 | #define SMC_DEBUG ( SMC_DEBUG_RX | \ | ||
50 | SMC_DEBUG_TX | \ | ||
51 | SMC_DEBUG_DMA | \ | ||
52 | SMC_DEBUG_PKTS | \ | ||
53 | SMC_DEBUG_MISC | \ | ||
54 | SMC_DEBUG_FUNC \ | ||
55 | ) | ||
56 | #endif | ||
57 | |||
58 | |||
59 | #include <linux/config.h> | ||
60 | #include <linux/init.h> | ||
61 | #include <linux/module.h> | ||
62 | #include <linux/kernel.h> | ||
63 | #include <linux/sched.h> | ||
64 | #include <linux/slab.h> | ||
65 | #include <linux/delay.h> | ||
66 | #include <linux/interrupt.h> | ||
67 | #include <linux/errno.h> | ||
68 | #include <linux/ioport.h> | ||
69 | #include <linux/crc32.h> | ||
70 | #include <linux/device.h> | ||
71 | #include <linux/platform_device.h> | ||
72 | #include <linux/spinlock.h> | ||
73 | #include <linux/ethtool.h> | ||
74 | #include <linux/mii.h> | ||
75 | #include <linux/workqueue.h> | ||
76 | |||
77 | #include <linux/netdevice.h> | ||
78 | #include <linux/etherdevice.h> | ||
79 | #include <linux/skbuff.h> | ||
80 | |||
81 | #include <asm/io.h> | ||
82 | #include <asm/irq.h> | ||
83 | |||
84 | #include "smc911x.h" | ||
85 | |||
86 | /* | ||
87 | * Transmit timeout, default 5 seconds. | ||
88 | */ | ||
89 | static int watchdog = 5000; | ||
90 | module_param(watchdog, int, 0400); | ||
91 | MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); | ||
92 | |||
93 | static int tx_fifo_kb=8; | ||
94 | module_param(tx_fifo_kb, int, 0400); | ||
95 | MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)"); | ||
96 | |||
97 | MODULE_LICENSE("GPL"); | ||
98 | |||
99 | /* | ||
100 | * The internal workings of the driver. If you are changing anything | ||
101 | * here with the SMC stuff, you should have the datasheet and know | ||
102 | * what you are doing. | ||
103 | */ | ||
104 | #define CARDNAME "smc911x" | ||
105 | |||
106 | /* | ||
107 | * Use power-down feature of the chip | ||
108 | */ | ||
109 | #define POWER_DOWN 1 | ||
110 | |||
111 | |||
112 | /* store this information for the driver.. */ | ||
113 | struct smc911x_local { | ||
114 | /* | ||
115 | * If I have to wait until the DMA is finished and ready to reload a | ||
116 | * packet, I will store the skbuff here. Then, the DMA will send it | ||
117 | * out and free it. | ||
118 | */ | ||
119 | struct sk_buff *pending_tx_skb; | ||
120 | |||
121 | /* | ||
122 | * these are things that the kernel wants me to keep, so users | ||
123 | * can find out semi-useless statistics of how well the card is | ||
124 | * performing | ||
125 | */ | ||
126 | struct net_device_stats stats; | ||
127 | |||
128 | /* version/revision of the SMC911x chip */ | ||
129 | u16 version; | ||
130 | u16 revision; | ||
131 | |||
132 | /* FIFO sizes */ | ||
133 | int tx_fifo_kb; | ||
134 | int tx_fifo_size; | ||
135 | int rx_fifo_size; | ||
136 | int afc_cfg; | ||
137 | |||
138 | /* Contains the current active receive/phy mode */ | ||
139 | int ctl_rfduplx; | ||
140 | int ctl_rspeed; | ||
141 | |||
142 | u32 msg_enable; | ||
143 | u32 phy_type; | ||
144 | struct mii_if_info mii; | ||
145 | |||
146 | /* work queue */ | ||
147 | struct work_struct phy_configure; | ||
148 | int work_pending; | ||
149 | |||
150 | int tx_throttle; | ||
151 | spinlock_t lock; | ||
152 | |||
153 | #ifdef SMC_USE_DMA | ||
154 | /* DMA needs the physical address of the chip */ | ||
155 | u_long physaddr; | ||
156 | int rxdma; | ||
157 | int txdma; | ||
158 | int rxdma_active; | ||
159 | int txdma_active; | ||
160 | struct sk_buff *current_rx_skb; | ||
161 | struct sk_buff *current_tx_skb; | ||
162 | struct device *dev; | ||
163 | #endif | ||
164 | }; | ||
165 | |||
166 | #if SMC_DEBUG > 0 | ||
167 | #define DBG(n, args...) \ | ||
168 | do { \ | ||
169 | if (SMC_DEBUG & (n)) \ | ||
170 | printk(args); \ | ||
171 | } while (0) | ||
172 | |||
173 | #define PRINTK(args...) printk(args) | ||
174 | #else | ||
175 | #define DBG(n, args...) do { } while (0) | ||
176 | #define PRINTK(args...) printk(KERN_DEBUG args) | ||
177 | #endif | ||
178 | |||
179 | #if SMC_DEBUG_PKTS > 0 | ||
180 | static void PRINT_PKT(u_char *buf, int length) | ||
181 | { | ||
182 | int i; | ||
183 | int remainder; | ||
184 | int lines; | ||
185 | |||
186 | lines = length / 16; | ||
187 | remainder = length % 16; | ||
188 | |||
189 | for (i = 0; i < lines ; i ++) { | ||
190 | int cur; | ||
191 | for (cur = 0; cur < 8; cur++) { | ||
192 | u_char a, b; | ||
193 | a = *buf++; | ||
194 | b = *buf++; | ||
195 | printk("%02x%02x ", a, b); | ||
196 | } | ||
197 | printk("\n"); | ||
198 | } | ||
199 | for (i = 0; i < remainder/2 ; i++) { | ||
200 | u_char a, b; | ||
201 | a = *buf++; | ||
202 | b = *buf++; | ||
203 | printk("%02x%02x ", a, b); | ||
204 | } | ||
205 | printk("\n"); | ||
206 | } | ||
207 | #else | ||
208 | #define PRINT_PKT(x...) do { } while (0) | ||
209 | #endif | ||
210 | |||
211 | |||
212 | /* this enables an interrupt in the interrupt mask register */ | ||
213 | #define SMC_ENABLE_INT(x) do { \ | ||
214 | unsigned int __mask; \ | ||
215 | unsigned long __flags; \ | ||
216 | spin_lock_irqsave(&lp->lock, __flags); \ | ||
217 | __mask = SMC_GET_INT_EN(); \ | ||
218 | __mask |= (x); \ | ||
219 | SMC_SET_INT_EN(__mask); \ | ||
220 | spin_unlock_irqrestore(&lp->lock, __flags); \ | ||
221 | } while (0) | ||
222 | |||
223 | /* this disables an interrupt from the interrupt mask register */ | ||
224 | #define SMC_DISABLE_INT(x) do { \ | ||
225 | unsigned int __mask; \ | ||
226 | unsigned long __flags; \ | ||
227 | spin_lock_irqsave(&lp->lock, __flags); \ | ||
228 | __mask = SMC_GET_INT_EN(); \ | ||
229 | __mask &= ~(x); \ | ||
230 | SMC_SET_INT_EN(__mask); \ | ||
231 | spin_unlock_irqrestore(&lp->lock, __flags); \ | ||
232 | } while (0) | ||
233 | |||
234 | /* | ||
235 | * this does a soft reset on the device | ||
236 | */ | ||
237 | static void smc911x_reset(struct net_device *dev) | ||
238 | { | ||
239 | unsigned long ioaddr = dev->base_addr; | ||
240 | struct smc911x_local *lp = netdev_priv(dev); | ||
241 | unsigned int reg, timeout=0, resets=1; | ||
242 | unsigned long flags; | ||
243 | |||
244 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
245 | |||
246 | /* Take out of PM setting first */ | ||
247 | if ((SMC_GET_PMT_CTRL() & PMT_CTRL_READY_) == 0) { | ||
248 | /* Write to the bytetest will take out of powerdown */ | ||
249 | SMC_SET_BYTE_TEST(0); | ||
250 | timeout=10; | ||
251 | do { | ||
252 | udelay(10); | ||
253 | reg = SMC_GET_PMT_CTRL() & PMT_CTRL_READY_; | ||
254 | } while ( timeout-- && !reg); | ||
255 | if (timeout == 0) { | ||
256 | PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name); | ||
257 | return; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | /* Disable all interrupts */ | ||
262 | spin_lock_irqsave(&lp->lock, flags); | ||
263 | SMC_SET_INT_EN(0); | ||
264 | spin_unlock_irqrestore(&lp->lock, flags); | ||
265 | |||
266 | while (resets--) { | ||
267 | SMC_SET_HW_CFG(HW_CFG_SRST_); | ||
268 | timeout=10; | ||
269 | do { | ||
270 | udelay(10); | ||
271 | reg = SMC_GET_HW_CFG(); | ||
272 | /* If chip indicates reset timeout then try again */ | ||
273 | if (reg & HW_CFG_SRST_TO_) { | ||
274 | PRINTK("%s: chip reset timeout, retrying...\n", dev->name); | ||
275 | resets++; | ||
276 | break; | ||
277 | } | ||
278 | } while ( timeout-- && (reg & HW_CFG_SRST_)); | ||
279 | } | ||
280 | if (timeout == 0) { | ||
281 | PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name); | ||
282 | return; | ||
283 | } | ||
284 | |||
285 | /* make sure EEPROM has finished loading before setting GPIO_CFG */ | ||
286 | timeout=1000; | ||
287 | while ( timeout-- && (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_)) { | ||
288 | udelay(10); | ||
289 | } | ||
290 | if (timeout == 0){ | ||
291 | PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name); | ||
292 | return; | ||
293 | } | ||
294 | |||
295 | /* Initialize interrupts */ | ||
296 | SMC_SET_INT_EN(0); | ||
297 | SMC_ACK_INT(-1); | ||
298 | |||
299 | /* Reset the FIFO level and flow control settings */ | ||
300 | SMC_SET_HW_CFG((lp->tx_fifo_kb & 0xF) << 16); | ||
301 | //TODO: Figure out what appropriate pause time is | ||
302 | SMC_SET_FLOW(FLOW_FCPT_ | FLOW_FCEN_); | ||
303 | SMC_SET_AFC_CFG(lp->afc_cfg); | ||
304 | |||
305 | |||
306 | /* Set to LED outputs */ | ||
307 | SMC_SET_GPIO_CFG(0x70070000); | ||
308 | |||
309 | /* | ||
310 | * Deassert IRQ for 1*10us for edge type interrupts | ||
311 | * and drive IRQ pin push-pull | ||
312 | */ | ||
313 | SMC_SET_IRQ_CFG( (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_ ); | ||
314 | |||
315 | /* clear anything saved */ | ||
316 | if (lp->pending_tx_skb != NULL) { | ||
317 | dev_kfree_skb (lp->pending_tx_skb); | ||
318 | lp->pending_tx_skb = NULL; | ||
319 | lp->stats.tx_errors++; | ||
320 | lp->stats.tx_aborted_errors++; | ||
321 | } | ||
322 | } | ||
323 | |||
324 | /* | ||
325 | * Enable Interrupts, Receive, and Transmit | ||
326 | */ | ||
327 | static void smc911x_enable(struct net_device *dev) | ||
328 | { | ||
329 | unsigned long ioaddr = dev->base_addr; | ||
330 | struct smc911x_local *lp = netdev_priv(dev); | ||
331 | unsigned mask, cfg, cr; | ||
332 | unsigned long flags; | ||
333 | |||
334 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
335 | |||
336 | SMC_SET_MAC_ADDR(dev->dev_addr); | ||
337 | |||
338 | /* Enable TX */ | ||
339 | cfg = SMC_GET_HW_CFG(); | ||
340 | cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF; | ||
341 | cfg |= HW_CFG_SF_; | ||
342 | SMC_SET_HW_CFG(cfg); | ||
343 | SMC_SET_FIFO_TDA(0xFF); | ||
344 | /* Update TX stats on every 64 packets sent or every 1 sec */ | ||
345 | SMC_SET_FIFO_TSL(64); | ||
346 | SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000); | ||
347 | |||
348 | spin_lock_irqsave(&lp->lock, flags); | ||
349 | SMC_GET_MAC_CR(cr); | ||
350 | cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; | ||
351 | SMC_SET_MAC_CR(cr); | ||
352 | SMC_SET_TX_CFG(TX_CFG_TX_ON_); | ||
353 | spin_unlock_irqrestore(&lp->lock, flags); | ||
354 | |||
355 | /* Add 2 byte padding to start of packets */ | ||
356 | SMC_SET_RX_CFG((2<<8) & RX_CFG_RXDOFF_); | ||
357 | |||
358 | /* Turn on receiver and enable RX */ | ||
359 | if (cr & MAC_CR_RXEN_) | ||
360 | DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name); | ||
361 | |||
362 | spin_lock_irqsave(&lp->lock, flags); | ||
363 | SMC_SET_MAC_CR( cr | MAC_CR_RXEN_ ); | ||
364 | spin_unlock_irqrestore(&lp->lock, flags); | ||
365 | |||
366 | /* Interrupt on every received packet */ | ||
367 | SMC_SET_FIFO_RSA(0x01); | ||
368 | SMC_SET_FIFO_RSL(0x00); | ||
369 | |||
370 | /* now, enable interrupts */ | ||
371 | mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ | | ||
372 | INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ | | ||
373 | INT_EN_PHY_INT_EN_; | ||
374 | if (IS_REV_A(lp->revision)) | ||
375 | mask|=INT_EN_RDFL_EN_; | ||
376 | else { | ||
377 | mask|=INT_EN_RDFO_EN_; | ||
378 | } | ||
379 | SMC_ENABLE_INT(mask); | ||
380 | } | ||
381 | |||
382 | /* | ||
383 | * this puts the device in an inactive state | ||
384 | */ | ||
385 | static void smc911x_shutdown(struct net_device *dev) | ||
386 | { | ||
387 | unsigned long ioaddr = dev->base_addr; | ||
388 | struct smc911x_local *lp = netdev_priv(dev); | ||
389 | unsigned cr; | ||
390 | unsigned long flags; | ||
391 | |||
392 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__); | ||
393 | |||
394 | /* Disable IRQ's */ | ||
395 | SMC_SET_INT_EN(0); | ||
396 | |||
397 | /* Turn off Rx and TX */ | ||
398 | spin_lock_irqsave(&lp->lock, flags); | ||
399 | SMC_GET_MAC_CR(cr); | ||
400 | cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); | ||
401 | SMC_SET_MAC_CR(cr); | ||
402 | SMC_SET_TX_CFG(TX_CFG_STOP_TX_); | ||
403 | spin_unlock_irqrestore(&lp->lock, flags); | ||
404 | } | ||
405 | |||
406 | static inline void smc911x_drop_pkt(struct net_device *dev) | ||
407 | { | ||
408 | unsigned long ioaddr = dev->base_addr; | ||
409 | unsigned int fifo_count, timeout, reg; | ||
410 | |||
411 | DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__); | ||
412 | fifo_count = SMC_GET_RX_FIFO_INF() & 0xFFFF; | ||
413 | if (fifo_count <= 4) { | ||
414 | /* Manually dump the packet data */ | ||
415 | while (fifo_count--) | ||
416 | SMC_GET_RX_FIFO(); | ||
417 | } else { | ||
418 | /* Fast forward through the bad packet */ | ||
419 | SMC_SET_RX_DP_CTRL(RX_DP_CTRL_FFWD_BUSY_); | ||
420 | timeout=50; | ||
421 | do { | ||
422 | udelay(10); | ||
423 | reg = SMC_GET_RX_DP_CTRL() & RX_DP_CTRL_FFWD_BUSY_; | ||
424 | } while ( timeout-- && reg); | ||
425 | if (timeout == 0) { | ||
426 | PRINTK("%s: timeout waiting for RX fast forward\n", dev->name); | ||
427 | } | ||
428 | } | ||
429 | } | ||
430 | |||
431 | /* | ||
432 | * This is the procedure to handle the receipt of a packet. | ||
433 | * It should be called after checking for packet presence in | ||
434 | * the RX status FIFO. It must be called with the spin lock | ||
435 | * already held. | ||
436 | */ | ||
437 | static inline void smc911x_rcv(struct net_device *dev) | ||
438 | { | ||
439 | struct smc911x_local *lp = netdev_priv(dev); | ||
440 | unsigned long ioaddr = dev->base_addr; | ||
441 | unsigned int pkt_len, status; | ||
442 | struct sk_buff *skb; | ||
443 | unsigned char *data; | ||
444 | |||
445 | DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", | ||
446 | dev->name, __FUNCTION__); | ||
447 | status = SMC_GET_RX_STS_FIFO(); | ||
448 | DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n", | ||
449 | dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); | ||
450 | pkt_len = (status & RX_STS_PKT_LEN_) >> 16; | ||
451 | if (status & RX_STS_ES_) { | ||
452 | /* Deal with a bad packet */ | ||
453 | lp->stats.rx_errors++; | ||
454 | if (status & RX_STS_CRC_ERR_) | ||
455 | lp->stats.rx_crc_errors++; | ||
456 | else { | ||
457 | if (status & RX_STS_LEN_ERR_) | ||
458 | lp->stats.rx_length_errors++; | ||
459 | if (status & RX_STS_MCAST_) | ||
460 | lp->stats.multicast++; | ||
461 | } | ||
462 | /* Remove the bad packet data from the RX FIFO */ | ||
463 | smc911x_drop_pkt(dev); | ||
464 | } else { | ||
465 | /* Receive a valid packet */ | ||
466 | /* Alloc a buffer with extra room for DMA alignment */ | ||
467 | skb=dev_alloc_skb(pkt_len+32); | ||
468 | if (unlikely(skb == NULL)) { | ||
469 | PRINTK( "%s: Low memory, rcvd packet dropped.\n", | ||
470 | dev->name); | ||
471 | lp->stats.rx_dropped++; | ||
472 | smc911x_drop_pkt(dev); | ||
473 | return; | ||
474 | } | ||
475 | /* Align IP header to 32 bits | ||
476 | * Note that the device is configured to add a 2 | ||
477 | * byte padding to the packet start, so we really | ||
478 | * want to write to the original data pointer */ | ||
479 | data = skb->data; | ||
480 | skb_reserve(skb, 2); | ||
481 | skb_put(skb,pkt_len-4); | ||
482 | #ifdef SMC_USE_DMA | ||
483 | { | ||
484 | unsigned int fifo; | ||
485 | /* Lower the FIFO threshold if possible */ | ||
486 | fifo = SMC_GET_FIFO_INT(); | ||
487 | if (fifo & 0xFF) fifo--; | ||
488 | DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n", | ||
489 | dev->name, fifo & 0xff); | ||
490 | SMC_SET_FIFO_INT(fifo); | ||
491 | /* Setup RX DMA */ | ||
492 | SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_)); | ||
493 | lp->rxdma_active = 1; | ||
494 | lp->current_rx_skb = skb; | ||
495 | SMC_PULL_DATA(data, (pkt_len+2+15) & ~15); | ||
496 | /* Packet processing deferred to DMA RX interrupt */ | ||
497 | } | ||
498 | #else | ||
499 | SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_)); | ||
500 | SMC_PULL_DATA(data, pkt_len+2+3); | ||
501 | |||
502 | DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name); | ||
503 | PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); | ||
504 | dev->last_rx = jiffies; | ||
505 | skb->dev = dev; | ||
506 | skb->protocol = eth_type_trans(skb, dev); | ||
507 | netif_rx(skb); | ||
508 | lp->stats.rx_packets++; | ||
509 | lp->stats.rx_bytes += pkt_len-4; | ||
510 | #endif | ||
511 | } | ||
512 | } | ||
513 | |||
514 | /* | ||
515 | * This is called to actually send a packet to the chip. | ||
516 | */ | ||
517 | static void smc911x_hardware_send_pkt(struct net_device *dev) | ||
518 | { | ||
519 | struct smc911x_local *lp = netdev_priv(dev); | ||
520 | unsigned long ioaddr = dev->base_addr; | ||
521 | struct sk_buff *skb; | ||
522 | unsigned int cmdA, cmdB, len; | ||
523 | unsigned char *buf; | ||
524 | unsigned long flags; | ||
525 | |||
526 | DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
527 | BUG_ON(lp->pending_tx_skb == NULL); | ||
528 | |||
529 | skb = lp->pending_tx_skb; | ||
530 | lp->pending_tx_skb = NULL; | ||
531 | |||
532 | /* cmdA [25:24] data alignment [20:16] start offset [10:0] buffer length */ | ||
533 | /* cmdB [31:16] pkt tag [10:0] length */ | ||
534 | #ifdef SMC_USE_DMA | ||
535 | /* 16 byte buffer alignment mode */ | ||
536 | buf = (char*)((u32)(skb->data) & ~0xF); | ||
537 | len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF; | ||
538 | cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) | | ||
539 | TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | | ||
540 | skb->len; | ||
541 | #else | ||
542 | buf = (char*)((u32)skb->data & ~0x3); | ||
543 | len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3; | ||
544 | cmdA = (((u32)skb->data & 0x3) << 16) | | ||
545 | TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | | ||
546 | skb->len; | ||
547 | #endif | ||
548 | /* tag is packet length so we can use this in stats update later */ | ||
549 | cmdB = (skb->len << 16) | (skb->len & 0x7FF); | ||
550 | |||
551 | DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n", | ||
552 | dev->name, len, len, buf, cmdA, cmdB); | ||
553 | SMC_SET_TX_FIFO(cmdA); | ||
554 | SMC_SET_TX_FIFO(cmdB); | ||
555 | |||
556 | DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name); | ||
557 | PRINT_PKT(buf, len <= 64 ? len : 64); | ||
558 | |||
559 | /* Send pkt via PIO or DMA */ | ||
560 | #ifdef SMC_USE_DMA | ||
561 | lp->current_tx_skb = skb; | ||
562 | SMC_PUSH_DATA(buf, len); | ||
563 | /* DMA complete IRQ will free buffer and set jiffies */ | ||
564 | #else | ||
565 | SMC_PUSH_DATA(buf, len); | ||
566 | dev->trans_start = jiffies; | ||
567 | dev_kfree_skb(skb); | ||
568 | #endif | ||
569 | spin_lock_irqsave(&lp->lock, flags); | ||
570 | if (!lp->tx_throttle) { | ||
571 | netif_wake_queue(dev); | ||
572 | } | ||
573 | spin_unlock_irqrestore(&lp->lock, flags); | ||
574 | SMC_ENABLE_INT(INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_); | ||
575 | } | ||
576 | |||
577 | /* | ||
578 | * Since I am not sure if I will have enough room in the chip's ram | ||
579 | * to store the packet, I call this routine which either sends it | ||
580 | * now, or sets the card up to generate an interrupt when it is ready | ||
581 | * for the packet. | ||
582 | */ | ||
583 | static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
584 | { | ||
585 | struct smc911x_local *lp = netdev_priv(dev); | ||
586 | unsigned long ioaddr = dev->base_addr; | ||
587 | unsigned int free; | ||
588 | unsigned long flags; | ||
589 | |||
590 | DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", | ||
591 | dev->name, __FUNCTION__); | ||
592 | |||
593 | BUG_ON(lp->pending_tx_skb != NULL); | ||
594 | |||
595 | free = SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TDFREE_; | ||
596 | DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free); | ||
597 | |||
598 | /* Turn off the flow when running out of space in FIFO */ | ||
599 | if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { | ||
600 | DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n", | ||
601 | dev->name, free); | ||
602 | spin_lock_irqsave(&lp->lock, flags); | ||
603 | /* Re-enable when there is room for at least one MTU-sized packet */ | ||
604 | SMC_SET_FIFO_TDA((SMC911X_TX_FIFO_LOW_THRESHOLD)/64); | ||
605 | lp->tx_throttle = 1; | ||
606 | netif_stop_queue(dev); | ||
607 | spin_unlock_irqrestore(&lp->lock, flags); | ||
608 | } | ||
609 | |||
610 | /* Drop packets when we run out of space in TX FIFO | ||
611 | * Account for overhead required for: | ||
612 | * | ||
613 | * Tx command words 8 bytes | ||
614 | * Start offset 15 bytes | ||
615 | * End padding 15 bytes | ||
616 | */ | ||
617 | if (unlikely(free < (skb->len + 8 + 15 + 15))) { | ||
618 | printk("%s: No Tx free space %d < %d\n", | ||
619 | dev->name, free, skb->len); | ||
620 | lp->pending_tx_skb = NULL; | ||
621 | lp->stats.tx_errors++; | ||
622 | lp->stats.tx_dropped++; | ||
623 | dev_kfree_skb(skb); | ||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | #ifdef SMC_USE_DMA | ||
628 | { | ||
629 | /* If the DMA is already running then defer this packet Tx until | ||
630 | * the DMA IRQ starts it | ||
631 | */ | ||
632 | spin_lock_irqsave(&lp->lock, flags); | ||
633 | if (lp->txdma_active) { | ||
634 | DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name); | ||
635 | lp->pending_tx_skb = skb; | ||
636 | netif_stop_queue(dev); | ||
637 | spin_unlock_irqrestore(&lp->lock, flags); | ||
638 | return 0; | ||
639 | } else { | ||
640 | DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name); | ||
641 | lp->txdma_active = 1; | ||
642 | } | ||
643 | spin_unlock_irqrestore(&lp->lock, flags); | ||
644 | } | ||
645 | #endif | ||
646 | lp->pending_tx_skb = skb; | ||
647 | smc911x_hardware_send_pkt(dev); | ||
648 | |||
649 | return 0; | ||
650 | } | ||
651 | |||
652 | /* | ||
653 | * This handles a TX status interrupt, which is only called when: | ||
654 | * - a TX error occurred, or | ||
655 | * - TX of a packet completed. | ||
656 | */ | ||
657 | static void smc911x_tx(struct net_device *dev) | ||
658 | { | ||
659 | unsigned long ioaddr = dev->base_addr; | ||
660 | struct smc911x_local *lp = netdev_priv(dev); | ||
661 | unsigned int tx_status; | ||
662 | |||
663 | DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", | ||
664 | dev->name, __FUNCTION__); | ||
665 | |||
666 | /* Collect the TX status */ | ||
667 | while (((SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16) != 0) { | ||
668 | DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n", | ||
669 | dev->name, | ||
670 | (SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16); | ||
671 | tx_status = SMC_GET_TX_STS_FIFO(); | ||
672 | lp->stats.tx_packets++; | ||
673 | lp->stats.tx_bytes+=tx_status>>16; | ||
674 | DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n", | ||
675 | dev->name, (tx_status & 0xffff0000) >> 16, | ||
676 | tx_status & 0x0000ffff); | ||
677 | /* count Tx errors, but ignore lost carrier errors when in | ||
678 | * full-duplex mode */ | ||
679 | if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx && | ||
680 | !(tx_status & 0x00000306))) { | ||
681 | lp->stats.tx_errors++; | ||
682 | } | ||
683 | if (tx_status & TX_STS_MANY_COLL_) { | ||
684 | lp->stats.collisions+=16; | ||
685 | lp->stats.tx_aborted_errors++; | ||
686 | } else { | ||
687 | lp->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; | ||
688 | } | ||
689 | /* carrier error only has meaning for half-duplex communication */ | ||
690 | if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) && | ||
691 | !lp->ctl_rfduplx) { | ||
692 | lp->stats.tx_carrier_errors++; | ||
693 | } | ||
694 | if (tx_status & TX_STS_LATE_COLL_) { | ||
695 | lp->stats.collisions++; | ||
696 | lp->stats.tx_aborted_errors++; | ||
697 | } | ||
698 | } | ||
699 | } | ||
700 | |||
701 | |||
702 | /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ | ||
703 | /* | ||
704 | * Reads a register from the MII Management serial interface | ||
705 | */ | ||
706 | |||
707 | static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg) | ||
708 | { | ||
709 | unsigned long ioaddr = dev->base_addr; | ||
710 | unsigned int phydata; | ||
711 | |||
712 | SMC_GET_MII(phyreg, phyaddr, phydata); | ||
713 | |||
714 | DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", | ||
715 | __FUNCTION__, phyaddr, phyreg, phydata); | ||
716 | return phydata; | ||
717 | } | ||
718 | |||
719 | |||
720 | /* | ||
721 | * Writes a register to the MII Management serial interface | ||
722 | */ | ||
723 | static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg, | ||
724 | int phydata) | ||
725 | { | ||
726 | unsigned long ioaddr = dev->base_addr; | ||
727 | |||
728 | DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", | ||
729 | __FUNCTION__, phyaddr, phyreg, phydata); | ||
730 | |||
731 | SMC_SET_MII(phyreg, phyaddr, phydata); | ||
732 | } | ||
733 | |||
734 | /* | ||
735 | * Finds and reports the PHY address (the 115 and 117 have an external | ||
736 | * PHY interface; the 118 has an internal PHY only). | ||
737 | */ | ||
738 | static void smc911x_phy_detect(struct net_device *dev) | ||
739 | { | ||
740 | unsigned long ioaddr = dev->base_addr; | ||
741 | struct smc911x_local *lp = netdev_priv(dev); | ||
742 | int phyaddr; | ||
743 | unsigned int cfg, id1, id2; | ||
744 | |||
745 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
746 | |||
747 | lp->phy_type = 0; | ||
748 | |||
749 | /* | ||
750 | * Scan all 32 PHY addresses if necessary, going from | ||
751 | * PHY#1 through PHY#31, and then PHY#0 last. | ||
752 | */ | ||
753 | switch(lp->version) { | ||
754 | case 0x115: | ||
755 | case 0x117: | ||
756 | cfg = SMC_GET_HW_CFG(); | ||
757 | if (cfg & HW_CFG_EXT_PHY_DET_) { | ||
758 | cfg &= ~HW_CFG_PHY_CLK_SEL_; | ||
759 | cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; | ||
760 | SMC_SET_HW_CFG(cfg); | ||
761 | udelay(10); /* Wait for clocks to stop */ | ||
762 | |||
763 | cfg |= HW_CFG_EXT_PHY_EN_; | ||
764 | SMC_SET_HW_CFG(cfg); | ||
765 | udelay(10); /* Wait for clocks to stop */ | ||
766 | |||
767 | cfg &= ~HW_CFG_PHY_CLK_SEL_; | ||
768 | cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; | ||
769 | SMC_SET_HW_CFG(cfg); | ||
770 | udelay(10); /* Wait for clocks to stop */ | ||
771 | |||
772 | cfg |= HW_CFG_SMI_SEL_; | ||
773 | SMC_SET_HW_CFG(cfg); | ||
774 | |||
775 | for (phyaddr = 1; phyaddr < 32; ++phyaddr) { | ||
776 | |||
777 | /* Read the PHY identifiers */ | ||
778 | SMC_GET_PHY_ID1(phyaddr & 31, id1); | ||
779 | SMC_GET_PHY_ID2(phyaddr & 31, id2); | ||
780 | |||
781 | /* Make sure it is a valid identifier */ | ||
782 | if (id1 != 0x0000 && id1 != 0xffff && | ||
783 | id1 != 0x8000 && id2 != 0x0000 && | ||
784 | id2 != 0xffff && id2 != 0x8000) { | ||
785 | /* Save the PHY's address */ | ||
786 | lp->mii.phy_id = phyaddr & 31; | ||
787 | lp->phy_type = id1 << 16 | id2; | ||
788 | break; | ||
789 | } | ||
790 | } | ||
791 | } | ||
792 | default: | ||
793 | /* Internal media only */ | ||
794 | SMC_GET_PHY_ID1(1, id1); | ||
795 | SMC_GET_PHY_ID2(1, id2); | ||
796 | /* Save the PHY's address */ | ||
797 | lp->mii.phy_id = 1; | ||
798 | lp->phy_type = id1 << 16 | id2; | ||
799 | } | ||
800 | |||
801 | DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n", | ||
802 | dev->name, id1, id2, lp->mii.phy_id); | ||
803 | } | ||
804 | |||
805 | /* | ||
806 | * Sets the PHY to a configuration as determined by the user. | ||
807 | * Called with spin_lock held. | ||
808 | */ | ||
809 | static int smc911x_phy_fixed(struct net_device *dev) | ||
810 | { | ||
811 | struct smc911x_local *lp = netdev_priv(dev); | ||
812 | unsigned long ioaddr = dev->base_addr; | ||
813 | int phyaddr = lp->mii.phy_id; | ||
814 | int bmcr; | ||
815 | |||
816 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
817 | |||
818 | /* Enter Link Disable state */ | ||
819 | SMC_GET_PHY_BMCR(phyaddr, bmcr); | ||
820 | bmcr |= BMCR_PDOWN; | ||
821 | SMC_SET_PHY_BMCR(phyaddr, bmcr); | ||
822 | |||
823 | /* | ||
824 | * Set our fixed capabilities | ||
825 | * Disable auto-negotiation | ||
826 | */ | ||
827 | bmcr &= ~BMCR_ANENABLE; | ||
828 | if (lp->ctl_rfduplx) | ||
829 | bmcr |= BMCR_FULLDPLX; | ||
830 | |||
831 | if (lp->ctl_rspeed == 100) | ||
832 | bmcr |= BMCR_SPEED100; | ||
833 | |||
834 | /* Write our capabilities to the phy control register */ | ||
835 | SMC_SET_PHY_BMCR(phyaddr, bmcr); | ||
836 | |||
837 | /* Re-Configure the Receive/Phy Control register */ | ||
838 | bmcr &= ~BMCR_PDOWN; | ||
839 | SMC_SET_PHY_BMCR(phyaddr, bmcr); | ||
840 | |||
841 | return 1; | ||
842 | } | ||
843 | |||
844 | /* | ||
845 | * smc911x_phy_reset - reset the phy | ||
846 | * @dev: net device | ||
847 | * @phy: phy address | ||
848 | * | ||
849 | * Issue a software reset for the specified PHY and | ||
850 | * wait up to 100ms for the reset to complete. We should | ||
851 | * not access the PHY for 50ms after issuing the reset. | ||
852 | * | ||
853 | * The time to wait appears to be dependent on the PHY. | ||
854 | * | ||
855 | */ | ||
856 | static int smc911x_phy_reset(struct net_device *dev, int phy) | ||
857 | { | ||
858 | struct smc911x_local *lp = netdev_priv(dev); | ||
859 | unsigned long ioaddr = dev->base_addr; | ||
860 | int timeout; | ||
861 | unsigned long flags; | ||
862 | unsigned int reg; | ||
863 | |||
864 | DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); | ||
865 | |||
866 | spin_lock_irqsave(&lp->lock, flags); | ||
867 | reg = SMC_GET_PMT_CTRL(); | ||
868 | reg &= ~0xfffff030; | ||
869 | reg |= PMT_CTRL_PHY_RST_; | ||
870 | SMC_SET_PMT_CTRL(reg); | ||
871 | spin_unlock_irqrestore(&lp->lock, flags); | ||
872 | for (timeout = 2; timeout; timeout--) { | ||
873 | msleep(50); | ||
874 | spin_lock_irqsave(&lp->lock, flags); | ||
875 | reg = SMC_GET_PMT_CTRL(); | ||
876 | spin_unlock_irqrestore(&lp->lock, flags); | ||
877 | if (!(reg & PMT_CTRL_PHY_RST_)) { | ||
878 | /* extra delay required because the phy may | ||
879 | * not be completed with its reset | ||
880 | * when PHY_BCR_RESET_ is cleared. 256us | ||
881 | * should suffice, but use 500us to be safe | ||
882 | */ | ||
883 | udelay(500); | ||
884 | break; | ||
885 | } | ||
886 | } | ||
887 | |||
888 | return reg & PMT_CTRL_PHY_RST_; | ||
889 | } | ||
890 | |||
891 | /* | ||
892 | * smc911x_phy_powerdown - powerdown phy | ||
893 | * @dev: net device | ||
894 | * @phy: phy address | ||
895 | * | ||
896 | * Power down the specified PHY | ||
897 | */ | ||
898 | static void smc911x_phy_powerdown(struct net_device *dev, int phy) | ||
899 | { | ||
900 | unsigned long ioaddr = dev->base_addr; | ||
901 | unsigned int bmcr; | ||
902 | |||
903 | /* Enter Link Disable state */ | ||
904 | SMC_GET_PHY_BMCR(phy, bmcr); | ||
905 | bmcr |= BMCR_PDOWN; | ||
906 | SMC_SET_PHY_BMCR(phy, bmcr); | ||
907 | } | ||
908 | |||
909 | /* | ||
910 | * smc911x_phy_check_media - check the media status and adjust BMCR | ||
911 | * @dev: net device | ||
912 | * @init: set true for initialisation | ||
913 | * | ||
914 | * Select duplex mode depending on negotiation state. This | ||
915 | * also updates our carrier state. | ||
916 | */ | ||
917 | static void smc911x_phy_check_media(struct net_device *dev, int init) | ||
918 | { | ||
919 | struct smc911x_local *lp = netdev_priv(dev); | ||
920 | unsigned long ioaddr = dev->base_addr; | ||
921 | int phyaddr = lp->mii.phy_id; | ||
922 | unsigned int bmcr, cr; | ||
923 | |||
924 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
925 | |||
926 | if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { | ||
927 | /* duplex state has changed */ | ||
928 | SMC_GET_PHY_BMCR(phyaddr, bmcr); | ||
929 | SMC_GET_MAC_CR(cr); | ||
930 | if (lp->mii.full_duplex) { | ||
931 | DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name); | ||
932 | bmcr |= BMCR_FULLDPLX; | ||
933 | cr |= MAC_CR_RCVOWN_; | ||
934 | } else { | ||
935 | DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name); | ||
936 | bmcr &= ~BMCR_FULLDPLX; | ||
937 | cr &= ~MAC_CR_RCVOWN_; | ||
938 | } | ||
939 | SMC_SET_PHY_BMCR(phyaddr, bmcr); | ||
940 | SMC_SET_MAC_CR(cr); | ||
941 | } | ||
942 | } | ||
943 | |||
944 | /* | ||
945 | * Configures the specified PHY through the MII management interface | ||
946 | * using Autonegotiation. | ||
947 | * Calls smc911x_phy_fixed() if the user has requested a certain config. | ||
948 | * If RPC ANEG bit is set, the media selection is dependent purely on | ||
949 | * the selection by the MII (either in the MII BMCR reg or the result | ||
950 | * of autonegotiation.) If the RPC ANEG bit is cleared, the selection | ||
951 | * is controlled by the RPC SPEED and RPC DPLX bits. | ||
952 | */ | ||
953 | static void smc911x_phy_configure(void *data) | ||
954 | { | ||
955 | struct net_device *dev = data; | ||
956 | struct smc911x_local *lp = netdev_priv(dev); | ||
957 | unsigned long ioaddr = dev->base_addr; | ||
958 | int phyaddr = lp->mii.phy_id; | ||
959 | int my_phy_caps; /* My PHY capabilities */ | ||
960 | int my_ad_caps; /* My Advertised capabilities */ | ||
961 | int status; | ||
962 | unsigned long flags; | ||
963 | |||
964 | DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); | ||
965 | |||
966 | /* | ||
967 | * We should not be called if phy_type is zero. | ||
968 | */ | ||
969 | if (lp->phy_type == 0) | ||
970 | goto smc911x_phy_configure_exit; | ||
971 | |||
972 | if (smc911x_phy_reset(dev, phyaddr)) { | ||
973 | printk("%s: PHY reset timed out\n", dev->name); | ||
974 | goto smc911x_phy_configure_exit; | ||
975 | } | ||
976 | spin_lock_irqsave(&lp->lock, flags); | ||
977 | |||
978 | /* | ||
979 | * Enable PHY Interrupts (for register 18) | ||
980 | * Interrupts listed here are enabled | ||
981 | */ | ||
982 | SMC_SET_PHY_INT_MASK(phyaddr, PHY_INT_MASK_ENERGY_ON_ | | ||
983 | PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ | | ||
984 | PHY_INT_MASK_LINK_DOWN_); | ||
985 | |||
986 | /* If the user requested no auto neg, then go set his request */ | ||
987 | if (lp->mii.force_media) { | ||
988 | smc911x_phy_fixed(dev); | ||
989 | goto smc911x_phy_configure_exit; | ||
990 | } | ||
991 | |||
992 | /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ | ||
993 | SMC_GET_PHY_BMSR(phyaddr, my_phy_caps); | ||
994 | if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { | ||
995 | printk(KERN_INFO "Auto negotiation NOT supported\n"); | ||
996 | smc911x_phy_fixed(dev); | ||
997 | goto smc911x_phy_configure_exit; | ||
998 | } | ||
999 | |||
1000 | /* CSMA capable w/ both pauses */ | ||
1001 | my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | ||
1002 | |||
1003 | if (my_phy_caps & BMSR_100BASE4) | ||
1004 | my_ad_caps |= ADVERTISE_100BASE4; | ||
1005 | if (my_phy_caps & BMSR_100FULL) | ||
1006 | my_ad_caps |= ADVERTISE_100FULL; | ||
1007 | if (my_phy_caps & BMSR_100HALF) | ||
1008 | my_ad_caps |= ADVERTISE_100HALF; | ||
1009 | if (my_phy_caps & BMSR_10FULL) | ||
1010 | my_ad_caps |= ADVERTISE_10FULL; | ||
1011 | if (my_phy_caps & BMSR_10HALF) | ||
1012 | my_ad_caps |= ADVERTISE_10HALF; | ||
1013 | |||
1014 | /* Disable capabilities not selected by our user */ | ||
1015 | if (lp->ctl_rspeed != 100) | ||
1016 | my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); | ||
1017 | |||
1018 | if (!lp->ctl_rfduplx) | ||
1019 | my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); | ||
1020 | |||
1021 | /* Update our Auto-Neg Advertisement Register */ | ||
1022 | SMC_SET_PHY_MII_ADV(phyaddr, my_ad_caps); | ||
1023 | lp->mii.advertising = my_ad_caps; | ||
1024 | |||
1025 | /* | ||
1026 | * Read the register back. Without this, it appears that when | ||
1027 | * auto-negotiation is restarted, sometimes it isn't ready and | ||
1028 | * the link does not come up. | ||
1029 | */ | ||
1030 | udelay(10); | ||
1031 | SMC_GET_PHY_MII_ADV(phyaddr, status); | ||
1032 | |||
1033 | DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps); | ||
1034 | DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps); | ||
1035 | |||
1036 | /* Restart auto-negotiation process in order to advertise my caps */ | ||
1037 | SMC_SET_PHY_BMCR(phyaddr, BMCR_ANENABLE | BMCR_ANRESTART); | ||
1038 | |||
1039 | smc911x_phy_check_media(dev, 1); | ||
1040 | |||
1041 | smc911x_phy_configure_exit: | ||
1042 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1043 | lp->work_pending = 0; | ||
1044 | } | ||
1045 | |||
1046 | /* | ||
1047 | * smc911x_phy_interrupt | ||
1048 | * | ||
1049 | * Purpose: Handle interrupts relating to PHY register 18. This is | ||
1050 | * called from the "hard" interrupt handler under our private spinlock. | ||
1051 | */ | ||
1052 | static void smc911x_phy_interrupt(struct net_device *dev) | ||
1053 | { | ||
1054 | struct smc911x_local *lp = netdev_priv(dev); | ||
1055 | unsigned long ioaddr = dev->base_addr; | ||
1056 | int phyaddr = lp->mii.phy_id; | ||
1057 | int status; | ||
1058 | |||
1059 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1060 | |||
1061 | if (lp->phy_type == 0) | ||
1062 | return; | ||
1063 | |||
1064 | smc911x_phy_check_media(dev, 0); | ||
1065 | /* read to clear status bits */ | ||
1066 | SMC_GET_PHY_INT_SRC(phyaddr,status); | ||
1067 | DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n", | ||
1068 | dev->name, status & 0xffff); | ||
1069 | DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n", | ||
1070 | dev->name, SMC_GET_AFC_CFG()); | ||
1071 | } | ||
1072 | |||
1073 | /*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ | ||
1074 | |||
1075 | /* | ||
1076 | * This is the main routine of the driver, to handle the device when | ||
1077 | * it needs some attention. | ||
1078 | */ | ||
1079 | static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
1080 | { | ||
1081 | struct net_device *dev = dev_id; | ||
1082 | unsigned long ioaddr = dev->base_addr; | ||
1083 | struct smc911x_local *lp = netdev_priv(dev); | ||
1084 | unsigned int status, mask, timeout; | ||
1085 | unsigned int rx_overrun=0, cr, pkts; | ||
1086 | unsigned long flags; | ||
1087 | |||
1088 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1089 | |||
1090 | spin_lock_irqsave(&lp->lock, flags); | ||
1091 | |||
1092 | /* Spurious interrupt check */ | ||
1093 | if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != | ||
1094 | (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { | ||
1095 | return IRQ_NONE; | ||
1096 | } | ||
1097 | |||
1098 | mask = SMC_GET_INT_EN(); | ||
1099 | SMC_SET_INT_EN(0); | ||
1100 | |||
1101 | /* set a timeout value, so I don't stay here forever */ | ||
1102 | timeout = 8; | ||
1103 | |||
1104 | |||
1105 | do { | ||
1106 | status = SMC_GET_INT(); | ||
1107 | |||
1108 | DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n", | ||
1109 | dev->name, status, mask, status & ~mask); | ||
1110 | |||
1111 | status &= mask; | ||
1112 | if (!status) | ||
1113 | break; | ||
1114 | |||
1115 | /* Handle SW interrupt condition */ | ||
1116 | if (status & INT_STS_SW_INT_) { | ||
1117 | SMC_ACK_INT(INT_STS_SW_INT_); | ||
1118 | mask &= ~INT_EN_SW_INT_EN_; | ||
1119 | } | ||
1120 | /* Handle various error conditions */ | ||
1121 | if (status & INT_STS_RXE_) { | ||
1122 | SMC_ACK_INT(INT_STS_RXE_); | ||
1123 | lp->stats.rx_errors++; | ||
1124 | } | ||
1125 | if (status & INT_STS_RXDFH_INT_) { | ||
1126 | SMC_ACK_INT(INT_STS_RXDFH_INT_); | ||
1127 | lp->stats.rx_dropped+=SMC_GET_RX_DROP(); | ||
1128 | } | ||
1129 | /* Undocumented interrupt-what is the right thing to do here? */ | ||
1130 | if (status & INT_STS_RXDF_INT_) { | ||
1131 | SMC_ACK_INT(INT_STS_RXDF_INT_); | ||
1132 | } | ||
1133 | |||
1134 | /* Rx Data FIFO exceeds set level */ | ||
1135 | if (status & INT_STS_RDFL_) { | ||
1136 | if (IS_REV_A(lp->revision)) { | ||
1137 | rx_overrun=1; | ||
1138 | SMC_GET_MAC_CR(cr); | ||
1139 | cr &= ~MAC_CR_RXEN_; | ||
1140 | SMC_SET_MAC_CR(cr); | ||
1141 | DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); | ||
1142 | lp->stats.rx_errors++; | ||
1143 | lp->stats.rx_fifo_errors++; | ||
1144 | } | ||
1145 | SMC_ACK_INT(INT_STS_RDFL_); | ||
1146 | } | ||
1147 | if (status & INT_STS_RDFO_) { | ||
1148 | if (!IS_REV_A(lp->revision)) { | ||
1149 | SMC_GET_MAC_CR(cr); | ||
1150 | cr &= ~MAC_CR_RXEN_; | ||
1151 | SMC_SET_MAC_CR(cr); | ||
1152 | rx_overrun=1; | ||
1153 | DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); | ||
1154 | lp->stats.rx_errors++; | ||
1155 | lp->stats.rx_fifo_errors++; | ||
1156 | } | ||
1157 | SMC_ACK_INT(INT_STS_RDFO_); | ||
1158 | } | ||
1159 | /* Handle receive condition */ | ||
1160 | if ((status & INT_STS_RSFL_) || rx_overrun) { | ||
1161 | unsigned int fifo; | ||
1162 | DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name); | ||
1163 | fifo = SMC_GET_RX_FIFO_INF(); | ||
1164 | pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16; | ||
1165 | DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n", | ||
1166 | dev->name, pkts, fifo & 0xFFFF ); | ||
1167 | if (pkts != 0) { | ||
1168 | #ifdef SMC_USE_DMA | ||
1169 | unsigned int fifo; | ||
1170 | if (lp->rxdma_active){ | ||
1171 | DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, | ||
1172 | "%s: RX DMA active\n", dev->name); | ||
1173 | /* The DMA is already running so up the IRQ threshold */ | ||
1174 | fifo = SMC_GET_FIFO_INT() & ~0xFF; | ||
1175 | fifo |= pkts & 0xFF; | ||
1176 | DBG(SMC_DEBUG_RX, | ||
1177 | "%s: Setting RX stat FIFO threshold to %d\n", | ||
1178 | dev->name, fifo & 0xff); | ||
1179 | SMC_SET_FIFO_INT(fifo); | ||
1180 | } else | ||
1181 | #endif | ||
1182 | smc911x_rcv(dev); | ||
1183 | } | ||
1184 | SMC_ACK_INT(INT_STS_RSFL_); | ||
1185 | } | ||
1186 | /* Handle transmit FIFO available */ | ||
1187 | if (status & INT_STS_TDFA_) { | ||
1188 | DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name); | ||
1189 | SMC_SET_FIFO_TDA(0xFF); | ||
1190 | lp->tx_throttle = 0; | ||
1191 | #ifdef SMC_USE_DMA | ||
1192 | if (!lp->txdma_active) | ||
1193 | #endif | ||
1194 | netif_wake_queue(dev); | ||
1195 | SMC_ACK_INT(INT_STS_TDFA_); | ||
1196 | } | ||
1197 | /* Handle transmit done condition */ | ||
1198 | #if 1 | ||
1199 | if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) { | ||
1200 | DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, | ||
1201 | "%s: Tx stat FIFO limit (%d) /GPT irq\n", | ||
1202 | dev->name, (SMC_GET_FIFO_INT() & 0x00ff0000) >> 16); | ||
1203 | smc911x_tx(dev); | ||
1204 | SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000); | ||
1205 | SMC_ACK_INT(INT_STS_TSFL_); | ||
1206 | SMC_ACK_INT(INT_STS_TSFL_ | INT_STS_GPT_INT_); | ||
1207 | } | ||
1208 | #else | ||
1209 | if (status & INT_STS_TSFL_) { | ||
1210 | DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, (SMC_GET_FIFO_INT() & 0x00ff0000) >> 16); | ||
1211 | smc911x_tx(dev); | ||
1212 | SMC_ACK_INT(INT_STS_TSFL_); | ||
1213 | } | ||
1214 | |||
1215 | if (status & INT_STS_GPT_INT_) { | ||
1216 | DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n", | ||
1217 | dev->name, | ||
1218 | SMC_GET_IRQ_CFG(), | ||
1219 | SMC_GET_FIFO_INT(), | ||
1220 | SMC_GET_RX_CFG()); | ||
1221 | DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x " | ||
1222 | "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n", | ||
1223 | dev->name, | ||
1224 | (SMC_GET_RX_FIFO_INF() & 0x00ff0000) >> 16, | ||
1225 | SMC_GET_RX_FIFO_INF() & 0xffff, | ||
1226 | SMC_GET_RX_STS_FIFO_PEEK()); | ||
1227 | SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000); | ||
1228 | SMC_ACK_INT(INT_STS_GPT_INT_); | ||
1229 | } | ||
1230 | #endif | ||
1231 | |||
1232 | /* Handle PHY interrupt condition */ | ||
1233 | if (status & INT_STS_PHY_INT_) { | ||
1234 | DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name); | ||
1235 | smc911x_phy_interrupt(dev); | ||
1236 | SMC_ACK_INT(INT_STS_PHY_INT_); | ||
1237 | } | ||
1238 | } while (--timeout); | ||
1239 | |||
1240 | /* restore mask state */ | ||
1241 | SMC_SET_INT_EN(mask); | ||
1242 | |||
1243 | DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n", | ||
1244 | dev->name, 8-timeout); | ||
1245 | |||
1246 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1247 | |||
1248 | DBG(3, "%s: Interrupt done (%d loops)\n", dev->name, 8-timeout); | ||
1249 | |||
1250 | return IRQ_HANDLED; | ||
1251 | } | ||
1252 | |||
1253 | #ifdef SMC_USE_DMA | ||
1254 | static void | ||
1255 | smc911x_tx_dma_irq(int dma, void *data, struct pt_regs *regs) | ||
1256 | { | ||
1257 | struct net_device *dev = (struct net_device *)data; | ||
1258 | struct smc911x_local *lp = netdev_priv(dev); | ||
1259 | struct sk_buff *skb = lp->current_tx_skb; | ||
1260 | unsigned long flags; | ||
1261 | |||
1262 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1263 | |||
1264 | DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name); | ||
1265 | /* Clear the DMA interrupt sources */ | ||
1266 | SMC_DMA_ACK_IRQ(dev, dma); | ||
1267 | BUG_ON(skb == NULL); | ||
1268 | dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); | ||
1269 | dev->trans_start = jiffies; | ||
1270 | dev_kfree_skb_irq(skb); | ||
1271 | lp->current_tx_skb = NULL; | ||
1272 | if (lp->pending_tx_skb != NULL) | ||
1273 | smc911x_hardware_send_pkt(dev); | ||
1274 | else { | ||
1275 | DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, | ||
1276 | "%s: No pending Tx packets. DMA disabled\n", dev->name); | ||
1277 | spin_lock_irqsave(&lp->lock, flags); | ||
1278 | lp->txdma_active = 0; | ||
1279 | if (!lp->tx_throttle) { | ||
1280 | netif_wake_queue(dev); | ||
1281 | } | ||
1282 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1283 | } | ||
1284 | |||
1285 | DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, | ||
1286 | "%s: TX DMA irq completed\n", dev->name); | ||
1287 | } | ||
1288 | static void | ||
1289 | smc911x_rx_dma_irq(int dma, void *data, struct pt_regs *regs) | ||
1290 | { | ||
1291 | struct net_device *dev = (struct net_device *)data; | ||
1292 | unsigned long ioaddr = dev->base_addr; | ||
1293 | struct smc911x_local *lp = netdev_priv(dev); | ||
1294 | struct sk_buff *skb = lp->current_rx_skb; | ||
1295 | unsigned long flags; | ||
1296 | unsigned int pkts; | ||
1297 | |||
1298 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1299 | DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name); | ||
1300 | /* Clear the DMA interrupt sources */ | ||
1301 | SMC_DMA_ACK_IRQ(dev, dma); | ||
1302 | dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); | ||
1303 | BUG_ON(skb == NULL); | ||
1304 | lp->current_rx_skb = NULL; | ||
1305 | PRINT_PKT(skb->data, skb->len); | ||
1306 | dev->last_rx = jiffies; | ||
1307 | skb->dev = dev; | ||
1308 | skb->protocol = eth_type_trans(skb, dev); | ||
1309 | netif_rx(skb); | ||
1310 | lp->stats.rx_packets++; | ||
1311 | lp->stats.rx_bytes += skb->len; | ||
1312 | |||
1313 | spin_lock_irqsave(&lp->lock, flags); | ||
1314 | pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16; | ||
1315 | if (pkts != 0) { | ||
1316 | smc911x_rcv(dev); | ||
1317 | } else { | ||
1318 | lp->rxdma_active = 0; | ||
1319 | } | ||
1320 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1321 | DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, | ||
1322 | "%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n", | ||
1323 | dev->name, pkts); | ||
1324 | } | ||
1325 | #endif /* SMC_USE_DMA */ | ||
1326 | |||
1327 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1328 | /* | ||
1329 | * Polling receive - used by netconsole and other diagnostic tools | ||
1330 | * to allow network i/o with interrupts disabled. | ||
1331 | */ | ||
1332 | static void smc911x_poll_controller(struct net_device *dev) | ||
1333 | { | ||
1334 | disable_irq(dev->irq); | ||
1335 | smc911x_interrupt(dev->irq, dev, NULL); | ||
1336 | enable_irq(dev->irq); | ||
1337 | } | ||
1338 | #endif | ||
1339 | |||
1340 | /* Our watchdog timed out. Called by the networking layer */ | ||
1341 | static void smc911x_timeout(struct net_device *dev) | ||
1342 | { | ||
1343 | struct smc911x_local *lp = netdev_priv(dev); | ||
1344 | unsigned long ioaddr = dev->base_addr; | ||
1345 | int status, mask; | ||
1346 | unsigned long flags; | ||
1347 | |||
1348 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1349 | |||
1350 | spin_lock_irqsave(&lp->lock, flags); | ||
1351 | status = SMC_GET_INT(); | ||
1352 | mask = SMC_GET_INT_EN(); | ||
1353 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1354 | DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n", | ||
1355 | dev->name, status, mask); | ||
1356 | |||
1357 | /* Dump the current TX FIFO contents and restart */ | ||
1358 | mask = SMC_GET_TX_CFG(); | ||
1359 | SMC_SET_TX_CFG(mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_); | ||
1360 | /* | ||
1361 | * Reconfiguring the PHY doesn't seem like a bad idea here, but | ||
1362 | * smc911x_phy_configure() calls msleep() which calls schedule_timeout() | ||
1363 | * which calls schedule(). Hence we use a work queue. | ||
1364 | */ | ||
1365 | if (lp->phy_type != 0) { | ||
1366 | if (schedule_work(&lp->phy_configure)) { | ||
1367 | lp->work_pending = 1; | ||
1368 | } | ||
1369 | } | ||
1370 | |||
1371 | /* We can accept TX packets again */ | ||
1372 | dev->trans_start = jiffies; | ||
1373 | netif_wake_queue(dev); | ||
1374 | } | ||
1375 | |||
1376 | /* | ||
1377 | * This routine will, depending on the values passed to it, | ||
1378 | * either make it accept multicast packets, go into | ||
1379 | * promiscuous mode (for TCPDUMP and cousins) or accept | ||
1380 | * a select set of multicast packets | ||
1381 | */ | ||
1382 | static void smc911x_set_multicast_list(struct net_device *dev) | ||
1383 | { | ||
1384 | struct smc911x_local *lp = netdev_priv(dev); | ||
1385 | unsigned long ioaddr = dev->base_addr; | ||
1386 | unsigned int multicast_table[2]; | ||
1387 | unsigned int mcr, update_multicast = 0; | ||
1388 | unsigned long flags; | ||
1389 | /* table for flipping the order of 5 bits */ | ||
1390 | static const unsigned char invert5[] = | ||
1391 | {0x00, 0x10, 0x08, 0x18, 0x04, 0x14, 0x0C, 0x1C, | ||
1392 | 0x02, 0x12, 0x0A, 0x1A, 0x06, 0x16, 0x0E, 0x1E, | ||
1393 | 0x01, 0x11, 0x09, 0x19, 0x05, 0x15, 0x0D, 0x1D, | ||
1394 | 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F}; | ||
1395 | |||
1396 | |||
1397 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1398 | |||
1399 | spin_lock_irqsave(&lp->lock, flags); | ||
1400 | SMC_GET_MAC_CR(mcr); | ||
1401 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1402 | |||
1403 | if (dev->flags & IFF_PROMISC) { | ||
1404 | |||
1405 | DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name); | ||
1406 | mcr |= MAC_CR_PRMS_; | ||
1407 | } | ||
1408 | /* | ||
1409 | * Here, I am setting this to accept all multicast packets. | ||
1410 | * I don't need to zero the multicast table, because the flag is | ||
1411 | * checked before the table is used. | ||
1412 | */ | ||
1413 | else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) { | ||
1414 | DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name); | ||
1415 | mcr |= MAC_CR_MCPAS_; | ||
1416 | } | ||
1417 | |||
1418 | /* | ||
1419 | * This sets the internal hardware table to filter out unwanted | ||
1420 | * multicast packets before they take up memory. | ||
1421 | * | ||
1422 | * The SMC chip uses a hash table where the high 6 bits of the CRC of | ||
1423 | * the address are the offset into the table. If that bit is 1, then the | ||
1424 | * multicast packet is accepted. Otherwise, it's dropped silently. | ||
1425 | * | ||
1426 | * To use the 6 bits as an offset into the table, the high bit selects | ||
1427 | * which of the two 32 bit registers to use, while the low 5 bits select the bit | ||
1428 | * within that register. | ||
1429 | */ | ||
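| /* | ||
| * Worked example with a purely hypothetical hash value (not derived | ||
| * from a real address CRC), tracing the bit shuffling done below: | ||
| * if position were 0x2A (binary 101010), then position & 0x1F = 0x0A, | ||
| * invert5[0x0A] = 0x0A, and 0x0A & 0x1 = 0, so multicast_table[0] is | ||
| * selected; (position >> 1) & 0x1F = 0x15, invert5[0x15] = 0x15 = 21, | ||
| * so bit 21 is set: multicast_table[0] |= (1 << 21). | ||
| */ | ||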
1430 | else if (dev->mc_count) { | ||
1431 | int i; | ||
1432 | struct dev_mc_list *cur_addr; | ||
1433 | |||
1434 | /* Set the Hash perfect filtering mode */ | ||
1435 | mcr |= MAC_CR_HPFILT_; | ||
1436 | |||
1437 | /* start with a table of all zeros: reject all */ | ||
1438 | memset(multicast_table, 0, sizeof(multicast_table)); | ||
1439 | |||
1440 | cur_addr = dev->mc_list; | ||
1441 | for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) { | ||
1442 | int position; | ||
1443 | |||
1444 | /* do we have a pointer here? */ | ||
1445 | if (!cur_addr) | ||
1446 | break; | ||
1447 | /* make sure this is a multicast address - | ||
1448 | shouldn't this be a given if we have it here ? */ | ||
1449 | if (!(*cur_addr->dmi_addr & 1)) | ||
1450 | continue; | ||
1451 | |||
1452 | /* only use the low order bits */ | ||
1453 | position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f; | ||
1454 | |||
1455 | /* do some messy swapping to put the bit in the right spot */ | ||
1456 | multicast_table[invert5[position&0x1F]&0x1] |= | ||
1457 | (1<<invert5[(position>>1)&0x1F]); | ||
1458 | } | ||
1459 | |||
1460 | /* be sure I get rid of flags I might have set */ | ||
1461 | mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); | ||
1462 | |||
1463 | /* now, the table can be loaded into the chipset */ | ||
1464 | update_multicast = 1; | ||
1465 | } else { | ||
1466 | DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n", | ||
1467 | dev->name); | ||
1468 | mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); | ||
1469 | |||
1470 | /* | ||
1471 | * since I'm disabling all multicast entirely, I need to | ||
1472 | * clear the multicast list | ||
1473 | */ | ||
1474 | memset(multicast_table, 0, sizeof(multicast_table)); | ||
1475 | update_multicast = 1; | ||
1476 | } | ||
1477 | |||
1478 | spin_lock_irqsave(&lp->lock, flags); | ||
1479 | SMC_SET_MAC_CR(mcr); | ||
1480 | if (update_multicast) { | ||
1481 | DBG(SMC_DEBUG_MISC, | ||
1482 | "%s: update mcast hash table 0x%08x 0x%08x\n", | ||
1483 | dev->name, multicast_table[0], multicast_table[1]); | ||
1484 | SMC_SET_HASHL(multicast_table[0]); | ||
1485 | SMC_SET_HASHH(multicast_table[1]); | ||
1486 | } | ||
1487 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1488 | } | ||
1489 | |||
1490 | |||
1491 | /* | ||
1492 | * Open and Initialize the board | ||
1493 | * | ||
1494 | * Set up everything, reset the card, etc.. | ||
1495 | */ | ||
1496 | static int | ||
1497 | smc911x_open(struct net_device *dev) | ||
1498 | { | ||
1499 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1500 | |||
1501 | /* | ||
1502 | * Check that the address is valid. If it's not, refuse | ||
1503 | * to bring the device up. The user must specify an | ||
1504 | * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx | ||
1505 | */ | ||
1506 | if (!is_valid_ether_addr(dev->dev_addr)) { | ||
1507 | PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__); | ||
1508 | return -EINVAL; | ||
1509 | } | ||
1510 | |||
1511 | /* reset the hardware */ | ||
1512 | smc911x_reset(dev); | ||
1513 | |||
1514 | /* Configure the PHY, initialize the link state */ | ||
1515 | smc911x_phy_configure(dev); | ||
1516 | |||
1517 | /* Turn on Tx + Rx */ | ||
1518 | smc911x_enable(dev); | ||
1519 | |||
1520 | netif_start_queue(dev); | ||
1521 | |||
1522 | return 0; | ||
1523 | } | ||
1524 | |||
1525 | /* | ||
1526 | * smc911x_close | ||
1527 | * | ||
1528 | * this makes the board clean up everything that it can | ||
1529 | * and not talk to the outside world. Caused by | ||
1530 | * an 'ifconfig ethX down' | ||
1531 | */ | ||
1532 | static int smc911x_close(struct net_device *dev) | ||
1533 | { | ||
1534 | struct smc911x_local *lp = netdev_priv(dev); | ||
1535 | |||
1536 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1537 | |||
1538 | netif_stop_queue(dev); | ||
1539 | netif_carrier_off(dev); | ||
1540 | |||
1541 | /* clear everything */ | ||
1542 | smc911x_shutdown(dev); | ||
1543 | |||
1544 | if (lp->phy_type != 0) { | ||
1545 | /* We need to ensure that no calls to | ||
1546 | * smc911x_phy_configure are pending. | ||
1547 | |||
1548 | * flush_scheduled_work() cannot be called because we | ||
1549 | * are running with the netlink semaphore held (from | ||
1550 | * devinet_ioctl()) and the pending work queue | ||
1551 | * contains linkwatch_event() (scheduled by | ||
1552 | * netif_carrier_off() above). linkwatch_event() also | ||
1553 | * wants the netlink semaphore. | ||
1554 | */ | ||
1555 | while (lp->work_pending) | ||
1556 | schedule(); | ||
1557 | smc911x_phy_powerdown(dev, lp->mii.phy_id); | ||
1558 | } | ||
1559 | |||
1560 | if (lp->pending_tx_skb) { | ||
1561 | dev_kfree_skb(lp->pending_tx_skb); | ||
1562 | lp->pending_tx_skb = NULL; | ||
1563 | } | ||
1564 | |||
1565 | return 0; | ||
1566 | } | ||
1567 | |||
1568 | /* | ||
1569 | * Get the current statistics. | ||
1570 | * This may be called with the card open or closed. | ||
1571 | */ | ||
1572 | static struct net_device_stats *smc911x_query_statistics(struct net_device *dev) | ||
1573 | { | ||
1574 | struct smc911x_local *lp = netdev_priv(dev); | ||
1575 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1576 | |||
1577 | |||
1578 | return &lp->stats; | ||
1579 | } | ||
1580 | |||
1581 | /* | ||
1582 | * Ethtool support | ||
1583 | */ | ||
1584 | static int | ||
1585 | smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1586 | { | ||
1587 | struct smc911x_local *lp = netdev_priv(dev); | ||
1588 | unsigned long ioaddr = dev->base_addr; | ||
1589 | int ret, status; | ||
1590 | unsigned long flags; | ||
1591 | |||
1592 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1593 | cmd->maxtxpkt = 1; | ||
1594 | cmd->maxrxpkt = 1; | ||
1595 | |||
1596 | if (lp->phy_type != 0) { | ||
1597 | spin_lock_irqsave(&lp->lock, flags); | ||
1598 | ret = mii_ethtool_gset(&lp->mii, cmd); | ||
1599 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1600 | } else { | ||
1601 | cmd->supported = SUPPORTED_10baseT_Half | | ||
1602 | SUPPORTED_10baseT_Full | | ||
1603 | SUPPORTED_TP | SUPPORTED_AUI; | ||
1604 | |||
1605 | if (lp->ctl_rspeed == 10) | ||
1606 | cmd->speed = SPEED_10; | ||
1607 | else if (lp->ctl_rspeed == 100) | ||
1608 | cmd->speed = SPEED_100; | ||
1609 | |||
1610 | cmd->autoneg = AUTONEG_DISABLE; | ||
1611 | if (lp->mii.phy_id==1) | ||
1612 | cmd->transceiver = XCVR_INTERNAL; | ||
1613 | else | ||
1614 | cmd->transceiver = XCVR_EXTERNAL; | ||
1615 | cmd->port = 0; | ||
1616 | SMC_GET_PHY_SPECIAL(lp->mii.phy_id, status); | ||
1617 | cmd->duplex = | ||
1618 | (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ? | ||
1619 | DUPLEX_FULL : DUPLEX_HALF; | ||
1620 | ret = 0; | ||
1621 | } | ||
1622 | |||
1623 | return ret; | ||
1624 | } | ||
1625 | |||
1626 | static int | ||
1627 | smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1628 | { | ||
1629 | struct smc911x_local *lp = netdev_priv(dev); | ||
1630 | int ret; | ||
1631 | unsigned long flags; | ||
1632 | |||
1633 | if (lp->phy_type != 0) { | ||
1634 | spin_lock_irqsave(&lp->lock, flags); | ||
1635 | ret = mii_ethtool_sset(&lp->mii, cmd); | ||
1636 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1637 | } else { | ||
1638 | if (cmd->autoneg != AUTONEG_DISABLE || | ||
1639 | cmd->speed != SPEED_10 || | ||
1640 | (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || | ||
1641 | (cmd->port != PORT_TP && cmd->port != PORT_AUI)) | ||
1642 | return -EINVAL; | ||
1643 | |||
1644 | lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; | ||
1645 | |||
1646 | ret = 0; | ||
1647 | } | ||
1648 | |||
1649 | return ret; | ||
1650 | } | ||
1651 | |||
1652 | static void | ||
1653 | smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
1654 | { | ||
1655 | strncpy(info->driver, CARDNAME, sizeof(info->driver)); | ||
1656 | strncpy(info->version, version, sizeof(info->version)); | ||
1657 | strncpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info)); | ||
1658 | } | ||
1659 | |||
1660 | static int smc911x_ethtool_nwayreset(struct net_device *dev) | ||
1661 | { | ||
1662 | struct smc911x_local *lp = netdev_priv(dev); | ||
1663 | int ret = -EINVAL; | ||
1664 | unsigned long flags; | ||
1665 | |||
1666 | if (lp->phy_type != 0) { | ||
1667 | spin_lock_irqsave(&lp->lock, flags); | ||
1668 | ret = mii_nway_restart(&lp->mii); | ||
1669 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1670 | } | ||
1671 | |||
1672 | return ret; | ||
1673 | } | ||
1674 | |||
1675 | static u32 smc911x_ethtool_getmsglevel(struct net_device *dev) | ||
1676 | { | ||
1677 | struct smc911x_local *lp = netdev_priv(dev); | ||
1678 | return lp->msg_enable; | ||
1679 | } | ||
1680 | |||
1681 | static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level) | ||
1682 | { | ||
1683 | struct smc911x_local *lp = netdev_priv(dev); | ||
1684 | lp->msg_enable = level; | ||
1685 | } | ||
1686 | |||
1687 | static int smc911x_ethtool_getregslen(struct net_device *dev) | ||
1688 | { | ||
1689 | /* System regs + MAC regs + PHY regs */ | ||
1690 | return (((E2P_CMD - ID_REV)/4 + 1) + | ||
1691 | (WUCSR - MAC_CR)+1 + 32) * sizeof(u32); | ||
1692 | } | ||
1693 | |||
1694 | static void smc911x_ethtool_getregs(struct net_device *dev, | ||
1695 | struct ethtool_regs* regs, void *buf) | ||
1696 | { | ||
1697 | unsigned long ioaddr = dev->base_addr; | ||
1698 | struct smc911x_local *lp = netdev_priv(dev); | ||
1699 | unsigned long flags; | ||
1700 | u32 reg,i,j=0; | ||
1701 | u32 *data = (u32*)buf; | ||
1702 | |||
1703 | regs->version = lp->version; | ||
1704 | for(i=ID_REV;i<=E2P_CMD;i+=4) { | ||
1705 | data[j++] = SMC_inl(ioaddr,i); | ||
1706 | } | ||
1707 | for(i=MAC_CR;i<=WUCSR;i++) { | ||
1708 | spin_lock_irqsave(&lp->lock, flags); | ||
1709 | SMC_GET_MAC_CSR(i, reg); | ||
1710 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1711 | data[j++] = reg; | ||
1712 | } | ||
1713 | for(i=0;i<=31;i++) { | ||
1714 | spin_lock_irqsave(&lp->lock, flags); | ||
1715 | SMC_GET_MII(i, lp->mii.phy_id, reg); | ||
1716 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1717 | data[j++] = reg & 0xFFFF; | ||
1718 | } | ||
1719 | } | ||
1720 | |||
1721 | static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev) | ||
1722 | { | ||
1723 | unsigned long ioaddr = dev->base_addr; | ||
1724 | unsigned int timeout; | ||
1725 | int e2p_cmd; | ||
1726 | |||
1727 | e2p_cmd = SMC_GET_E2P_CMD(); | ||
1728 | for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) { | ||
1729 | if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) { | ||
1730 | PRINTK("%s: %s timeout waiting for EEPROM to respond\n", | ||
1731 | dev->name, __FUNCTION__); | ||
1732 | return -EFAULT; | ||
1733 | } | ||
1734 | mdelay(1); | ||
1735 | e2p_cmd = SMC_GET_E2P_CMD(); | ||
1736 | } | ||
1737 | if (timeout == 0) { | ||
1738 | PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n", | ||
1739 | dev->name, __FUNCTION__); | ||
1740 | return -ETIMEDOUT; | ||
1741 | } | ||
1742 | return 0; | ||
1743 | } | ||
1744 | |||
1745 | static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev, | ||
1746 | int cmd, int addr) | ||
1747 | { | ||
1748 | unsigned long ioaddr = dev->base_addr; | ||
1749 | int ret; | ||
1750 | |||
1751 | if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) | ||
1752 | return ret; | ||
1753 | SMC_SET_E2P_CMD(E2P_CMD_EPC_BUSY_ | | ||
1754 | ((cmd) & (0x7<<28)) | | ||
1755 | ((addr) & 0xFF)); | ||
1756 | return 0; | ||
1757 | } | ||
1758 | |||
1759 | static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev, | ||
1760 | u8 *data) | ||
1761 | { | ||
1762 | unsigned long ioaddr = dev->base_addr; | ||
1763 | int ret; | ||
1764 | |||
1765 | if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) | ||
1766 | return ret; | ||
1767 | *data = SMC_GET_E2P_DATA(); | ||
1768 | return 0; | ||
1769 | } | ||
1770 | |||
1771 | static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev, | ||
1772 | u8 data) | ||
1773 | { | ||
1774 | unsigned long ioaddr = dev->base_addr; | ||
1775 | int ret; | ||
1776 | |||
1777 | if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) | ||
1778 | return ret; | ||
1779 | SMC_SET_E2P_DATA(data); | ||
1780 | return 0; | ||
1781 | } | ||
1782 | |||
1783 | static int smc911x_ethtool_geteeprom(struct net_device *dev, | ||
1784 | struct ethtool_eeprom *eeprom, u8 *data) | ||
1785 | { | ||
1786 | u8 eebuf[SMC911X_EEPROM_LEN]; | ||
1787 | int i, ret; | ||
1788 | |||
1789 | for(i=0;i<SMC911X_EEPROM_LEN;i++) { | ||
1790 | if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0) | ||
1791 | return ret; | ||
1792 | if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0) | ||
1793 | return ret; | ||
1794 | } | ||
1795 | memcpy(data, eebuf+eeprom->offset, eeprom->len); | ||
1796 | return 0; | ||
1797 | } | ||
1798 | |||
1799 | static int smc911x_ethtool_seteeprom(struct net_device *dev, | ||
1800 | struct ethtool_eeprom *eeprom, u8 *data) | ||
1801 | { | ||
1802 | int i, ret; | ||
1803 | |||
1804 | /* Enable erase */ | ||
1805 | if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0) | ||
1806 | return ret; | ||
1807 | for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) { | ||
1808 | /* erase byte */ | ||
1809 | if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0) | ||
1810 | return ret; | ||
1811 | /* write byte */ | ||
1812 | if ((ret=smc911x_ethtool_write_eeprom_byte(dev, data[i-eeprom->offset]))!=0) | ||
1813 | return ret; | ||
1814 | if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0) | ||
1815 | return ret; | ||
1816 | } | ||
1817 | return 0; | ||
1818 | } | ||
1819 | |||
1820 | static int smc911x_ethtool_geteeprom_len(struct net_device *dev) | ||
1821 | { | ||
1822 | return SMC911X_EEPROM_LEN; | ||
1823 | } | ||
1824 | |||
1825 | static struct ethtool_ops smc911x_ethtool_ops = { | ||
1826 | .get_settings = smc911x_ethtool_getsettings, | ||
1827 | .set_settings = smc911x_ethtool_setsettings, | ||
1828 | .get_drvinfo = smc911x_ethtool_getdrvinfo, | ||
1829 | .get_msglevel = smc911x_ethtool_getmsglevel, | ||
1830 | .set_msglevel = smc911x_ethtool_setmsglevel, | ||
1831 | .nway_reset = smc911x_ethtool_nwayreset, | ||
1832 | .get_link = ethtool_op_get_link, | ||
1833 | .get_regs_len = smc911x_ethtool_getregslen, | ||
1834 | .get_regs = smc911x_ethtool_getregs, | ||
1835 | .get_eeprom_len = smc911x_ethtool_geteeprom_len, | ||
1836 | .get_eeprom = smc911x_ethtool_geteeprom, | ||
1837 | .set_eeprom = smc911x_ethtool_seteeprom, | ||
1838 | }; | ||
1839 | |||
1840 | /* | ||
1841 | * smc911x_findirq | ||
1842 | * | ||
1843 | * This routine has a simple purpose -- make the SMC chip generate an | ||
1844 | * interrupt, so an auto-detect routine can detect it, and find the IRQ. | ||
1845 | */ | ||
1846 | static int __init smc911x_findirq(unsigned long ioaddr) | ||
1847 | { | ||
1848 | int timeout = 20; | ||
1849 | unsigned long cookie; | ||
1850 | |||
1851 | DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); | ||
1852 | |||
1853 | cookie = probe_irq_on(); | ||
1854 | |||
1855 | /* | ||
1856 | * Force a SW interrupt | ||
1857 | */ | ||
1858 | |||
1859 | SMC_SET_INT_EN(INT_EN_SW_INT_EN_); | ||
1860 | |||
1861 | /* | ||
1862 | * Wait until positive that the interrupt has been generated | ||
1863 | */ | ||
1864 | do { | ||
1865 | int int_status; | ||
1866 | udelay(10); | ||
1867 | int_status = SMC_GET_INT_EN(); | ||
1868 | if (int_status & INT_EN_SW_INT_EN_) | ||
1869 | break; /* got the interrupt */ | ||
1870 | } while (--timeout); | ||
1871 | |||
1872 | /* | ||
1873 | * there is really nothing that I can do here if timeout fails, | ||
1874 | * as autoirq_report will return a 0 anyway, which is what I | ||
1875 | * want in this case. Plus, the clean up is needed in both | ||
1876 | * cases. | ||
1877 | */ | ||
1878 | |||
1879 | /* and disable all interrupts again */ | ||
1880 | SMC_SET_INT_EN(0); | ||
1881 | |||
1882 | /* and return what I found */ | ||
1883 | return probe_irq_off(cookie); | ||
1884 | } | ||
1885 | |||
1886 | /* | ||
1887 | * Function: smc911x_probe(struct net_device *dev, unsigned long ioaddr) | ||
1888 | * | ||
1889 | * Purpose: | ||
1890 | * Tests to see if a given ioaddr points to an SMC911x chip. | ||
1891 | * Returns a 0 on success | ||
1892 | * | ||
1893 | * Algorithm: | ||
1894 | * (1) see if the endian word is OK | ||
1895 | * (2) see if I recognize the chip ID in the appropriate register | ||
1896 | * | ||
1897 | * Here I do typical initialization tasks. | ||
1898 | * | ||
1899 | * o Initialize the structure if needed | ||
1900 | * o print out my vanity message if not done so already | ||
1901 | * o print out what type of hardware is detected | ||
1902 | * o print out the ethernet address | ||
1903 | * o find the IRQ | ||
1904 | * o set up my private data | ||
1905 | * o configure the dev structure with my subroutines | ||
1906 | * o actually GRAB the irq. | ||
1907 | * o GRAB the region | ||
1908 | */ | ||
1909 | static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr) | ||
1910 | { | ||
1911 | struct smc911x_local *lp = netdev_priv(dev); | ||
1912 | int i, retval; | ||
1913 | unsigned int val, chip_id, revision; | ||
1914 | const char *version_string; | ||
1915 | |||
1916 | DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); | ||
1917 | |||
1918 | /* First, see if the endian word is recognized */ | ||
1919 | val = SMC_GET_BYTE_TEST(); | ||
1920 | DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val); | ||
1921 | if (val != 0x87654321) { | ||
1922 | printk(KERN_ERR "Invalid chip endian 0x%08x\n", val); | ||
1923 | retval = -ENODEV; | ||
1924 | goto err_out; | ||
1925 | } | ||
1926 | |||
1927 | /* | ||
1928 | * check if the revision register is something that I | ||
1929 | * recognize. These might need to be added to later, | ||
1930 | * as future revisions could be added. | ||
1931 | */ | ||
1932 | chip_id = SMC_GET_PN(); | ||
1933 | DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id); | ||
1934 | for(i=0;chip_ids[i].id != 0; i++) { | ||
1935 | if (chip_ids[i].id == chip_id) break; | ||
1936 | } | ||
1937 | if (!chip_ids[i].id) { | ||
1938 | printk(KERN_ERR "Unknown chip ID %04x\n", chip_id); | ||
1939 | retval = -ENODEV; | ||
1940 | goto err_out; | ||
1941 | } | ||
1942 | version_string = chip_ids[i].name; | ||
1943 | |||
1944 | revision = SMC_GET_REV(); | ||
1945 | DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision); | ||
1946 | |||
1947 | /* At this point I'll assume that the chip is an SMC911x. */ | ||
1948 | DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name); | ||
1949 | |||
1950 | /* Validate the TX FIFO size requested */ | ||
1951 | if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) { | ||
1952 | printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb); | ||
1953 | retval = -EINVAL; | ||
1954 | goto err_out; | ||
1955 | } | ||
1956 | |||
1957 | /* fill in some of the fields */ | ||
1958 | dev->base_addr = ioaddr; | ||
1959 | lp->version = chip_ids[i].id; | ||
1960 | lp->revision = revision; | ||
1961 | lp->tx_fifo_kb = tx_fifo_kb; | ||
1962 | /* Reverse calculate the RX FIFO size from the TX */ | ||
1963 | lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512; | ||
1964 | lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15; | ||
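| /* | ||
| * For example, with tx_fifo_kb = 2 (the smallest value allowed by the | ||
| * check above): tx_fifo_size = (2 << 10) - 512 = 1536 and | ||
| * rx_fifo_size = ((0x4000 - 512 - 1536) / 16) * 15 = 13440, which is | ||
| * the "13440 Rx Data Fifo Size" case in the switch below. | ||
| */ | ||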
1965 | |||
1966 | /* Set the automatic flow control values */ | ||
1967 | switch(lp->tx_fifo_kb) { | ||
1968 | /* | ||
1969 | * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64 | ||
1970 | * AFC_LO is AFC_HI/2 | ||
1971 | * BACK_DUR is about 5uS*(AFC_LO) rounded down | ||
1972 | */ | ||
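| /* | ||
| * For example, assuming AFC_HI and AFC_LO occupy bits 23:16 and 15:8 | ||
| * of AFC_CFG (an assumption; the field layout is not defined in this | ||
| * section), the case 2 value 0x008C46AF matches the formulas above: | ||
| * AFC_HI = ((13440 * 2) / 3) / 64 = 140 = 0x8C and | ||
| * AFC_LO = 140 / 2 = 70 = 0x46. | ||
| */ | ||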
1973 | case 2:/* 13440 Rx Data Fifo Size */ | ||
1974 | lp->afc_cfg=0x008C46AF;break; | ||
1975 | case 3:/* 12480 Rx Data Fifo Size */ | ||
1976 | lp->afc_cfg=0x0082419F;break; | ||
1977 | case 4:/* 11520 Rx Data Fifo Size */ | ||
1978 | lp->afc_cfg=0x00783C9F;break; | ||
1979 | case 5:/* 10560 Rx Data Fifo Size */ | ||
1980 | lp->afc_cfg=0x006E374F;break; | ||
1981 | case 6:/* 9600 Rx Data Fifo Size */ | ||
1982 | lp->afc_cfg=0x0064328F;break; | ||
1983 | case 7:/* 8640 Rx Data Fifo Size */ | ||
1984 | lp->afc_cfg=0x005A2D7F;break; | ||
1985 | case 8:/* 7680 Rx Data Fifo Size */ | ||
1986 | lp->afc_cfg=0x0050287F;break; | ||
1987 | case 9:/* 6720 Rx Data Fifo Size */ | ||
1988 | lp->afc_cfg=0x0046236F;break; | ||
1989 | case 10:/* 5760 Rx Data Fifo Size */ | ||
1990 | lp->afc_cfg=0x003C1E6F;break; | ||
1991 | case 11:/* 4800 Rx Data Fifo Size */ | ||
1992 | lp->afc_cfg=0x0032195F;break; | ||
1993 | /* | ||
1994 | * AFC_HI is ~1520 bytes less than RX Data Fifo Size | ||
1995 | * AFC_LO is AFC_HI/2 | ||
1996 | * BACK_DUR is about 5uS*(AFC_LO) rounded down | ||
1997 | */ | ||
1998 | case 12:/* 3840 Rx Data Fifo Size */ | ||
1999 | lp->afc_cfg=0x0024124F;break; | ||
2000 | case 13:/* 2880 Rx Data Fifo Size */ | ||
2001 | lp->afc_cfg=0x0015073F;break; | ||
2002 | case 14:/* 1920 Rx Data Fifo Size */ | ||
2003 | lp->afc_cfg=0x0006032F;break; | ||
2004 | default: | ||
2005 | PRINTK("%s: ERROR -- no AFC_CFG setting found", | ||
2006 | dev->name); | ||
2007 | break; | ||
2008 | } | ||
2009 | |||
2010 | DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, | ||
2011 | "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME, | ||
2012 | lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg); | ||
2013 | |||
2014 | spin_lock_init(&lp->lock); | ||
2015 | |||
2016 | /* Get the MAC address */ | ||
2017 | SMC_GET_MAC_ADDR(dev->dev_addr); | ||
2018 | |||
2019 | /* now, reset the chip, and put it into a known state */ | ||
2020 | smc911x_reset(dev); | ||
2021 | |||
2022 | /* | ||
2023 | * If dev->irq is 0, then the device has to be banged on to see | ||
2024 | * what the IRQ is. | ||
2025 | * | ||
2026 | * Specifying an IRQ is done with the assumption that the user knows | ||
2027 | * what (s)he is doing. No checking is done!!!! | ||
2028 | */ | ||
2029 | if (dev->irq < 1) { | ||
2030 | int trials; | ||
2031 | |||
2032 | trials = 3; | ||
2033 | while (trials--) { | ||
2034 | dev->irq = smc911x_findirq(ioaddr); | ||
2035 | if (dev->irq) | ||
2036 | break; | ||
2037 | /* kick the card and try again */ | ||
2038 | smc911x_reset(dev); | ||
2039 | } | ||
2040 | } | ||
2041 | if (dev->irq == 0) { | ||
2042 | printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n", | ||
2043 | dev->name); | ||
2044 | retval = -ENODEV; | ||
2045 | goto err_out; | ||
2046 | } | ||
2047 | dev->irq = irq_canonicalize(dev->irq); | ||
2048 | |||
2049 | /* Fill in the fields of the device structure with ethernet values. */ | ||
2050 | ether_setup(dev); | ||
2051 | |||
2052 | dev->open = smc911x_open; | ||
2053 | dev->stop = smc911x_close; | ||
2054 | dev->hard_start_xmit = smc911x_hard_start_xmit; | ||
2055 | dev->tx_timeout = smc911x_timeout; | ||
2056 | dev->watchdog_timeo = msecs_to_jiffies(watchdog); | ||
2057 | dev->get_stats = smc911x_query_statistics; | ||
2058 | dev->set_multicast_list = smc911x_set_multicast_list; | ||
2059 | dev->ethtool_ops = &smc911x_ethtool_ops; | ||
2060 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2061 | dev->poll_controller = smc911x_poll_controller; | ||
2062 | #endif | ||
2063 | |||
2064 | INIT_WORK(&lp->phy_configure, smc911x_phy_configure, dev); | ||
2065 | lp->mii.phy_id_mask = 0x1f; | ||
2066 | lp->mii.reg_num_mask = 0x1f; | ||
2067 | lp->mii.force_media = 0; | ||
2068 | lp->mii.full_duplex = 0; | ||
2069 | lp->mii.dev = dev; | ||
2070 | lp->mii.mdio_read = smc911x_phy_read; | ||
2071 | lp->mii.mdio_write = smc911x_phy_write; | ||
2072 | |||
2073 | /* | ||
2074 | * Locate the phy, if any. | ||
2075 | */ | ||
2076 | smc911x_phy_detect(dev); | ||
2077 | |||
2078 | /* Set default parameters */ | ||
2079 | lp->msg_enable = NETIF_MSG_LINK; | ||
2080 | lp->ctl_rfduplx = 1; | ||
2081 | lp->ctl_rspeed = 100; | ||
2082 | |||
2083 | /* Grab the IRQ */ | ||
2084 | retval = request_irq(dev->irq, &smc911x_interrupt, SA_SHIRQ, dev->name, dev); | ||
2085 | if (retval) | ||
2086 | goto err_out; | ||
2087 | |||
2088 | set_irq_type(dev->irq, IRQT_FALLING); | ||
2089 | |||
2090 | #ifdef SMC_USE_DMA | ||
2091 | lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq); | ||
2092 | lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq); | ||
2093 | lp->rxdma_active = 0; | ||
2094 | lp->txdma_active = 0; | ||
2095 | dev->dma = lp->rxdma; | ||
2096 | #endif | ||
2097 | |||
2098 | retval = register_netdev(dev); | ||
2099 | if (retval == 0) { | ||
2100 | /* now, print out the card info, in a short format.. */ | ||
2101 | printk("%s: %s (rev %d) at %#lx IRQ %d", | ||
2102 | dev->name, version_string, lp->revision, | ||
2103 | dev->base_addr, dev->irq); | ||
2104 | |||
2105 | #ifdef SMC_USE_DMA | ||
2106 | if (lp->rxdma != -1) | ||
2107 | printk(" RXDMA %d ", lp->rxdma); | ||
2108 | |||
2109 | if (lp->txdma != -1) | ||
2110 | printk("TXDMA %d", lp->txdma); | ||
2111 | #endif | ||
2112 | printk("\n"); | ||
2113 | if (!is_valid_ether_addr(dev->dev_addr)) { | ||
2114 | printk("%s: Invalid ethernet MAC address. Please " | ||
2115 | "set using ifconfig\n", dev->name); | ||
2116 | } else { | ||
2117 | /* Print the Ethernet address */ | ||
2118 | printk("%s: Ethernet addr: ", dev->name); | ||
2119 | for (i = 0; i < 5; i++) | ||
2120 | printk("%2.2x:", dev->dev_addr[i]); | ||
2121 | printk("%2.2x\n", dev->dev_addr[5]); | ||
2122 | } | ||
2123 | |||
2124 | if (lp->phy_type == 0) { | ||
2125 | PRINTK("%s: No PHY found\n", dev->name); | ||
2126 | } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) { | ||
2127 | PRINTK("%s: LAN911x Internal PHY\n", dev->name); | ||
2128 | } else { | ||
2129 | PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type); | ||
2130 | } | ||
2131 | } | ||
2132 | |||
2133 | err_out: | ||
2134 | #ifdef SMC_USE_DMA | ||
2135 | if (retval) { | ||
2136 | if (lp->rxdma != -1) { | ||
2137 | SMC_DMA_FREE(dev, lp->rxdma); | ||
2138 | } | ||
2139 | if (lp->txdma != -1) { | ||
2140 | SMC_DMA_FREE(dev, lp->txdma); | ||
2141 | } | ||
2142 | } | ||
2143 | #endif | ||
2144 | return retval; | ||
2145 | } | ||
2146 | |||
2147 | /* | ||
2148 | * smc911x_drv_probe(struct platform_device *pdev) | ||
2149 | * | ||
2150 | * Output: | ||
2151 | * 0 --> there is a device | ||
2152 | * anything else, error | ||
2153 | */ | ||
2154 | static int smc911x_drv_probe(struct platform_device *pdev) | ||
2155 | { | ||
2156 | struct net_device *ndev; | ||
2157 | struct resource *res; | ||
2158 | unsigned int *addr; | ||
2159 | int ret; | ||
2160 | |||
2161 | DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); | ||
2162 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2163 | if (!res) { | ||
2164 | ret = -ENODEV; | ||
2165 | goto out; | ||
2166 | } | ||
2167 | |||
2168 | /* | ||
2169 | * Request the regions. | ||
2170 | */ | ||
2171 | if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) { | ||
2172 | ret = -EBUSY; | ||
2173 | goto out; | ||
2174 | } | ||
2175 | |||
2176 | ndev = alloc_etherdev(sizeof(struct smc911x_local)); | ||
2177 | if (!ndev) { | ||
2178 | printk("%s: could not allocate device.\n", CARDNAME); | ||
2179 | ret = -ENOMEM; | ||
2180 | goto release_1; | ||
2181 | } | ||
2182 | SET_MODULE_OWNER(ndev); | ||
2183 | SET_NETDEV_DEV(ndev, &pdev->dev); | ||
2184 | |||
2185 | ndev->dma = (unsigned char)-1; | ||
2186 | ndev->irq = platform_get_irq(pdev, 0); | ||
2187 | |||
2188 | addr = ioremap(res->start, SMC911X_IO_EXTENT); | ||
2189 | if (!addr) { | ||
2190 | ret = -ENOMEM; | ||
2191 | goto release_both; | ||
2192 | } | ||
2193 | |||
2194 | platform_set_drvdata(pdev, ndev); | ||
2195 | ret = smc911x_probe(ndev, (unsigned long)addr); | ||
2196 | if (ret != 0) { | ||
2197 | platform_set_drvdata(pdev, NULL); | ||
2198 | iounmap(addr); | ||
2199 | release_both: | ||
2200 | free_netdev(ndev); | ||
2201 | release_1: | ||
2202 | release_mem_region(res->start, SMC911X_IO_EXTENT); | ||
2203 | out: | ||
2204 | printk("%s: not found (%d).\n", CARDNAME, ret); | ||
2205 | } | ||
2206 | #ifdef SMC_USE_DMA | ||
2207 | else { | ||
2208 | struct smc911x_local *lp = netdev_priv(ndev); | ||
2209 | lp->physaddr = res->start; | ||
2210 | lp->dev = &pdev->dev; | ||
2211 | } | ||
2212 | #endif | ||
2213 | |||
2214 | return ret; | ||
2215 | } | ||
2216 | |||
2217 | static int smc911x_drv_remove(struct platform_device *pdev) | ||
2218 | { | ||
2219 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
2220 | struct resource *res; | ||
2221 | |||
2222 | DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); | ||
2223 | platform_set_drvdata(pdev, NULL); | ||
2224 | |||
2225 | unregister_netdev(ndev); | ||
2226 | |||
2227 | free_irq(ndev->irq, ndev); | ||
2228 | |||
2229 | #ifdef SMC_USE_DMA | ||
2230 | { | ||
2231 | struct smc911x_local *lp = netdev_priv(ndev); | ||
2232 | if (lp->rxdma != -1) { | ||
2233 | SMC_DMA_FREE(dev, lp->rxdma); | ||
2234 | } | ||
2235 | if (lp->txdma != -1) { | ||
2236 | SMC_DMA_FREE(dev, lp->txdma); | ||
2237 | } | ||
2238 | } | ||
2239 | #endif | ||
2240 | iounmap((void *)ndev->base_addr); | ||
2241 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2242 | release_mem_region(res->start, SMC911X_IO_EXTENT); | ||
2243 | |||
2244 | free_netdev(ndev); | ||
2245 | return 0; | ||
2246 | } | ||
2247 | |||
2248 | static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state) | ||
2249 | { | ||
2250 | struct net_device *ndev = platform_get_drvdata(dev); | ||
2251 | unsigned long ioaddr = ndev->base_addr; | ||
2252 | |||
2253 | DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); | ||
2254 | if (ndev) { | ||
2255 | if (netif_running(ndev)) { | ||
2256 | netif_device_detach(ndev); | ||
2257 | smc911x_shutdown(ndev); | ||
2258 | #if POWER_DOWN | ||
2259 | /* Set D2 - Energy detect only setting */ | ||
2260 | SMC_SET_PMT_CTRL(2<<12); | ||
2261 | #endif | ||
2262 | } | ||
2263 | } | ||
2264 | return 0; | ||
2265 | } | ||
2266 | |||
2267 | static int smc911x_drv_resume(struct platform_device *dev) | ||
2268 | { | ||
2269 | struct net_device *ndev = platform_get_drvdata(dev); | ||
2270 | |||
2271 | DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); | ||
2272 | if (ndev) { | ||
2273 | struct smc911x_local *lp = netdev_priv(ndev); | ||
2274 | |||
2275 | if (netif_running(ndev)) { | ||
2276 | smc911x_reset(ndev); | ||
2277 | smc911x_enable(ndev); | ||
2278 | if (lp->phy_type != 0) | ||
2279 | smc911x_phy_configure(ndev); | ||
2280 | netif_device_attach(ndev); | ||
2281 | } | ||
2282 | } | ||
2283 | return 0; | ||
2284 | } | ||
2285 | |||
2286 | static struct platform_driver smc911x_driver = { | ||
2287 | .probe = smc911x_drv_probe, | ||
2288 | .remove = smc911x_drv_remove, | ||
2289 | .suspend = smc911x_drv_suspend, | ||
2290 | .resume = smc911x_drv_resume, | ||
2291 | .driver = { | ||
2292 | .name = CARDNAME, | ||
2293 | }, | ||
2294 | }; | ||
2295 | |||
2296 | static int __init smc911x_init(void) | ||
2297 | { | ||
2298 | return platform_driver_register(&smc911x_driver); | ||
2299 | } | ||
2300 | |||
2301 | static void __exit smc911x_cleanup(void) | ||
2302 | { | ||
2303 | platform_driver_unregister(&smc911x_driver); | ||
2304 | } | ||
2305 | |||
2306 | module_init(smc911x_init); | ||
2307 | module_exit(smc911x_cleanup); | ||
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h new file mode 100644 index 000000000000..962a710459fc --- /dev/null +++ b/drivers/net/smc911x.h | |||
@@ -0,0 +1,835 @@ | |||
1 | /*------------------------------------------------------------------------ | ||
2 | . smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device. | ||
3 | . | ||
4 | . Copyright (C) 2005 Sensoria Corp. | ||
5 | . Derived from the unified SMC91x driver by Nicolas Pitre | ||
6 | . | ||
7 | . This program is free software; you can redistribute it and/or modify | ||
8 | . it under the terms of the GNU General Public License as published by | ||
9 | . the Free Software Foundation; either version 2 of the License, or | ||
10 | . (at your option) any later version. | ||
11 | . | ||
12 | . This program is distributed in the hope that it will be useful, | ||
13 | . but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | . GNU General Public License for more details. | ||
16 | . | ||
17 | . You should have received a copy of the GNU General Public License | ||
18 | . along with this program; if not, write to the Free Software | ||
19 | . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | . | ||
21 | . Information contained in this file was obtained from the LAN9118 | ||
22 | . manual from SMC. To get a copy, if you really want one, you can find | ||
23 | . information under www.smsc.com. | ||
24 | . | ||
25 | . Authors | ||
26 | . Dustin McIntire <dustin@sensoria.com> | ||
27 | . | ||
28 | ---------------------------------------------------------------------------*/ | ||
29 | #ifndef _SMC911X_H_ | ||
30 | #define _SMC911X_H_ | ||
31 | |||
32 | /* | ||
33 | * Use the DMA feature on PXA chips | ||
34 | */ | ||
35 | #ifdef CONFIG_ARCH_PXA | ||
36 | #define SMC_USE_PXA_DMA 1 | ||
37 | #define SMC_USE_16BIT 0 | ||
38 | #define SMC_USE_32BIT 1 | ||
39 | #endif | ||
40 | |||
41 | |||
42 | /* | ||
43 | * Define the bus width specific IO macros | ||
44 | */ | ||
45 | |||
46 | #if SMC_USE_16BIT | ||
47 | #define SMC_inb(a, r) readb((a) + (r)) | ||
48 | #define SMC_inw(a, r) readw((a) + (r)) | ||
49 | #define SMC_inl(a, r) ((SMC_inw(a, r) & 0xFFFF)+(SMC_inw(a+2, r)<<16)) | ||
50 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
51 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
52 | #define SMC_outl(v, a, r) \ | ||
53 | do{ \ | ||
54 | writew(v & 0xFFFF, (a) + (r)); \ | ||
55 | writew(v >> 16, (a) + (r) + 2); \ | ||
56 | } while (0) | ||
57 | #define SMC_insl(a, r, p, l) readsw((short*)((a) + (r)), p, l*2) | ||
58 | #define SMC_outsl(a, r, p, l) writesw((short*)((a) + (r)), p, l*2) | ||
59 | |||
60 | #elif SMC_USE_32BIT | ||
61 | #define SMC_inb(a, r) readb((a) + (r)) | ||
62 | #define SMC_inw(a, r) readw((a) + (r)) | ||
63 | #define SMC_inl(a, r) readl((a) + (r)) | ||
64 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
65 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
66 | #define SMC_insl(a, r, p, l) readsl((int*)((a) + (r)), p, l) | ||
67 | #define SMC_outsl(a, r, p, l) writesl((int*)((a) + (r)), p, l) | ||
68 | |||
69 | #endif /* SMC_USE_16BIT */ | ||
70 | |||
71 | |||
72 | |||
73 | #if SMC_USE_PXA_DMA | ||
74 | #define SMC_USE_DMA | ||
75 | |||
76 | /* | ||
77 | * Define the request and free functions | ||
78 | * These are unfortunately architecture specific as no generic allocation | ||
79 | * mechanism exists | ||
80 | */ | ||
81 | #define SMC_DMA_REQUEST(dev, handler) \ | ||
82 | pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev) | ||
83 | |||
84 | #define SMC_DMA_FREE(dev, dma) \ | ||
85 | pxa_free_dma(dma) | ||
86 | |||
87 | #define SMC_DMA_ACK_IRQ(dev, dma) \ | ||
88 | { \ | ||
89 | if (DCSR(dma) & DCSR_BUSERR) { \ | ||
90 | printk("%s: DMA %d bus error!\n", dev->name, dma); \ | ||
91 | } \ | ||
92 | DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \ | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * Use a DMA for RX and TX packets. | ||
97 | */ | ||
98 | #include <linux/dma-mapping.h> | ||
99 | #include <asm/dma.h> | ||
100 | #include <asm/arch/pxa-regs.h> | ||
101 | |||
102 | static dma_addr_t rx_dmabuf, tx_dmabuf; | ||
103 | static int rx_dmalen, tx_dmalen; | ||
104 | |||
105 | #ifdef SMC_insl | ||
106 | #undef SMC_insl | ||
107 | #define SMC_insl(a, r, p, l) \ | ||
108 | smc_pxa_dma_insl(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l) | ||
109 | |||
110 | static inline void | ||
111 | smc_pxa_dma_insl(struct device *dev, u_long ioaddr, u_long physaddr, | ||
112 | int reg, int dma, u_char *buf, int len) | ||
113 | { | ||
114 | /* 64 bit alignment is required for memory to memory DMA */ | ||
115 | if ((long)buf & 4) { | ||
116 | *((u32 *)buf) = SMC_inl(ioaddr, reg); | ||
117 | buf += 4; | ||
118 | len--; | ||
119 | } | ||
120 | |||
121 | len *= 4; | ||
122 | rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE); | ||
123 | rx_dmalen = len; | ||
124 | DCSR(dma) = DCSR_NODESC; | ||
125 | DTADR(dma) = rx_dmabuf; | ||
126 | DSADR(dma) = physaddr + reg; | ||
127 | DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | | ||
128 | DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen)); | ||
129 | DCSR(dma) = DCSR_NODESC | DCSR_RUN; | ||
130 | } | ||
131 | #endif | ||
132 | |||
133 | #ifdef SMC_insw | ||
134 | #undef SMC_insw | ||
135 | #define SMC_insw(a, r, p, l) \ | ||
136 | smc_pxa_dma_insw(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l) | ||
137 | |||
138 | static inline void | ||
139 | smc_pxa_dma_insw(struct device *dev, u_long ioaddr, u_long physaddr, | ||
140 | int reg, int dma, u_char *buf, int len) | ||
141 | { | ||
142 | /* 64 bit alignment is required for memory to memory DMA */ | ||
143 | while ((long)buf & 6) { | ||
144 | *((u16 *)buf) = SMC_inw(ioaddr, reg); | ||
145 | buf += 2; | ||
146 | len--; | ||
147 | } | ||
148 | |||
149 | len *= 2; | ||
150 | rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE); | ||
151 | rx_dmalen = len; | ||
152 | DCSR(dma) = DCSR_NODESC; | ||
153 | DTADR(dma) = rx_dmabuf; | ||
154 | DSADR(dma) = physaddr + reg; | ||
155 | DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | | ||
156 | DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen)); | ||
157 | DCSR(dma) = DCSR_NODESC | DCSR_RUN; | ||
158 | } | ||
159 | #endif | ||
160 | |||
161 | #ifdef SMC_outsl | ||
162 | #undef SMC_outsl | ||
163 | #define SMC_outsl(a, r, p, l) \ | ||
164 | smc_pxa_dma_outsl(lp->dev, a, lp->physaddr, r, lp->txdma, p, l) | ||
165 | |||
166 | static inline void | ||
167 | smc_pxa_dma_outsl(struct device *dev, u_long ioaddr, u_long physaddr, | ||
168 | int reg, int dma, u_char *buf, int len) | ||
169 | { | ||
170 | /* 64 bit alignment is required for memory to memory DMA */ | ||
171 | if ((long)buf & 4) { | ||
172 | SMC_outl(*((u32 *)buf), ioaddr, reg); | ||
173 | buf += 4; | ||
174 | len--; | ||
175 | } | ||
176 | |||
177 | len *= 4; | ||
178 | tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE); | ||
179 | tx_dmalen = len; | ||
180 | DCSR(dma) = DCSR_NODESC; | ||
181 | DSADR(dma) = tx_dmabuf; | ||
182 | DTADR(dma) = physaddr + reg; | ||
183 | DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 | | ||
184 | DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen)); | ||
185 | DCSR(dma) = DCSR_NODESC | DCSR_RUN; | ||
186 | } | ||
187 | #endif | ||
188 | |||
189 | #ifdef SMC_outsw | ||
190 | #undef SMC_outsw | ||
191 | #define SMC_outsw(a, r, p, l) \ | ||
192 | smc_pxa_dma_outsw(lp->dev, a, lp->physaddr, r, lp->txdma, p, l) | ||
193 | |||
194 | static inline void | ||
195 | smc_pxa_dma_outsw(struct device *dev, u_long ioaddr, u_long physaddr, | ||
196 | int reg, int dma, u_char *buf, int len) | ||
197 | { | ||
198 | /* 64 bit alignment is required for memory to memory DMA */ | ||
199 | while ((long)buf & 6) { | ||
200 | SMC_outw(*((u16 *)buf), ioaddr, reg); | ||
201 | buf += 2; | ||
202 | len--; | ||
203 | } | ||
204 | |||
205 | len *= 2; | ||
206 | tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE); | ||
207 | tx_dmalen = len; | ||
208 | DCSR(dma) = DCSR_NODESC; | ||
209 | DSADR(dma) = tx_dmabuf; | ||
210 | DTADR(dma) = physaddr + reg; | ||
211 | DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 | | ||
212 | DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen)); | ||
213 | DCSR(dma) = DCSR_NODESC | DCSR_RUN; | ||
214 | } | ||
215 | #endif | ||
216 | |||
217 | #endif /* SMC_USE_PXA_DMA */ | ||
218 | |||
219 | |||
220 | /* Chip Parameters and Register Definitions */ | ||
221 | |||
222 | #define SMC911X_TX_FIFO_LOW_THRESHOLD (1536*2) | ||
223 | |||
224 | #define SMC911X_IO_EXTENT 0x100 | ||
225 | |||
226 | #define SMC911X_EEPROM_LEN 7 | ||
227 | |||
228 | /* Below are the register offsets and bit definitions | ||
229 | * of the Lan911x memory space | ||
230 | */ | ||
231 | #define RX_DATA_FIFO (0x00) | ||
232 | |||
233 | #define TX_DATA_FIFO (0x20) | ||
234 | #define TX_CMD_A_INT_ON_COMP_ (0x80000000) | ||
235 | #define TX_CMD_A_INT_BUF_END_ALGN_ (0x03000000) | ||
236 | #define TX_CMD_A_INT_4_BYTE_ALGN_ (0x00000000) | ||
237 | #define TX_CMD_A_INT_16_BYTE_ALGN_ (0x01000000) | ||
238 | #define TX_CMD_A_INT_32_BYTE_ALGN_ (0x02000000) | ||
239 | #define TX_CMD_A_INT_DATA_OFFSET_ (0x001F0000) | ||
240 | #define TX_CMD_A_INT_FIRST_SEG_ (0x00002000) | ||
241 | #define TX_CMD_A_INT_LAST_SEG_ (0x00001000) | ||
242 | #define TX_CMD_A_BUF_SIZE_ (0x000007FF) | ||
243 | #define TX_CMD_B_PKT_TAG_ (0xFFFF0000) | ||
244 | #define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000) | ||
245 | #define TX_CMD_B_DISABLE_PADDING_ (0x00001000) | ||
246 | #define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF) | ||
247 | |||
248 | #define RX_STATUS_FIFO (0x40) | ||
249 | #define RX_STS_PKT_LEN_ (0x3FFF0000) | ||
250 | #define RX_STS_ES_ (0x00008000) | ||
251 | #define RX_STS_BCST_ (0x00002000) | ||
252 | #define RX_STS_LEN_ERR_ (0x00001000) | ||
253 | #define RX_STS_RUNT_ERR_ (0x00000800) | ||
254 | #define RX_STS_MCAST_ (0x00000400) | ||
255 | #define RX_STS_TOO_LONG_ (0x00000080) | ||
256 | #define RX_STS_COLL_ (0x00000040) | ||
257 | #define RX_STS_ETH_TYPE_ (0x00000020) | ||
258 | #define RX_STS_WDOG_TMT_ (0x00000010) | ||
259 | #define RX_STS_MII_ERR_ (0x00000008) | ||
260 | #define RX_STS_DRIBBLING_ (0x00000004) | ||
261 | #define RX_STS_CRC_ERR_ (0x00000002) | ||
262 | #define RX_STATUS_FIFO_PEEK (0x44) | ||
263 | #define TX_STATUS_FIFO (0x48) | ||
264 | #define TX_STS_TAG_ (0xFFFF0000) | ||
265 | #define TX_STS_ES_ (0x00008000) | ||
266 | #define TX_STS_LOC_ (0x00000800) | ||
267 | #define TX_STS_NO_CARR_ (0x00000400) | ||
268 | #define TX_STS_LATE_COLL_ (0x00000200) | ||
269 | #define TX_STS_MANY_COLL_ (0x00000100) | ||
270 | #define TX_STS_COLL_CNT_ (0x00000078) | ||
271 | #define TX_STS_MANY_DEFER_ (0x00000004) | ||
272 | #define TX_STS_UNDERRUN_ (0x00000002) | ||
273 | #define TX_STS_DEFERRED_ (0x00000001) | ||
274 | #define TX_STATUS_FIFO_PEEK (0x4C) | ||
275 | #define ID_REV (0x50) | ||
276 | #define ID_REV_CHIP_ID_ (0xFFFF0000) /* RO */ | ||
277 | #define ID_REV_REV_ID_ (0x0000FFFF) /* RO */ | ||
278 | |||
279 | #define INT_CFG (0x54) | ||
280 | #define INT_CFG_INT_DEAS_ (0xFF000000) /* R/W */ | ||
281 | #define INT_CFG_INT_DEAS_CLR_ (0x00004000) | ||
282 | #define INT_CFG_INT_DEAS_STS_ (0x00002000) | ||
283 | #define INT_CFG_IRQ_INT_ (0x00001000) /* RO */ | ||
284 | #define INT_CFG_IRQ_EN_ (0x00000100) /* R/W */ | ||
285 | #define INT_CFG_IRQ_POL_ (0x00000010) /* R/W Not Affected by SW Reset */ | ||
286 | #define INT_CFG_IRQ_TYPE_ (0x00000001) /* R/W Not Affected by SW Reset */ | ||
287 | |||
288 | #define INT_STS (0x58) | ||
289 | #define INT_STS_SW_INT_ (0x80000000) /* R/WC */ | ||
290 | #define INT_STS_TXSTOP_INT_ (0x02000000) /* R/WC */ | ||
291 | #define INT_STS_RXSTOP_INT_ (0x01000000) /* R/WC */ | ||
292 | #define INT_STS_RXDFH_INT_ (0x00800000) /* R/WC */ | ||
293 | #define INT_STS_RXDF_INT_ (0x00400000) /* R/WC */ | ||
294 | #define INT_STS_TX_IOC_ (0x00200000) /* R/WC */ | ||
295 | #define INT_STS_RXD_INT_ (0x00100000) /* R/WC */ | ||
296 | #define INT_STS_GPT_INT_ (0x00080000) /* R/WC */ | ||
297 | #define INT_STS_PHY_INT_ (0x00040000) /* RO */ | ||
298 | #define INT_STS_PME_INT_ (0x00020000) /* R/WC */ | ||
299 | #define INT_STS_TXSO_ (0x00010000) /* R/WC */ | ||
300 | #define INT_STS_RWT_ (0x00008000) /* R/WC */ | ||
301 | #define INT_STS_RXE_ (0x00004000) /* R/WC */ | ||
302 | #define INT_STS_TXE_ (0x00002000) /* R/WC */ | ||
303 | //#define INT_STS_ERX_ (0x00001000) /* R/WC */ | ||
304 | #define INT_STS_TDFU_ (0x00000800) /* R/WC */ | ||
305 | #define INT_STS_TDFO_ (0x00000400) /* R/WC */ | ||
306 | #define INT_STS_TDFA_ (0x00000200) /* R/WC */ | ||
307 | #define INT_STS_TSFF_ (0x00000100) /* R/WC */ | ||
308 | #define INT_STS_TSFL_ (0x00000080) /* R/WC */ | ||
309 | //#define INT_STS_RXDF_ (0x00000040) /* R/WC */ | ||
310 | #define INT_STS_RDFO_ (0x00000040) /* R/WC */ | ||
311 | #define INT_STS_RDFL_ (0x00000020) /* R/WC */ | ||
312 | #define INT_STS_RSFF_ (0x00000010) /* R/WC */ | ||
313 | #define INT_STS_RSFL_ (0x00000008) /* R/WC */ | ||
314 | #define INT_STS_GPIO2_INT_ (0x00000004) /* R/WC */ | ||
315 | #define INT_STS_GPIO1_INT_ (0x00000002) /* R/WC */ | ||
316 | #define INT_STS_GPIO0_INT_ (0x00000001) /* R/WC */ | ||
317 | |||
318 | #define INT_EN (0x5C) | ||
319 | #define INT_EN_SW_INT_EN_ (0x80000000) /* R/W */ | ||
320 | #define INT_EN_TXSTOP_INT_EN_ (0x02000000) /* R/W */ | ||
321 | #define INT_EN_RXSTOP_INT_EN_ (0x01000000) /* R/W */ | ||
322 | #define INT_EN_RXDFH_INT_EN_ (0x00800000) /* R/W */ | ||
323 | //#define INT_EN_RXDF_INT_EN_ (0x00400000) /* R/W */ | ||
324 | #define INT_EN_TIOC_INT_EN_ (0x00200000) /* R/W */ | ||
325 | #define INT_EN_RXD_INT_EN_ (0x00100000) /* R/W */ | ||
326 | #define INT_EN_GPT_INT_EN_ (0x00080000) /* R/W */ | ||
327 | #define INT_EN_PHY_INT_EN_ (0x00040000) /* R/W */ | ||
328 | #define INT_EN_PME_INT_EN_ (0x00020000) /* R/W */ | ||
329 | #define INT_EN_TXSO_EN_ (0x00010000) /* R/W */ | ||
330 | #define INT_EN_RWT_EN_ (0x00008000) /* R/W */ | ||
331 | #define INT_EN_RXE_EN_ (0x00004000) /* R/W */ | ||
332 | #define INT_EN_TXE_EN_ (0x00002000) /* R/W */ | ||
333 | //#define INT_EN_ERX_EN_ (0x00001000) /* R/W */ | ||
334 | #define INT_EN_TDFU_EN_ (0x00000800) /* R/W */ | ||
335 | #define INT_EN_TDFO_EN_ (0x00000400) /* R/W */ | ||
336 | #define INT_EN_TDFA_EN_ (0x00000200) /* R/W */ | ||
337 | #define INT_EN_TSFF_EN_ (0x00000100) /* R/W */ | ||
338 | #define INT_EN_TSFL_EN_ (0x00000080) /* R/W */ | ||
339 | //#define INT_EN_RXDF_EN_ (0x00000040) /* R/W */ | ||
340 | #define INT_EN_RDFO_EN_ (0x00000040) /* R/W */ | ||
341 | #define INT_EN_RDFL_EN_ (0x00000020) /* R/W */ | ||
342 | #define INT_EN_RSFF_EN_ (0x00000010) /* R/W */ | ||
343 | #define INT_EN_RSFL_EN_ (0x00000008) /* R/W */ | ||
344 | #define INT_EN_GPIO2_INT_ (0x00000004) /* R/W */ | ||
345 | #define INT_EN_GPIO1_INT_ (0x00000002) /* R/W */ | ||
346 | #define INT_EN_GPIO0_INT_ (0x00000001) /* R/W */ | ||
347 | |||
348 | #define BYTE_TEST (0x64) | ||
349 | #define FIFO_INT (0x68) | ||
350 | #define FIFO_INT_TX_AVAIL_LEVEL_ (0xFF000000) /* R/W */ | ||
351 | #define FIFO_INT_TX_STS_LEVEL_ (0x00FF0000) /* R/W */ | ||
352 | #define FIFO_INT_RX_AVAIL_LEVEL_ (0x0000FF00) /* R/W */ | ||
353 | #define FIFO_INT_RX_STS_LEVEL_ (0x000000FF) /* R/W */ | ||
354 | |||
355 | #define RX_CFG (0x6C) | ||
356 | #define RX_CFG_RX_END_ALGN_ (0xC0000000) /* R/W */ | ||
357 | #define RX_CFG_RX_END_ALGN4_ (0x00000000) /* R/W */ | ||
358 | #define RX_CFG_RX_END_ALGN16_ (0x40000000) /* R/W */ | ||
359 | #define RX_CFG_RX_END_ALGN32_ (0x80000000) /* R/W */ | ||
360 | #define RX_CFG_RX_DMA_CNT_ (0x0FFF0000) /* R/W */ | ||
361 | #define RX_CFG_RX_DUMP_ (0x00008000) /* R/W */ | ||
362 | #define RX_CFG_RXDOFF_ (0x00001F00) /* R/W */ | ||
363 | //#define RX_CFG_RXBAD_ (0x00000001) /* R/W */ | ||
364 | |||
365 | #define TX_CFG (0x70) | ||
366 | //#define TX_CFG_TX_DMA_LVL_ (0xE0000000) /* R/W */ | ||
367 | //#define TX_CFG_TX_DMA_CNT_ (0x0FFF0000) /* R/W Self Clearing */ | ||
368 | #define TX_CFG_TXS_DUMP_ (0x00008000) /* Self Clearing */ | ||
369 | #define TX_CFG_TXD_DUMP_ (0x00004000) /* Self Clearing */ | ||
370 | #define TX_CFG_TXSAO_ (0x00000004) /* R/W */ | ||
371 | #define TX_CFG_TX_ON_ (0x00000002) /* R/W */ | ||
372 | #define TX_CFG_STOP_TX_ (0x00000001) /* Self Clearing */ | ||
373 | |||
374 | #define HW_CFG (0x74) | ||
375 | #define HW_CFG_TTM_ (0x00200000) /* R/W */ | ||
376 | #define HW_CFG_SF_ (0x00100000) /* R/W */ | ||
377 | #define HW_CFG_TX_FIF_SZ_ (0x000F0000) /* R/W */ | ||
378 | #define HW_CFG_TR_ (0x00003000) /* R/W */ | ||
379 | #define HW_CFG_PHY_CLK_SEL_ (0x00000060) /* R/W */ | ||
380 | #define HW_CFG_PHY_CLK_SEL_INT_PHY_ (0x00000000) /* R/W */ | ||
381 | #define HW_CFG_PHY_CLK_SEL_EXT_PHY_ (0x00000020) /* R/W */ | ||
382 | #define HW_CFG_PHY_CLK_SEL_CLK_DIS_ (0x00000040) /* R/W */ | ||
383 | #define HW_CFG_SMI_SEL_ (0x00000010) /* R/W */ | ||
384 | #define HW_CFG_EXT_PHY_DET_ (0x00000008) /* RO */ | ||
385 | #define HW_CFG_EXT_PHY_EN_ (0x00000004) /* R/W */ | ||
386 | #define HW_CFG_32_16_BIT_MODE_ (0x00000004) /* RO */ | ||
387 | #define HW_CFG_SRST_TO_ (0x00000002) /* RO */ | ||
388 | #define HW_CFG_SRST_ (0x00000001) /* Self Clearing */ | ||
389 | |||
390 | #define RX_DP_CTRL (0x78) | ||
391 | #define RX_DP_CTRL_RX_FFWD_ (0x80000000) /* R/W */ | ||
392 | #define RX_DP_CTRL_FFWD_BUSY_ (0x80000000) /* RO */ | ||
393 | |||
394 | #define RX_FIFO_INF (0x7C) | ||
395 | #define RX_FIFO_INF_RXSUSED_ (0x00FF0000) /* RO */ | ||
396 | #define RX_FIFO_INF_RXDUSED_ (0x0000FFFF) /* RO */ | ||
397 | |||
398 | #define TX_FIFO_INF (0x80) | ||
399 | #define TX_FIFO_INF_TSUSED_ (0x00FF0000) /* RO */ | ||
400 | #define TX_FIFO_INF_TDFREE_ (0x0000FFFF) /* RO */ | ||
401 | |||
402 | #define PMT_CTRL (0x84) | ||
403 | #define PMT_CTRL_PM_MODE_ (0x00003000) /* Self Clearing */ | ||
404 | #define PMT_CTRL_PHY_RST_ (0x00000400) /* Self Clearing */ | ||
405 | #define PMT_CTRL_WOL_EN_ (0x00000200) /* R/W */ | ||
406 | #define PMT_CTRL_ED_EN_ (0x00000100) /* R/W */ | ||
407 | #define PMT_CTRL_PME_TYPE_ (0x00000040) /* R/W Not Affected by SW Reset */ | ||
408 | #define PMT_CTRL_WUPS_ (0x00000030) /* R/WC */ | ||
409 | #define PMT_CTRL_WUPS_NOWAKE_ (0x00000000) /* R/WC */ | ||
410 | #define PMT_CTRL_WUPS_ED_ (0x00000010) /* R/WC */ | ||
411 | #define PMT_CTRL_WUPS_WOL_ (0x00000020) /* R/WC */ | ||
412 | #define PMT_CTRL_WUPS_MULTI_ (0x00000030) /* R/WC */ | ||
413 | #define PMT_CTRL_PME_IND_ (0x00000008) /* R/W */ | ||
414 | #define PMT_CTRL_PME_POL_ (0x00000004) /* R/W */ | ||
415 | #define PMT_CTRL_PME_EN_ (0x00000002) /* R/W Not Affected by SW Reset */ | ||
416 | #define PMT_CTRL_READY_ (0x00000001) /* RO */ | ||
417 | |||
418 | #define GPIO_CFG (0x88) | ||
419 | #define GPIO_CFG_LED3_EN_ (0x40000000) /* R/W */ | ||
420 | #define GPIO_CFG_LED2_EN_ (0x20000000) /* R/W */ | ||
421 | #define GPIO_CFG_LED1_EN_ (0x10000000) /* R/W */ | ||
422 | #define GPIO_CFG_GPIO2_INT_POL_ (0x04000000) /* R/W */ | ||
423 | #define GPIO_CFG_GPIO1_INT_POL_ (0x02000000) /* R/W */ | ||
424 | #define GPIO_CFG_GPIO0_INT_POL_ (0x01000000) /* R/W */ | ||
425 | #define GPIO_CFG_EEPR_EN_ (0x00700000) /* R/W */ | ||
426 | #define GPIO_CFG_GPIOBUF2_ (0x00040000) /* R/W */ | ||
427 | #define GPIO_CFG_GPIOBUF1_ (0x00020000) /* R/W */ | ||
428 | #define GPIO_CFG_GPIOBUF0_ (0x00010000) /* R/W */ | ||
429 | #define GPIO_CFG_GPIODIR2_ (0x00000400) /* R/W */ | ||
430 | #define GPIO_CFG_GPIODIR1_ (0x00000200) /* R/W */ | ||
431 | #define GPIO_CFG_GPIODIR0_ (0x00000100) /* R/W */ | ||
432 | #define GPIO_CFG_GPIOD4_ (0x00000010) /* R/W */ | ||
433 | #define GPIO_CFG_GPIOD3_ (0x00000008) /* R/W */ | ||
434 | #define GPIO_CFG_GPIOD2_ (0x00000004) /* R/W */ | ||
435 | #define GPIO_CFG_GPIOD1_ (0x00000002) /* R/W */ | ||
436 | #define GPIO_CFG_GPIOD0_ (0x00000001) /* R/W */ | ||
437 | |||
438 | #define GPT_CFG (0x8C) | ||
439 | #define GPT_CFG_TIMER_EN_ (0x20000000) /* R/W */ | ||
440 | #define GPT_CFG_GPT_LOAD_ (0x0000FFFF) /* R/W */ | ||
441 | |||
442 | #define GPT_CNT (0x90) | ||
443 | #define GPT_CNT_GPT_CNT_ (0x0000FFFF) /* RO */ | ||
444 | |||
445 | #define ENDIAN (0x98) | ||
446 | #define FREE_RUN (0x9C) | ||
447 | #define RX_DROP (0xA0) | ||
448 | #define MAC_CSR_CMD (0xA4) | ||
449 | #define MAC_CSR_CMD_CSR_BUSY_ (0x80000000) /* Self Clearing */ | ||
450 | #define MAC_CSR_CMD_R_NOT_W_ (0x40000000) /* R/W */ | ||
451 | #define MAC_CSR_CMD_CSR_ADDR_ (0x000000FF) /* R/W */ | ||
452 | |||
453 | #define MAC_CSR_DATA (0xA8) | ||
454 | #define AFC_CFG (0xAC) | ||
455 | #define AFC_CFG_AFC_HI_ (0x00FF0000) /* R/W */ | ||
456 | #define AFC_CFG_AFC_LO_ (0x0000FF00) /* R/W */ | ||
457 | #define AFC_CFG_BACK_DUR_ (0x000000F0) /* R/W */ | ||
458 | #define AFC_CFG_FCMULT_ (0x00000008) /* R/W */ | ||
459 | #define AFC_CFG_FCBRD_ (0x00000004) /* R/W */ | ||
460 | #define AFC_CFG_FCADD_ (0x00000002) /* R/W */ | ||
461 | #define AFC_CFG_FCANY_ (0x00000001) /* R/W */ | ||
462 | |||
463 | #define E2P_CMD (0xB0) | ||
464 | #define E2P_CMD_EPC_BUSY_ (0x80000000) /* Self Clearing */ | ||
465 | #define E2P_CMD_EPC_CMD_ (0x70000000) /* R/W */ | ||
466 | #define E2P_CMD_EPC_CMD_READ_ (0x00000000) /* R/W */ | ||
467 | #define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) /* R/W */ | ||
468 | #define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) /* R/W */ | ||
469 | #define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) /* R/W */ | ||
470 | #define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) /* R/W */ | ||
471 | #define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) /* R/W */ | ||
472 | #define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) /* R/W */ | ||
473 | #define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) /* R/W */ | ||
474 | #define E2P_CMD_EPC_TIMEOUT_ (0x00000200) /* RO */ | ||
475 | #define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) /* RO */ | ||
476 | #define E2P_CMD_EPC_ADDR_ (0x000000FF) /* R/W */ | ||
477 | |||
478 | #define E2P_DATA (0xB4) | ||
479 | #define E2P_DATA_EEPROM_DATA_ (0x000000FF) /* R/W */ | ||
480 | /* end of LAN register offsets and bit definitions */ | ||
481 | |||
482 | /* | ||
483 | **************************************************************************** | ||
484 | **************************************************************************** | ||
485 | * MAC Control and Status Register (Indirect Address) | ||
486 | * Offset (through the MAC_CSR CMD and DATA port) | ||
487 | **************************************************************************** | ||
488 | **************************************************************************** | ||
489 | * | ||
490 | */ | ||
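The MAC registers listed next are not mapped directly into the register window; they are reached indirectly through the MAC_CSR_CMD and MAC_CSR_DATA ports defined above. The fragment below is only an illustration of that handshake (the SMC_GET_MAC_CSR macro later in this file implements the same sequence); ioaddr, the u32 type and the SMC_inl/SMC_outl accessors are assumed to come from the including platform header, and the function name is hypothetical.

	/* Illustrative only: read one indirect MAC CSR, e.g. MAC_CR. */
	static u32 smc911x_mac_csr_read(unsigned long ioaddr, u8 reg)
	{
		/* wait for any previous CSR access to finish */
		while (SMC_inl(ioaddr, MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
			;
		/* issue a read command for the requested CSR address */
		SMC_outl(MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_ | reg,
			 ioaddr, MAC_CSR_CMD);
		/* busy clears once the value is latched into the data port */
		while (SMC_inl(ioaddr, MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
			;
		return SMC_inl(ioaddr, MAC_CSR_DATA);
	}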
491 | #define MAC_CR (0x01) /* R/W */ | ||
492 | |||
493 | /* MAC_CR - MAC Control Register */ | ||
494 | #define MAC_CR_RXALL_ (0x80000000) | ||
495 | // TODO: delete this bit? It is not described in the data sheet. | ||
496 | #define MAC_CR_HBDIS_ (0x10000000) | ||
497 | #define MAC_CR_RCVOWN_ (0x00800000) | ||
498 | #define MAC_CR_LOOPBK_ (0x00200000) | ||
499 | #define MAC_CR_FDPX_ (0x00100000) | ||
500 | #define MAC_CR_MCPAS_ (0x00080000) | ||
501 | #define MAC_CR_PRMS_ (0x00040000) | ||
502 | #define MAC_CR_INVFILT_ (0x00020000) | ||
503 | #define MAC_CR_PASSBAD_ (0x00010000) | ||
504 | #define MAC_CR_HFILT_ (0x00008000) | ||
505 | #define MAC_CR_HPFILT_ (0x00002000) | ||
506 | #define MAC_CR_LCOLL_ (0x00001000) | ||
507 | #define MAC_CR_BCAST_ (0x00000800) | ||
508 | #define MAC_CR_DISRTY_ (0x00000400) | ||
509 | #define MAC_CR_PADSTR_ (0x00000100) | ||
510 | #define MAC_CR_BOLMT_MASK_ (0x000000C0) | ||
511 | #define MAC_CR_DFCHK_ (0x00000020) | ||
512 | #define MAC_CR_TXEN_ (0x00000008) | ||
513 | #define MAC_CR_RXEN_ (0x00000004) | ||
514 | |||
515 | #define ADDRH (0x02) /* R/W mask 0x0000FFFFUL */ | ||
516 | #define ADDRL (0x03) /* R/W mask 0xFFFFFFFFUL */ | ||
517 | #define HASHH (0x04) /* R/W */ | ||
518 | #define HASHL (0x05) /* R/W */ | ||
519 | |||
520 | #define MII_ACC (0x06) /* R/W */ | ||
521 | #define MII_ACC_PHY_ADDR_ (0x0000F800) | ||
522 | #define MII_ACC_MIIRINDA_ (0x000007C0) | ||
523 | #define MII_ACC_MII_WRITE_ (0x00000002) | ||
524 | #define MII_ACC_MII_BUSY_ (0x00000001) | ||
525 | |||
526 | #define MII_DATA (0x07) /* R/W mask 0x0000FFFFUL */ | ||
527 | |||
528 | #define FLOW (0x08) /* R/W */ | ||
529 | #define FLOW_FCPT_ (0xFFFF0000) | ||
530 | #define FLOW_FCPASS_ (0x00000004) | ||
531 | #define FLOW_FCEN_ (0x00000002) | ||
532 | #define FLOW_FCBSY_ (0x00000001) | ||
533 | |||
534 | #define VLAN1 (0x09) /* R/W mask 0x0000FFFFUL */ | ||
535 | #define VLAN1_VTI1_ (0x0000ffff) | ||
536 | |||
537 | #define VLAN2 (0x0A) /* R/W mask 0x0000FFFFUL */ | ||
538 | #define VLAN2_VTI2_ (0x0000ffff) | ||
539 | |||
540 | #define WUFF (0x0B) /* WO */ | ||
541 | |||
542 | #define WUCSR (0x0C) /* R/W */ | ||
543 | #define WUCSR_GUE_ (0x00000200) | ||
544 | #define WUCSR_WUFR_ (0x00000040) | ||
545 | #define WUCSR_MPR_ (0x00000020) | ||
546 | #define WUCSR_WAKE_EN_ (0x00000004) | ||
547 | #define WUCSR_MPEN_ (0x00000002) | ||
548 | |||
549 | /* | ||
550 | **************************************************************************** | ||
551 | * Chip Specific MII Defines | ||
552 | **************************************************************************** | ||
553 | * | ||
554 | * Phy register offsets and bit definitions | ||
555 | * | ||
556 | */ | ||
557 | |||
558 | #define PHY_MODE_CTRL_STS ((u32)17) /* Mode Control/Status Register */ | ||
559 | //#define MODE_CTRL_STS_FASTRIP_ ((u16)0x4000) | ||
560 | #define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000) | ||
561 | //#define MODE_CTRL_STS_LOWSQEN_ ((u16)0x0800) | ||
562 | //#define MODE_CTRL_STS_MDPREBP_ ((u16)0x0400) | ||
563 | //#define MODE_CTRL_STS_FARLOOPBACK_ ((u16)0x0200) | ||
564 | //#define MODE_CTRL_STS_FASTEST_ ((u16)0x0100) | ||
565 | //#define MODE_CTRL_STS_REFCLKEN_ ((u16)0x0010) | ||
566 | //#define MODE_CTRL_STS_PHYADBP_ ((u16)0x0008) | ||
567 | //#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004) | ||
568 | #define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002) | ||
569 | |||
570 | #define PHY_INT_SRC ((u32)29) | ||
571 | #define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080) | ||
572 | #define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040) | ||
573 | #define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020) | ||
574 | #define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010) | ||
575 | #define PHY_INT_SRC_ANEG_LP_ACK_ ((u16)0x0008) | ||
576 | #define PHY_INT_SRC_PAR_DET_FAULT_ ((u16)0x0004) | ||
577 | #define PHY_INT_SRC_ANEG_PGRX_ ((u16)0x0002) | ||
578 | |||
579 | #define PHY_INT_MASK ((u32)30) | ||
580 | #define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080) | ||
581 | #define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040) | ||
582 | #define PHY_INT_MASK_REMOTE_FAULT_ ((u16)0x0020) | ||
583 | #define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010) | ||
584 | #define PHY_INT_MASK_ANEG_LP_ACK_ ((u16)0x0008) | ||
585 | #define PHY_INT_MASK_PAR_DET_FAULT_ ((u16)0x0004) | ||
586 | #define PHY_INT_MASK_ANEG_PGRX_ ((u16)0x0002) | ||
587 | |||
588 | #define PHY_SPECIAL ((u32)31) | ||
589 | #define PHY_SPECIAL_ANEG_DONE_ ((u16)0x1000) | ||
590 | #define PHY_SPECIAL_RES_ ((u16)0x0040) | ||
591 | #define PHY_SPECIAL_RES_MASK_ ((u16)0x0FE1) | ||
592 | #define PHY_SPECIAL_SPD_ ((u16)0x001C) | ||
593 | #define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004) | ||
594 | #define PHY_SPECIAL_SPD_10FULL_ ((u16)0x0014) | ||
595 | #define PHY_SPECIAL_SPD_100HALF_ ((u16)0x0008) | ||
596 | #define PHY_SPECIAL_SPD_100FULL_ ((u16)0x0018) | ||
597 | |||
598 | #define LAN911X_INTERNAL_PHY_ID (0x0007C000) | ||
599 | |||
600 | /* Chip ID values */ | ||
601 | #define CHIP_9115 0x115 | ||
602 | #define CHIP_9116 0x116 | ||
603 | #define CHIP_9117 0x117 | ||
604 | #define CHIP_9118 0x118 | ||
605 | |||
606 | struct chip_id { | ||
607 | u16 id; | ||
608 | char *name; | ||
609 | }; | ||
610 | |||
611 | static const struct chip_id chip_ids[] = { | ||
612 | { CHIP_9115, "LAN9115" }, | ||
613 | { CHIP_9116, "LAN9116" }, | ||
614 | { CHIP_9117, "LAN9117" }, | ||
615 | { CHIP_9118, "LAN9118" }, | ||
616 | { 0, NULL }, | ||
617 | }; | ||
618 | |||
619 | #define IS_REV_A(x) (((x) & 0xFFFF) == 0) | ||
620 | |||
621 | /* | ||
622 | * Macros to abstract register access according to the data bus | ||
623 | * capabilities. Please use those and not the in/out primitives. | ||
624 | */ | ||
625 | /* FIFO read/write macros */ | ||
626 | #define SMC_PUSH_DATA(p, l) SMC_outsl( ioaddr, TX_DATA_FIFO, p, (l) >> 2 ) | ||
627 | #define SMC_PULL_DATA(p, l) SMC_insl ( ioaddr, RX_DATA_FIFO, p, (l) >> 2 ) | ||
628 | #define SMC_SET_TX_FIFO(x) SMC_outl( x, ioaddr, TX_DATA_FIFO ) | ||
629 | #define SMC_GET_RX_FIFO() SMC_inl( ioaddr, RX_DATA_FIFO ) | ||
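As a usage sketch (hypothetical fragment, not taken from the driver): the PUSH/PULL helpers move whole 32-bit words and pick up ioaddr from the surrounding scope, so a caller rounds the byte count up to the next multiple of four before invoking them.

	/* Hypothetical TX-path fragment: copy a frame into the TX data FIFO
	 * one longword at a time; len is padded to a 4-byte boundary so the
	 * (l) >> 2 word count inside SMC_PUSH_DATA does not drop the tail. */
	static void smc911x_push_frame(unsigned long ioaddr, const void *buf,
				       int len)
	{
		SMC_PUSH_DATA(buf, (len + 3) & ~3);
	}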
630 | |||
631 | |||
632 | /* I/O mapped register read/write macros */ | ||
633 | #define SMC_GET_TX_STS_FIFO() SMC_inl( ioaddr, TX_STATUS_FIFO ) | ||
634 | #define SMC_GET_RX_STS_FIFO() SMC_inl( ioaddr, RX_STATUS_FIFO ) | ||
635 | #define SMC_GET_RX_STS_FIFO_PEEK() SMC_inl( ioaddr, RX_STATUS_FIFO_PEEK ) | ||
636 | #define SMC_GET_PN() (SMC_inl( ioaddr, ID_REV ) >> 16) | ||
637 | #define SMC_GET_REV() (SMC_inl( ioaddr, ID_REV ) & 0xFFFF) | ||
638 | #define SMC_GET_IRQ_CFG() SMC_inl( ioaddr, INT_CFG ) | ||
639 | #define SMC_SET_IRQ_CFG(x) SMC_outl( x, ioaddr, INT_CFG ) | ||
640 | #define SMC_GET_INT() SMC_inl( ioaddr, INT_STS ) | ||
641 | #define SMC_ACK_INT(x) SMC_outl( x, ioaddr, INT_STS ) | ||
642 | #define SMC_GET_INT_EN() SMC_inl( ioaddr, INT_EN ) | ||
643 | #define SMC_SET_INT_EN(x) SMC_outl( x, ioaddr, INT_EN ) | ||
644 | #define SMC_GET_BYTE_TEST() SMC_inl( ioaddr, BYTE_TEST ) | ||
645 | #define SMC_SET_BYTE_TEST(x) SMC_outl( x, ioaddr, BYTE_TEST ) | ||
646 | #define SMC_GET_FIFO_INT() SMC_inl( ioaddr, FIFO_INT ) | ||
647 | #define SMC_SET_FIFO_INT(x) SMC_outl( x, ioaddr, FIFO_INT ) | ||
648 | #define SMC_SET_FIFO_TDA(x) \ | ||
649 | do { \ | ||
650 | unsigned long __flags; \ | ||
651 | int __mask; \ | ||
652 | local_irq_save(__flags); \ | ||
653 | __mask = SMC_GET_FIFO_INT() & ~(0xFF<<24); \ | ||
654 | SMC_SET_FIFO_INT( __mask | (x)<<24 ); \ | ||
655 | local_irq_restore(__flags); \ | ||
656 | } while (0) | ||
657 | #define SMC_SET_FIFO_TSL(x) \ | ||
658 | do { \ | ||
659 | unsigned long __flags; \ | ||
660 | int __mask; \ | ||
661 | local_irq_save(__flags); \ | ||
662 | __mask = SMC_GET_FIFO_INT() & ~(0xFF<<16); \ | ||
663 | SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<16)); \ | ||
664 | local_irq_restore(__flags); \ | ||
665 | } while (0) | ||
666 | #define SMC_SET_FIFO_RSA(x) \ | ||
667 | do { \ | ||
668 | unsigned long __flags; \ | ||
669 | int __mask; \ | ||
670 | local_irq_save(__flags); \ | ||
671 | __mask = SMC_GET_FIFO_INT() & ~(0xFF<<8); \ | ||
672 | SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<8)); \ | ||
673 | local_irq_restore(__flags); \ | ||
674 | } while (0) | ||
675 | #define SMC_SET_FIFO_RSL(x) \ | ||
676 | do { \ | ||
677 | unsigned long __flags; \ | ||
678 | int __mask; \ | ||
679 | local_irq_save(__flags); \ | ||
680 | __mask = SMC_GET_FIFO_INT() & ~0xFF; \ | ||
681 | SMC_SET_FIFO_INT( __mask | ((x) & 0xFF)); \ | ||
682 | local_irq_restore(__flags); \ | ||
683 | } while (0) | ||
684 | #define SMC_GET_RX_CFG() SMC_inl( ioaddr, RX_CFG ) | ||
685 | #define SMC_SET_RX_CFG(x) SMC_outl( x, ioaddr, RX_CFG ) | ||
686 | #define SMC_GET_TX_CFG() SMC_inl( ioaddr, TX_CFG ) | ||
687 | #define SMC_SET_TX_CFG(x) SMC_outl( x, ioaddr, TX_CFG ) | ||
688 | #define SMC_GET_HW_CFG() SMC_inl( ioaddr, HW_CFG ) | ||
689 | #define SMC_SET_HW_CFG(x) SMC_outl( x, ioaddr, HW_CFG ) | ||
690 | #define SMC_GET_RX_DP_CTRL() SMC_inl( ioaddr, RX_DP_CTRL ) | ||
691 | #define SMC_SET_RX_DP_CTRL(x) SMC_outl( x, ioaddr, RX_DP_CTRL ) | ||
692 | #define SMC_GET_PMT_CTRL() SMC_inl( ioaddr, PMT_CTRL ) | ||
693 | #define SMC_SET_PMT_CTRL(x) SMC_outl( x, ioaddr, PMT_CTRL ) | ||
694 | #define SMC_GET_GPIO_CFG() SMC_inl( ioaddr, GPIO_CFG ) | ||
695 | #define SMC_SET_GPIO_CFG(x) SMC_outl( x, ioaddr, GPIO_CFG ) | ||
696 | #define SMC_GET_RX_FIFO_INF() SMC_inl( ioaddr, RX_FIFO_INF ) | ||
697 | #define SMC_SET_RX_FIFO_INF(x) SMC_outl( x, ioaddr, RX_FIFO_INF ) | ||
698 | #define SMC_GET_TX_FIFO_INF() SMC_inl( ioaddr, TX_FIFO_INF ) | ||
699 | #define SMC_SET_TX_FIFO_INF(x) SMC_outl( x, ioaddr, TX_FIFO_INF ) | ||
700 | #define SMC_GET_GPT_CFG() SMC_inl( ioaddr, GPT_CFG ) | ||
701 | #define SMC_SET_GPT_CFG(x) SMC_outl( x, ioaddr, GPT_CFG ) | ||
702 | #define SMC_GET_RX_DROP() SMC_inl( ioaddr, RX_DROP ) | ||
703 | #define SMC_SET_RX_DROP(x) SMC_outl( x, ioaddr, RX_DROP ) | ||
704 | #define SMC_GET_MAC_CMD() SMC_inl( ioaddr, MAC_CSR_CMD ) | ||
705 | #define SMC_SET_MAC_CMD(x) SMC_outl( x, ioaddr, MAC_CSR_CMD ) | ||
706 | #define SMC_GET_MAC_DATA() SMC_inl( ioaddr, MAC_CSR_DATA ) | ||
707 | #define SMC_SET_MAC_DATA(x) SMC_outl( x, ioaddr, MAC_CSR_DATA ) | ||
708 | #define SMC_GET_AFC_CFG() SMC_inl( ioaddr, AFC_CFG ) | ||
709 | #define SMC_SET_AFC_CFG(x) SMC_outl( x, ioaddr, AFC_CFG ) | ||
710 | #define SMC_GET_E2P_CMD() SMC_inl( ioaddr, E2P_CMD ) | ||
711 | #define SMC_SET_E2P_CMD(x) SMC_outl( x, ioaddr, E2P_CMD ) | ||
712 | #define SMC_GET_E2P_DATA() SMC_inl( ioaddr, E2P_DATA ) | ||
713 | #define SMC_SET_E2P_DATA(x) SMC_outl( x, ioaddr, E2P_DATA ) | ||
714 | |||
715 | /* MAC register read/write macros */ | ||
716 | #define SMC_GET_MAC_CSR(a,v) \ | ||
717 | do { \ | ||
718 | while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \ | ||
719 | SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | \ | ||
720 | MAC_CSR_CMD_R_NOT_W_ | (a) ); \ | ||
721 | while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \ | ||
722 | v = SMC_GET_MAC_DATA(); \ | ||
723 | } while (0) | ||
724 | #define SMC_SET_MAC_CSR(a,v) \ | ||
725 | do { \ | ||
726 | while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \ | ||
727 | SMC_SET_MAC_DATA(v); \ | ||
728 | SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | (a) ); \ | ||
729 | while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \ | ||
730 | } while (0) | ||
731 | #define SMC_GET_MAC_CR(x) SMC_GET_MAC_CSR( MAC_CR, x ) | ||
732 | #define SMC_SET_MAC_CR(x) SMC_SET_MAC_CSR( MAC_CR, x ) | ||
733 | #define SMC_GET_ADDRH(x) SMC_GET_MAC_CSR( ADDRH, x ) | ||
734 | #define SMC_SET_ADDRH(x) SMC_SET_MAC_CSR( ADDRH, x ) | ||
735 | #define SMC_GET_ADDRL(x) SMC_GET_MAC_CSR( ADDRL, x ) | ||
736 | #define SMC_SET_ADDRL(x) SMC_SET_MAC_CSR( ADDRL, x ) | ||
737 | #define SMC_GET_HASHH(x) SMC_GET_MAC_CSR( HASHH, x ) | ||
738 | #define SMC_SET_HASHH(x) SMC_SET_MAC_CSR( HASHH, x ) | ||
739 | #define SMC_GET_HASHL(x) SMC_GET_MAC_CSR( HASHL, x ) | ||
740 | #define SMC_SET_HASHL(x) SMC_SET_MAC_CSR( HASHL, x ) | ||
741 | #define SMC_GET_MII_ACC(x) SMC_GET_MAC_CSR( MII_ACC, x ) | ||
742 | #define SMC_SET_MII_ACC(x) SMC_SET_MAC_CSR( MII_ACC, x ) | ||
743 | #define SMC_GET_MII_DATA(x) SMC_GET_MAC_CSR( MII_DATA, x ) | ||
744 | #define SMC_SET_MII_DATA(x) SMC_SET_MAC_CSR( MII_DATA, x ) | ||
745 | #define SMC_GET_FLOW(x) SMC_GET_MAC_CSR( FLOW, x ) | ||
746 | #define SMC_SET_FLOW(x) SMC_SET_MAC_CSR( FLOW, x ) | ||
747 | #define SMC_GET_VLAN1(x) SMC_GET_MAC_CSR( VLAN1, x ) | ||
748 | #define SMC_SET_VLAN1(x) SMC_SET_MAC_CSR( VLAN1, x ) | ||
749 | #define SMC_GET_VLAN2(x) SMC_GET_MAC_CSR( VLAN2, x ) | ||
750 | #define SMC_SET_VLAN2(x) SMC_SET_MAC_CSR( VLAN2, x ) | ||
751 | #define SMC_SET_WUFF(x) SMC_SET_MAC_CSR( WUFF, x ) | ||
752 | #define SMC_GET_WUCSR(x) SMC_GET_MAC_CSR( WUCSR, x ) | ||
753 | #define SMC_SET_WUCSR(x) SMC_SET_MAC_CSR( WUCSR, x ) | ||
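For example, an open path could use the MAC_CR accessors above to turn the transmitter and receiver on. This is an illustrative sketch only; the function name is made up and ioaddr must be the same variable the SMC_* macros expect in scope.

	/* Illustrative: enable the MAC transmitter and receiver via MAC_CR. */
	static void smc911x_enable_txrx(unsigned long ioaddr)
	{
		unsigned int cr;

		SMC_GET_MAC_CR(cr);
		cr |= MAC_CR_TXEN_ | MAC_CR_RXEN_;
		SMC_SET_MAC_CR(cr);
	}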
754 | |||
755 | /* PHY register read/write macros */ | ||
756 | #define SMC_GET_MII(a,phy,v) \ | ||
757 | do { \ | ||
758 | u32 __v; \ | ||
759 | do { \ | ||
760 | SMC_GET_MII_ACC(__v); \ | ||
761 | } while ( __v & MII_ACC_MII_BUSY_ ); \ | ||
762 | SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) | \ | ||
763 | MII_ACC_MII_BUSY_); \ | ||
764 | do { \ | ||
765 | SMC_GET_MII_ACC(__v); \ | ||
766 | } while ( __v & MII_ACC_MII_BUSY_ ); \ | ||
767 | SMC_GET_MII_DATA(v); \ | ||
768 | } while (0) | ||
769 | #define SMC_SET_MII(a,phy,v) \ | ||
770 | do { \ | ||
771 | u32 __v; \ | ||
772 | do { \ | ||
773 | SMC_GET_MII_ACC(__v); \ | ||
774 | } while ( __v & MII_ACC_MII_BUSY_ ); \ | ||
775 | SMC_SET_MII_DATA(v); \ | ||
776 | SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) | \ | ||
777 | MII_ACC_MII_BUSY_ | \ | ||
778 | MII_ACC_MII_WRITE_ ); \ | ||
779 | do { \ | ||
780 | SMC_GET_MII_ACC(__v); \ | ||
781 | } while ( __v & MII_ACC_MII_BUSY_ ); \ | ||
782 | } while (0) | ||
783 | #define SMC_GET_PHY_BMCR(phy,x) SMC_GET_MII( MII_BMCR, phy, x ) | ||
784 | #define SMC_SET_PHY_BMCR(phy,x) SMC_SET_MII( MII_BMCR, phy, x ) | ||
785 | #define SMC_GET_PHY_BMSR(phy,x) SMC_GET_MII( MII_BMSR, phy, x ) | ||
786 | #define SMC_GET_PHY_ID1(phy,x) SMC_GET_MII( MII_PHYSID1, phy, x ) | ||
787 | #define SMC_GET_PHY_ID2(phy,x) SMC_GET_MII( MII_PHYSID2, phy, x ) | ||
788 | #define SMC_GET_PHY_MII_ADV(phy,x) SMC_GET_MII( MII_ADVERTISE, phy, x ) | ||
789 | #define SMC_SET_PHY_MII_ADV(phy,x) SMC_SET_MII( MII_ADVERTISE, phy, x ) | ||
790 | #define SMC_GET_PHY_MII_LPA(phy,x) SMC_GET_MII( MII_LPA, phy, x ) | ||
791 | #define SMC_SET_PHY_MII_LPA(phy,x) SMC_SET_MII( MII_LPA, phy, x ) | ||
792 | #define SMC_GET_PHY_CTRL_STS(phy,x) SMC_GET_MII( PHY_MODE_CTRL_STS, phy, x ) | ||
793 | #define SMC_SET_PHY_CTRL_STS(phy,x) SMC_SET_MII( PHY_MODE_CTRL_STS, phy, x ) | ||
794 | #define SMC_GET_PHY_INT_SRC(phy,x) SMC_GET_MII( PHY_INT_SRC, phy, x ) | ||
795 | #define SMC_SET_PHY_INT_SRC(phy,x) SMC_SET_MII( PHY_INT_SRC, phy, x ) | ||
796 | #define SMC_GET_PHY_INT_MASK(phy,x) SMC_GET_MII( PHY_INT_MASK, phy, x ) | ||
797 | #define SMC_SET_PHY_INT_MASK(phy,x) SMC_SET_MII( PHY_INT_MASK, phy, x ) | ||
798 | #define SMC_GET_PHY_SPECIAL(phy,x) SMC_GET_MII( PHY_SPECIAL, phy, x ) | ||
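The PHY_SPECIAL speed bits defined earlier encode the autonegotiation result; a driver could decode them roughly as below. Sketch only: the function name is hypothetical, and phyaddr/ioaddr are assumed to be supplied by the caller.

	/* Illustrative: derive speed and duplex from the PHY special register. */
	static void smc911x_decode_speed(unsigned long ioaddr, int phyaddr,
					 int *speed, int *full_duplex)
	{
		unsigned int status;

		SMC_GET_PHY_SPECIAL(phyaddr, status);
		switch (status & PHY_SPECIAL_SPD_) {
		case PHY_SPECIAL_SPD_10HALF_:  *speed = 10;  *full_duplex = 0; break;
		case PHY_SPECIAL_SPD_10FULL_:  *speed = 10;  *full_duplex = 1; break;
		case PHY_SPECIAL_SPD_100HALF_: *speed = 100; *full_duplex = 0; break;
		case PHY_SPECIAL_SPD_100FULL_: *speed = 100; *full_duplex = 1; break;
		default:                       *speed = 0;   *full_duplex = 0; break;
		}
	}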
799 | |||
800 | |||
801 | |||
802 | /* Misc read/write macros */ | ||
803 | |||
804 | #ifndef SMC_GET_MAC_ADDR | ||
805 | #define SMC_GET_MAC_ADDR(addr) \ | ||
806 | do { \ | ||
807 | unsigned int __v; \ | ||
808 | \ | ||
809 | SMC_GET_MAC_CSR(ADDRL, __v); \ | ||
810 | addr[0] = __v; addr[1] = __v >> 8; \ | ||
811 | addr[2] = __v >> 16; addr[3] = __v >> 24; \ | ||
812 | SMC_GET_MAC_CSR(ADDRH, __v); \ | ||
813 | addr[4] = __v; addr[5] = __v >> 8; \ | ||
814 | } while (0) | ||
815 | #endif | ||
816 | |||
817 | #define SMC_SET_MAC_ADDR(addr) \ | ||
818 | do { \ | ||
819 | SMC_SET_MAC_CSR(ADDRL, \ | ||
820 | addr[0] | \ | ||
821 | (addr[1] << 8) | \ | ||
822 | (addr[2] << 16) | \ | ||
823 | (addr[3] << 24)); \ | ||
824 | SMC_SET_MAC_CSR(ADDRH, addr[4]|(addr[5] << 8));\ | ||
825 | } while (0) | ||
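A probe routine would typically use these helpers to pull the station address programmed into ADDRH/ADDRL into the net_device. Illustrative fragment only, assuming a kernel context where struct net_device is available and ioaddr is set up by the caller.

	/* Illustrative: fetch the MAC address from the chip into dev->dev_addr. */
	static void smc911x_read_mac_address(unsigned long ioaddr,
					     struct net_device *dev)
	{
		SMC_GET_MAC_ADDR(dev->dev_addr);
	}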
826 | |||
827 | |||
828 | #define SMC_WRITE_EEPROM_CMD(cmd, addr) \ | ||
829 | do { \ | ||
830 | while (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_); \ | ||
831 | SMC_SET_E2P_CMD(E2P_CMD_EPC_BUSY_ | (cmd) | ((addr) & E2P_CMD_EPC_ADDR_)); \ | ||
832 | while (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_); \ | ||
833 | } while (0) | ||
834 | |||
835 | #endif /* _SMC911X_H_ */ | ||
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index e1be1af51201..f72a4f57905a 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -129,6 +129,24 @@ | |||
129 | #define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) | 129 | #define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) |
130 | #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) | 130 | #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) |
131 | 131 | ||
132 | #elif defined(CONFIG_MACH_LOGICPD_PXA270) | ||
133 | |||
134 | #define SMC_CAN_USE_8BIT 0 | ||
135 | #define SMC_CAN_USE_16BIT 1 | ||
136 | #define SMC_CAN_USE_32BIT 0 | ||
137 | #define SMC_IO_SHIFT 0 | ||
138 | #define SMC_NOWAIT 1 | ||
139 | #define SMC_USE_PXA_DMA 1 | ||
140 | |||
141 | #define SMC_inb(a, r) readb((a) + (r)) | ||
142 | #define SMC_inw(a, r) readw((a) + (r)) | ||
143 | #define SMC_inl(a, r) readl((a) + (r)) | ||
144 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
145 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
146 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
147 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
148 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
149 | |||
132 | #elif defined(CONFIG_ARCH_INNOKOM) || \ | 150 | #elif defined(CONFIG_ARCH_INNOKOM) || \ |
133 | defined(CONFIG_MACH_MAINSTONE) || \ | 151 | defined(CONFIG_MACH_MAINSTONE) || \ |
134 | defined(CONFIG_ARCH_PXA_IDP) || \ | 152 | defined(CONFIG_ARCH_PXA_IDP) || \ |
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index b2ddd5e79303..9282b4b0c022 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c | |||
@@ -345,9 +345,9 @@ static int bcm5421_enable_fiber(struct mii_phy* phy) | |||
345 | 345 | ||
346 | static int bcm5461_enable_fiber(struct mii_phy* phy) | 346 | static int bcm5461_enable_fiber(struct mii_phy* phy) |
347 | { | 347 | { |
348 | phy_write(phy, MII_NCONFIG, 0xfc0c); | 348 | phy_write(phy, MII_NCONFIG, 0xfc0c); |
349 | phy_write(phy, MII_BMCR, 0x4140); | 349 | phy_write(phy, MII_BMCR, 0x4140); |
350 | phy_write(phy, MII_NCONFIG, 0xfc0b); | 350 | phy_write(phy, MII_NCONFIG, 0xfc0b); |
351 | phy_write(phy, MII_BMCR, 0x0140); | 351 | phy_write(phy, MII_BMCR, 0x0140); |
352 | 352 | ||
353 | return 0; | 353 | return 0; |
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index e3dd144d326b..5f743b972949 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -227,12 +227,12 @@ enum { | |||
227 | SROMC0InfoLeaf = 27, | 227 | SROMC0InfoLeaf = 27, |
228 | MediaBlockMask = 0x3f, | 228 | MediaBlockMask = 0x3f, |
229 | MediaCustomCSRs = (1 << 6), | 229 | MediaCustomCSRs = (1 << 6), |
230 | 230 | ||
231 | /* PCIPM bits */ | 231 | /* PCIPM bits */ |
232 | PM_Sleep = (1 << 31), | 232 | PM_Sleep = (1 << 31), |
233 | PM_Snooze = (1 << 30), | 233 | PM_Snooze = (1 << 30), |
234 | PM_Mask = PM_Sleep | PM_Snooze, | 234 | PM_Mask = PM_Sleep | PM_Snooze, |
235 | 235 | ||
236 | /* SIAStatus bits */ | 236 | /* SIAStatus bits */ |
237 | NWayState = (1 << 14) | (1 << 13) | (1 << 12), | 237 | NWayState = (1 << 14) | (1 << 13) | (1 << 12), |
238 | NWayRestart = (1 << 12), | 238 | NWayRestart = (1 << 12), |
@@ -858,7 +858,7 @@ static void de_stop_rxtx (struct de_private *de) | |||
858 | return; | 858 | return; |
859 | cpu_relax(); | 859 | cpu_relax(); |
860 | } | 860 | } |
861 | 861 | ||
862 | printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name); | 862 | printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name); |
863 | } | 863 | } |
864 | 864 | ||
@@ -931,7 +931,7 @@ static void de_set_media (struct de_private *de) | |||
931 | macmode |= FullDuplex; | 931 | macmode |= FullDuplex; |
932 | else | 932 | else |
933 | macmode &= ~FullDuplex; | 933 | macmode &= ~FullDuplex; |
934 | 934 | ||
935 | if (netif_msg_link(de)) { | 935 | if (netif_msg_link(de)) { |
936 | printk(KERN_INFO "%s: set link %s\n" | 936 | printk(KERN_INFO "%s: set link %s\n" |
937 | KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n" | 937 | KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n" |
@@ -966,9 +966,9 @@ static void de21040_media_timer (unsigned long data) | |||
966 | u32 status = dr32(SIAStatus); | 966 | u32 status = dr32(SIAStatus); |
967 | unsigned int carrier; | 967 | unsigned int carrier; |
968 | unsigned long flags; | 968 | unsigned long flags; |
969 | 969 | ||
970 | carrier = (status & NetCxnErr) ? 0 : 1; | 970 | carrier = (status & NetCxnErr) ? 0 : 1; |
971 | 971 | ||
972 | if (carrier) { | 972 | if (carrier) { |
973 | if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus)) | 973 | if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus)) |
974 | goto no_link_yet; | 974 | goto no_link_yet; |
@@ -985,7 +985,7 @@ static void de21040_media_timer (unsigned long data) | |||
985 | return; | 985 | return; |
986 | } | 986 | } |
987 | 987 | ||
988 | de_link_down(de); | 988 | de_link_down(de); |
989 | 989 | ||
990 | if (de->media_lock) | 990 | if (de->media_lock) |
991 | return; | 991 | return; |
@@ -1039,7 +1039,7 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media) | |||
1039 | return 0; | 1039 | return 0; |
1040 | break; | 1040 | break; |
1041 | } | 1041 | } |
1042 | 1042 | ||
1043 | return 1; | 1043 | return 1; |
1044 | } | 1044 | } |
1045 | 1045 | ||
@@ -1050,9 +1050,9 @@ static void de21041_media_timer (unsigned long data) | |||
1050 | u32 status = dr32(SIAStatus); | 1050 | u32 status = dr32(SIAStatus); |
1051 | unsigned int carrier; | 1051 | unsigned int carrier; |
1052 | unsigned long flags; | 1052 | unsigned long flags; |
1053 | 1053 | ||
1054 | carrier = (status & NetCxnErr) ? 0 : 1; | 1054 | carrier = (status & NetCxnErr) ? 0 : 1; |
1055 | 1055 | ||
1056 | if (carrier) { | 1056 | if (carrier) { |
1057 | if ((de->media_type == DE_MEDIA_TP_AUTO || | 1057 | if ((de->media_type == DE_MEDIA_TP_AUTO || |
1058 | de->media_type == DE_MEDIA_TP || | 1058 | de->media_type == DE_MEDIA_TP || |
@@ -1072,7 +1072,7 @@ static void de21041_media_timer (unsigned long data) | |||
1072 | return; | 1072 | return; |
1073 | } | 1073 | } |
1074 | 1074 | ||
1075 | de_link_down(de); | 1075 | de_link_down(de); |
1076 | 1076 | ||
1077 | /* if media type locked, don't switch media */ | 1077 | /* if media type locked, don't switch media */ |
1078 | if (de->media_lock) | 1078 | if (de->media_lock) |
@@ -1124,7 +1124,7 @@ static void de21041_media_timer (unsigned long data) | |||
1124 | u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO }; | 1124 | u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO }; |
1125 | de_next_media(de, next_states, ARRAY_SIZE(next_states)); | 1125 | de_next_media(de, next_states, ARRAY_SIZE(next_states)); |
1126 | } | 1126 | } |
1127 | 1127 | ||
1128 | set_media: | 1128 | set_media: |
1129 | spin_lock_irqsave(&de->lock, flags); | 1129 | spin_lock_irqsave(&de->lock, flags); |
1130 | de_stop_rxtx(de); | 1130 | de_stop_rxtx(de); |
@@ -1148,7 +1148,7 @@ static void de_media_interrupt (struct de_private *de, u32 status) | |||
1148 | mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); | 1148 | mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); |
1149 | return; | 1149 | return; |
1150 | } | 1150 | } |
1151 | 1151 | ||
1152 | BUG_ON(!(status & LinkFail)); | 1152 | BUG_ON(!(status & LinkFail)); |
1153 | 1153 | ||
1154 | if (netif_carrier_ok(de->dev)) { | 1154 | if (netif_carrier_ok(de->dev)) { |
@@ -1227,7 +1227,7 @@ static int de_init_hw (struct de_private *de) | |||
1227 | int rc; | 1227 | int rc; |
1228 | 1228 | ||
1229 | de_adapter_wake(de); | 1229 | de_adapter_wake(de); |
1230 | 1230 | ||
1231 | macmode = dr32(MacMode) & ~MacModeClear; | 1231 | macmode = dr32(MacMode) & ~MacModeClear; |
1232 | 1232 | ||
1233 | rc = de_reset_mac(de); | 1233 | rc = de_reset_mac(de); |
@@ -1413,7 +1413,7 @@ static int de_close (struct net_device *dev) | |||
1413 | netif_stop_queue(dev); | 1413 | netif_stop_queue(dev); |
1414 | netif_carrier_off(dev); | 1414 | netif_carrier_off(dev); |
1415 | spin_unlock_irqrestore(&de->lock, flags); | 1415 | spin_unlock_irqrestore(&de->lock, flags); |
1416 | 1416 | ||
1417 | free_irq(dev->irq, dev); | 1417 | free_irq(dev->irq, dev); |
1418 | 1418 | ||
1419 | de_free_rings(de); | 1419 | de_free_rings(de); |
@@ -1441,7 +1441,7 @@ static void de_tx_timeout (struct net_device *dev) | |||
1441 | 1441 | ||
1442 | spin_unlock_irq(&de->lock); | 1442 | spin_unlock_irq(&de->lock); |
1443 | enable_irq(dev->irq); | 1443 | enable_irq(dev->irq); |
1444 | 1444 | ||
1445 | /* Update the error counts. */ | 1445 | /* Update the error counts. */ |
1446 | __de_get_stats(de); | 1446 | __de_get_stats(de); |
1447 | 1447 | ||
@@ -1451,7 +1451,7 @@ static void de_tx_timeout (struct net_device *dev) | |||
1451 | de_init_rings(de); | 1451 | de_init_rings(de); |
1452 | 1452 | ||
1453 | de_init_hw(de); | 1453 | de_init_hw(de); |
1454 | 1454 | ||
1455 | netif_wake_queue(dev); | 1455 | netif_wake_queue(dev); |
1456 | } | 1456 | } |
1457 | 1457 | ||
@@ -1459,7 +1459,7 @@ static void __de_get_regs(struct de_private *de, u8 *buf) | |||
1459 | { | 1459 | { |
1460 | int i; | 1460 | int i; |
1461 | u32 *rbuf = (u32 *)buf; | 1461 | u32 *rbuf = (u32 *)buf; |
1462 | 1462 | ||
1463 | /* read all CSRs */ | 1463 | /* read all CSRs */ |
1464 | for (i = 0; i < DE_NUM_REGS; i++) | 1464 | for (i = 0; i < DE_NUM_REGS; i++) |
1465 | rbuf[i] = dr32(i * 8); | 1465 | rbuf[i] = dr32(i * 8); |
@@ -1474,7 +1474,7 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd) | |||
1474 | ecmd->transceiver = XCVR_INTERNAL; | 1474 | ecmd->transceiver = XCVR_INTERNAL; |
1475 | ecmd->phy_address = 0; | 1475 | ecmd->phy_address = 0; |
1476 | ecmd->advertising = de->media_advertise; | 1476 | ecmd->advertising = de->media_advertise; |
1477 | 1477 | ||
1478 | switch (de->media_type) { | 1478 | switch (de->media_type) { |
1479 | case DE_MEDIA_AUI: | 1479 | case DE_MEDIA_AUI: |
1480 | ecmd->port = PORT_AUI; | 1480 | ecmd->port = PORT_AUI; |
@@ -1489,7 +1489,7 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd) | |||
1489 | ecmd->speed = SPEED_10; | 1489 | ecmd->speed = SPEED_10; |
1490 | break; | 1490 | break; |
1491 | } | 1491 | } |
1492 | 1492 | ||
1493 | if (dr32(MacMode) & FullDuplex) | 1493 | if (dr32(MacMode) & FullDuplex) |
1494 | ecmd->duplex = DUPLEX_FULL; | 1494 | ecmd->duplex = DUPLEX_FULL; |
1495 | else | 1495 | else |
@@ -1529,7 +1529,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) | |||
1529 | if (ecmd->autoneg == AUTONEG_ENABLE && | 1529 | if (ecmd->autoneg == AUTONEG_ENABLE && |
1530 | (!(ecmd->advertising & ADVERTISED_Autoneg))) | 1530 | (!(ecmd->advertising & ADVERTISED_Autoneg))) |
1531 | return -EINVAL; | 1531 | return -EINVAL; |
1532 | 1532 | ||
1533 | switch (ecmd->port) { | 1533 | switch (ecmd->port) { |
1534 | case PORT_AUI: | 1534 | case PORT_AUI: |
1535 | new_media = DE_MEDIA_AUI; | 1535 | new_media = DE_MEDIA_AUI; |
@@ -1554,22 +1554,22 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) | |||
1554 | return -EINVAL; | 1554 | return -EINVAL; |
1555 | break; | 1555 | break; |
1556 | } | 1556 | } |
1557 | 1557 | ||
1558 | media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1; | 1558 | media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1; |
1559 | 1559 | ||
1560 | if ((new_media == de->media_type) && | 1560 | if ((new_media == de->media_type) && |
1561 | (media_lock == de->media_lock) && | 1561 | (media_lock == de->media_lock) && |
1562 | (ecmd->advertising == de->media_advertise)) | 1562 | (ecmd->advertising == de->media_advertise)) |
1563 | return 0; /* nothing to change */ | 1563 | return 0; /* nothing to change */ |
1564 | 1564 | ||
1565 | de_link_down(de); | 1565 | de_link_down(de); |
1566 | de_stop_rxtx(de); | 1566 | de_stop_rxtx(de); |
1567 | 1567 | ||
1568 | de->media_type = new_media; | 1568 | de->media_type = new_media; |
1569 | de->media_lock = media_lock; | 1569 | de->media_lock = media_lock; |
1570 | de->media_advertise = ecmd->advertising; | 1570 | de->media_advertise = ecmd->advertising; |
1571 | de_set_media(de); | 1571 | de_set_media(de); |
1572 | 1572 | ||
1573 | return 0; | 1573 | return 0; |
1574 | } | 1574 | } |
1575 | 1575 | ||
@@ -1817,7 +1817,7 @@ static void __init de21041_get_srom_info (struct de_private *de) | |||
1817 | case 0x0204: de->media_type = DE_MEDIA_TP_FD; break; | 1817 | case 0x0204: de->media_type = DE_MEDIA_TP_FD; break; |
1818 | default: de->media_type = DE_MEDIA_TP_AUTO; break; | 1818 | default: de->media_type = DE_MEDIA_TP_AUTO; break; |
1819 | } | 1819 | } |
1820 | 1820 | ||
1821 | if (netif_msg_probe(de)) | 1821 | if (netif_msg_probe(de)) |
1822 | printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n", | 1822 | printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n", |
1823 | de->board_idx, ofs, | 1823 | de->board_idx, ofs, |
@@ -1886,7 +1886,7 @@ static void __init de21041_get_srom_info (struct de_private *de) | |||
1886 | de->media[idx].csr13, | 1886 | de->media[idx].csr13, |
1887 | de->media[idx].csr14, | 1887 | de->media[idx].csr14, |
1888 | de->media[idx].csr15); | 1888 | de->media[idx].csr15); |
1889 | 1889 | ||
1890 | } else if (netif_msg_probe(de)) | 1890 | } else if (netif_msg_probe(de)) |
1891 | printk("\n"); | 1891 | printk("\n"); |
1892 | 1892 | ||
@@ -2118,7 +2118,7 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state) | |||
2118 | 2118 | ||
2119 | spin_unlock_irq(&de->lock); | 2119 | spin_unlock_irq(&de->lock); |
2120 | enable_irq(dev->irq); | 2120 | enable_irq(dev->irq); |
2121 | 2121 | ||
2122 | /* Update the error counts. */ | 2122 | /* Update the error counts. */ |
2123 | __de_get_stats(de); | 2123 | __de_get_stats(de); |
2124 | 2124 | ||
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index f56094102042..da8bd0d62a3f 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c | |||
@@ -41,11 +41,11 @@ | |||
41 | Digital Semiconductor SROM Specification. The driver currently | 41 | Digital Semiconductor SROM Specification. The driver currently |
42 | recognises the following chips: | 42 | recognises the following chips: |
43 | 43 | ||
44 | DC21040 (no SROM) | 44 | DC21040 (no SROM) |
45 | DC21041[A] | 45 | DC21041[A] |
46 | DC21140[A] | 46 | DC21140[A] |
47 | DC21142 | 47 | DC21142 |
48 | DC21143 | 48 | DC21143 |
49 | 49 | ||
50 | So far the driver is known to work with the following cards: | 50 | So far the driver is known to work with the following cards: |
51 | 51 | ||
@@ -55,7 +55,7 @@ | |||
55 | SMC8432 | 55 | SMC8432 |
56 | SMC9332 (w/new SROM) | 56 | SMC9332 (w/new SROM) |
57 | ZNYX31[45] | 57 | ZNYX31[45] |
58 | ZNYX346 10/100 4 port (can act as a 10/100 bridge!) | 58 | ZNYX346 10/100 4 port (can act as a 10/100 bridge!) |
59 | 59 | ||
60 | The driver has been tested on a relatively busy network using the DE425, | 60 | The driver has been tested on a relatively busy network using the DE425, |
61 | DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred | 61 | DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred |
@@ -106,7 +106,7 @@ | |||
106 | loading by: | 106 | loading by: |
107 | 107 | ||
108 | insmod de4x5 io=0xghh where g = bus number | 108 | insmod de4x5 io=0xghh where g = bus number |
109 | hh = device number | 109 | hh = device number |
110 | 110 | ||
111 | NB: autoprobing for modules is now supported by default. You may just | 111 | NB: autoprobing for modules is now supported by default. You may just |
112 | use: | 112 | use: |
@@ -120,11 +120,11 @@ | |||
120 | 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a | 120 | 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a |
121 | kernel with the de4x5 configuration turned off and reboot. | 121 | kernel with the de4x5 configuration turned off and reboot. |
122 | 5) insmod de4x5 [io=0xghh] | 122 | 5) insmod de4x5 [io=0xghh] |
123 | 6) run the net startup bits for your new eth?? interface(s) manually | 123 | 6) run the net startup bits for your new eth?? interface(s) manually |
124 | (usually /etc/rc.inet[12] at boot time). | 124 | (usually /etc/rc.inet[12] at boot time). |
125 | 7) enjoy! | 125 | 7) enjoy! |
126 | 126 | ||
127 | To unload a module, turn off the associated interface(s) | 127 | To unload a module, turn off the associated interface(s) |
128 | 'ifconfig eth?? down' then 'rmmod de4x5'. | 128 | 'ifconfig eth?? down' then 'rmmod de4x5'. |
129 | 129 | ||
130 | Automedia detection is included so that in principle you can disconnect | 130 | Automedia detection is included so that in principle you can disconnect |
@@ -135,7 +135,7 @@ | |||
135 | By default, the driver will now autodetect any DECchip based card. | 135 | By default, the driver will now autodetect any DECchip based card. |
136 | Should you have a need to restrict the driver to DIGITAL only cards, you | 136 | Should you have a need to restrict the driver to DIGITAL only cards, you |
137 | can compile with a DEC_ONLY define, or if loading as a module, use the | 137 | can compile with a DEC_ONLY define, or if loading as a module, use the |
138 | 'dec_only=1' parameter. | 138 | 'dec_only=1' parameter. |
139 | 139 | ||
140 | I've changed the timing routines to use the kernel timer and scheduling | 140 | I've changed the timing routines to use the kernel timer and scheduling |
141 | functions so that the hangs and other assorted problems that occurred | 141 | functions so that the hangs and other assorted problems that occurred |
@@ -204,7 +204,7 @@ | |||
204 | following parameters are allowed: | 204 | following parameters are allowed: |
205 | 205 | ||
206 | fdx for full duplex | 206 | fdx for full duplex |
207 | autosense to set the media/speed; with the following | 207 | autosense to set the media/speed; with the following |
208 | sub-parameters: | 208 | sub-parameters: |
209 | TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO | 209 | TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO |
210 | 210 | ||
@@ -235,14 +235,14 @@ | |||
235 | this automatically or include #define DE4X5_FORCE_EISA on or before | 235 | this automatically or include #define DE4X5_FORCE_EISA on or before |
236 | line 1040 in the driver. | 236 | line 1040 in the driver. |
237 | 237 | ||
238 | TO DO: | 238 | TO DO: |
239 | ------ | 239 | ------ |
240 | 240 | ||
241 | Revision History | 241 | Revision History |
242 | ---------------- | 242 | ---------------- |
243 | 243 | ||
244 | Version Date Description | 244 | Version Date Description |
245 | 245 | ||
246 | 0.1 17-Nov-94 Initial writing. ALPHA code release. | 246 | 0.1 17-Nov-94 Initial writing. ALPHA code release. |
247 | 0.2 13-Jan-95 Added PCI support for DE435's. | 247 | 0.2 13-Jan-95 Added PCI support for DE435's. |
248 | 0.21 19-Jan-95 Added auto media detection. | 248 | 0.21 19-Jan-95 Added auto media detection. |
@@ -251,7 +251,7 @@ | |||
251 | Add request/release_region code. | 251 | Add request/release_region code. |
252 | Add loadable modules support for PCI. | 252 | Add loadable modules support for PCI. |
253 | Clean up loadable modules support. | 253 | Clean up loadable modules support. |
254 | 0.23 28-Feb-95 Added DC21041 and DC21140 support. | 254 | 0.23 28-Feb-95 Added DC21041 and DC21140 support. |
255 | Fix missed frame counter value and initialisation. | 255 | Fix missed frame counter value and initialisation. |
256 | Fixed EISA probe. | 256 | Fixed EISA probe. |
257 | 0.24 11-Apr-95 Change delay routine to use <linux/udelay>. | 257 | 0.24 11-Apr-95 Change delay routine to use <linux/udelay>. |
@@ -280,7 +280,7 @@ | |||
280 | Add kernel timer code (h/w is too flaky). | 280 | Add kernel timer code (h/w is too flaky). |
281 | Add MII based PHY autosense. | 281 | Add MII based PHY autosense. |
282 | Add new multicasting code. | 282 | Add new multicasting code. |
283 | Add new autosense algorithms for media/mode | 283 | Add new autosense algorithms for media/mode |
284 | selection using kernel scheduling/timing. | 284 | selection using kernel scheduling/timing. |
285 | Re-formatted. | 285 | Re-formatted. |
286 | Made changes suggested by <jeff@router.patch.net>: | 286 | Made changes suggested by <jeff@router.patch.net>: |
@@ -307,10 +307,10 @@ | |||
307 | Add Accton to the list of broken cards. | 307 | Add Accton to the list of broken cards. |
308 | Fix TX under-run bug for non DC21140 chips. | 308 | Fix TX under-run bug for non DC21140 chips. |
309 | Fix boot command probe bug in alloc_device() as | 309 | Fix boot command probe bug in alloc_device() as |
310 | reported by <koen.gadeyne@barco.com> and | 310 | reported by <koen.gadeyne@barco.com> and |
311 | <orava@nether.tky.hut.fi>. | 311 | <orava@nether.tky.hut.fi>. |
312 | Add cache locks to prevent a race condition as | 312 | Add cache locks to prevent a race condition as |
313 | reported by <csd@microplex.com> and | 313 | reported by <csd@microplex.com> and |
314 | <baba@beckman.uiuc.edu>. | 314 | <baba@beckman.uiuc.edu>. |
315 | Upgraded alloc_device() code. | 315 | Upgraded alloc_device() code. |
316 | 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion | 316 | 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion |
@@ -322,7 +322,7 @@ | |||
322 | with a loopback packet. | 322 | with a loopback packet. |
323 | 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported | 323 | 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported |
324 | by <bhat@mundook.cs.mu.OZ.AU> | 324 | by <bhat@mundook.cs.mu.OZ.AU> |
325 | 0.45 8-Dec-96 Include endian functions for PPC use, from work | 325 | 0.45 8-Dec-96 Include endian functions for PPC use, from work |
326 | by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>. | 326 | by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>. |
327 | 0.451 28-Dec-96 Added fix to allow autoprobe for modules after | 327 | 0.451 28-Dec-96 Added fix to allow autoprobe for modules after |
328 | suggestion from <mjacob@feral.com>. | 328 | suggestion from <mjacob@feral.com>. |
@@ -346,14 +346,14 @@ | |||
346 | <paubert@iram.es>. | 346 | <paubert@iram.es>. |
347 | 0.52 26-Apr-97 Some changes may not credit the right people - | 347 | 0.52 26-Apr-97 Some changes may not credit the right people - |
348 | a disk crash meant I lost some mail. | 348 | a disk crash meant I lost some mail. |
349 | Change RX interrupt routine to drop rather than | 349 | Change RX interrupt routine to drop rather than |
350 | defer packets to avoid hang reported by | 350 | defer packets to avoid hang reported by |
351 | <g.thomas@opengroup.org>. | 351 | <g.thomas@opengroup.org>. |
352 | Fix srom_exec() to return for COMPACT and type 1 | 352 | Fix srom_exec() to return for COMPACT and type 1 |
353 | infoblocks. | 353 | infoblocks. |
354 | Added DC21142 and DC21143 functions. | 354 | Added DC21142 and DC21143 functions. |
355 | Added byte counters from <phil@tazenda.demon.co.uk> | 355 | Added byte counters from <phil@tazenda.demon.co.uk> |
356 | Added SA_INTERRUPT temporary fix from | 356 | Added SA_INTERRUPT temporary fix from |
357 | <mjacob@feral.com>. | 357 | <mjacob@feral.com>. |
358 | 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during | 358 | 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during |
359 | module load: bug reported by | 359 | module load: bug reported by |
@@ -363,10 +363,10 @@ | |||
363 | Make above search independent of BIOS device scan | 363 | Make above search independent of BIOS device scan |
364 | direction. | 364 | direction. |
365 | Completed DC2114[23] autosense functions. | 365 | Completed DC2114[23] autosense functions. |
366 | 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by | 366 | 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by |
367 | <robin@intercore.com> | 367 | <robin@intercore.com> |
368 | Fix type1_infoblock() bug introduced in 0.53, from | 368 | Fix type1_infoblock() bug introduced in 0.53, from |
369 | problem reports by | 369 | problem reports by |
370 | <parmee@postecss.ncrfran.france.ncr.com> and | 370 | <parmee@postecss.ncrfran.france.ncr.com> and |
371 | <jo@ice.dillingen.baynet.de>. | 371 | <jo@ice.dillingen.baynet.de>. |
372 | Added argument list to set up each board from either | 372 | Added argument list to set up each board from either |
@@ -374,7 +374,7 @@ | |||
374 | Added generic MII PHY functionality to deal with | 374 | Added generic MII PHY functionality to deal with |
375 | newer PHY chips. | 375 | newer PHY chips. |
376 | Fix the mess in 2.1.67. | 376 | Fix the mess in 2.1.67. |
377 | 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by | 377 | 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by |
378 | <redhat@cococo.net>. | 378 | <redhat@cococo.net>. |
379 | Fix bug in pci_probe() for 64 bit systems reported | 379 | Fix bug in pci_probe() for 64 bit systems reported |
380 | by <belliott@accessone.com>. | 380 | by <belliott@accessone.com>. |
@@ -398,7 +398,7 @@ | |||
398 | version. I hope nothing is broken... | 398 | version. I hope nothing is broken... |
399 | Add TX done interrupt modification from suggestion | 399 | Add TX done interrupt modification from suggestion |
400 | by <Austin.Donnelly@cl.cam.ac.uk>. | 400 | by <Austin.Donnelly@cl.cam.ac.uk>. |
401 | Fix is_anc_capable() bug reported by | 401 | Fix is_anc_capable() bug reported by |
402 | <Austin.Donnelly@cl.cam.ac.uk>. | 402 | <Austin.Donnelly@cl.cam.ac.uk>. |
403 | Fix type[13]_infoblock() bug: during MII search, PHY | 403 | Fix type[13]_infoblock() bug: during MII search, PHY |
404 | lp->rst not run because lp->ibn not initialised - | 404 | lp->rst not run because lp->ibn not initialised - |
@@ -413,7 +413,7 @@ | |||
413 | Add an_exception() for old ZYNX346 and fix compile | 413 | Add an_exception() for old ZYNX346 and fix compile |
414 | warning on PPC & SPARC, from <ecd@skynet.be>. | 414 | warning on PPC & SPARC, from <ecd@skynet.be>. |
415 | Fix lastPCI to correctly work with compiled in | 415 | Fix lastPCI to correctly work with compiled in |
416 | kernels and modules from bug report by | 416 | kernels and modules from bug report by |
417 | <Zlatko.Calusic@CARNet.hr> et al. | 417 | <Zlatko.Calusic@CARNet.hr> et al. |
418 | 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages | 418 | 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages |
419 | when media is unconnected. | 419 | when media is unconnected. |
@@ -425,7 +425,7 @@ | |||
425 | 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using | 425 | 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using |
426 | a 21143 by <mmporter@home.com>. | 426 | a 21143 by <mmporter@home.com>. |
427 | Change PCI/EISA bus probing order. | 427 | Change PCI/EISA bus probing order. |
428 | 0.545 28-Nov-99 Further Moto SROM bug fix from | 428 | 0.545 28-Nov-99 Further Moto SROM bug fix from |
429 | <mporter@eng.mcd.mot.com> | 429 | <mporter@eng.mcd.mot.com> |
430 | Remove double checking for DEBUG_RX in de4x5_dbg_rx() | 430 | Remove double checking for DEBUG_RX in de4x5_dbg_rx() |
431 | from report by <geert@linux-m68k.org> | 431 | from report by <geert@linux-m68k.org> |
@@ -434,8 +434,8 @@ | |||
434 | variable 'pb', on a non de4x5 PCI device, in this | 434 | variable 'pb', on a non de4x5 PCI device, in this |
435 | case a PCI bridge (DEC chip 21152). The value of | 435 | case a PCI bridge (DEC chip 21152). The value of |
436 | 'pb' is now only initialized if a de4x5 chip is | 436 | 'pb' is now only initialized if a de4x5 chip is |
437 | present. | 437 | present. |
438 | <france@handhelds.org> | 438 | <france@handhelds.org> |
439 | 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com> | 439 | 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com> |
440 | 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and | 440 | 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and |
441 | generic DMA APIs. Fixed DE425 support on Alpha. | 441 | generic DMA APIs. Fixed DE425 support on Alpha. |
@@ -584,7 +584,7 @@ static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION); | |||
584 | 584 | ||
585 | /* | 585 | /* |
586 | ** Allow per adapter set up. For modules this is simply a command line | 586 | ** Allow per adapter set up. For modules this is simply a command line |
587 | ** parameter, e.g.: | 587 | ** parameter, e.g.: |
588 | ** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'. | 588 | ** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'. |
589 | ** | 589 | ** |
590 | ** For a compiled in driver, place e.g. | 590 | ** For a compiled in driver, place e.g. |
@@ -655,7 +655,7 @@ static c_char *de4x5_signatures[] = DE4X5_SIGNATURE; | |||
655 | ** Memory Alignment. Each descriptor is 4 longwords long. To force a | 655 | ** Memory Alignment. Each descriptor is 4 longwords long. To force a |
656 | ** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and | 656 | ** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and |
657 | ** DESC_ALIGN. ALIGN aligns the start address of the private memory area | 657 | ** DESC_ALIGN. ALIGN aligns the start address of the private memory area |
658 | ** and hence the RX descriptor ring's first entry. | 658 | ** and hence the RX descriptor ring's first entry. |
659 | */ | 659 | */ |
660 | #define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */ | 660 | #define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */ |
661 | #define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */ | 661 | #define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */ |
@@ -1081,8 +1081,8 @@ static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = { | |||
1081 | mdelay(2); /* Wait for 2ms */\ | 1081 | mdelay(2); /* Wait for 2ms */\ |
1082 | } | 1082 | } |
1083 | 1083 | ||
1084 | 1084 | ||
1085 | static int __devinit | 1085 | static int __devinit |
1086 | de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | 1086 | de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) |
1087 | { | 1087 | { |
1088 | char name[DE4X5_NAME_LENGTH + 1]; | 1088 | char name[DE4X5_NAME_LENGTH + 1]; |
@@ -1102,12 +1102,12 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1102 | mdelay(10); | 1102 | mdelay(10); |
1103 | 1103 | ||
1104 | RESET_DE4X5; | 1104 | RESET_DE4X5; |
1105 | 1105 | ||
1106 | if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) { | 1106 | if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) { |
1107 | return -ENXIO; /* Hardware could not reset */ | 1107 | return -ENXIO; /* Hardware could not reset */ |
1108 | } | 1108 | } |
1109 | 1109 | ||
1110 | /* | 1110 | /* |
1111 | ** Now find out what kind of DC21040/DC21041/DC21140 board we have. | 1111 | ** Now find out what kind of DC21040/DC21041/DC21140 board we have. |
1112 | */ | 1112 | */ |
1113 | lp->useSROM = FALSE; | 1113 | lp->useSROM = FALSE; |
@@ -1116,21 +1116,21 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1116 | } else { | 1116 | } else { |
1117 | EISA_signature(name, gendev); | 1117 | EISA_signature(name, gendev); |
1118 | } | 1118 | } |
1119 | 1119 | ||
1120 | if (*name == '\0') { /* Not found a board signature */ | 1120 | if (*name == '\0') { /* Not found a board signature */ |
1121 | return -ENXIO; | 1121 | return -ENXIO; |
1122 | } | 1122 | } |
1123 | 1123 | ||
1124 | dev->base_addr = iobase; | 1124 | dev->base_addr = iobase; |
1125 | printk ("%s: %s at 0x%04lx", gendev->bus_id, name, iobase); | 1125 | printk ("%s: %s at 0x%04lx", gendev->bus_id, name, iobase); |
1126 | 1126 | ||
1127 | printk(", h/w address "); | 1127 | printk(", h/w address "); |
1128 | status = get_hw_addr(dev); | 1128 | status = get_hw_addr(dev); |
1129 | for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */ | 1129 | for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */ |
1130 | printk("%2.2x:", dev->dev_addr[i]); | 1130 | printk("%2.2x:", dev->dev_addr[i]); |
1131 | } | 1131 | } |
1132 | printk("%2.2x,\n", dev->dev_addr[i]); | 1132 | printk("%2.2x,\n", dev->dev_addr[i]); |
1133 | 1133 | ||
1134 | if (status != 0) { | 1134 | if (status != 0) { |
1135 | printk(" which has an Ethernet PROM CRC error.\n"); | 1135 | printk(" which has an Ethernet PROM CRC error.\n"); |
1136 | return -ENXIO; | 1136 | return -ENXIO; |
@@ -1171,10 +1171,10 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1171 | } | 1171 | } |
1172 | 1172 | ||
1173 | lp->tx_ring = lp->rx_ring + NUM_RX_DESC; | 1173 | lp->tx_ring = lp->rx_ring + NUM_RX_DESC; |
1174 | 1174 | ||
1175 | /* | 1175 | /* |
1176 | ** Set up the RX descriptor ring (Intels) | 1176 | ** Set up the RX descriptor ring (Intels) |
1177 | ** Allocate contiguous receive buffers, long word aligned (Alphas) | 1177 | ** Allocate contiguous receive buffers, long word aligned (Alphas) |
1178 | */ | 1178 | */ |
1179 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY) | 1179 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY) |
1180 | for (i=0; i<NUM_RX_DESC; i++) { | 1180 | for (i=0; i<NUM_RX_DESC; i++) { |
@@ -1210,7 +1210,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1210 | 1210 | ||
1211 | lp->rxRingSize = NUM_RX_DESC; | 1211 | lp->rxRingSize = NUM_RX_DESC; |
1212 | lp->txRingSize = NUM_TX_DESC; | 1212 | lp->txRingSize = NUM_TX_DESC; |
1213 | 1213 | ||
1214 | /* Write the end of list marker to the descriptor lists */ | 1214 | /* Write the end of list marker to the descriptor lists */ |
1215 | lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER); | 1215 | lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER); |
1216 | lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER); | 1216 | lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER); |
@@ -1219,7 +1219,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1219 | outl(lp->dma_rings, DE4X5_RRBA); | 1219 | outl(lp->dma_rings, DE4X5_RRBA); |
1220 | outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), | 1220 | outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), |
1221 | DE4X5_TRBA); | 1221 | DE4X5_TRBA); |
1222 | 1222 | ||
1223 | /* Initialise the IRQ mask and Enable/Disable */ | 1223 | /* Initialise the IRQ mask and Enable/Disable */ |
1224 | lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM; | 1224 | lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM; |
1225 | lp->irq_en = IMR_NIM | IMR_AIM; | 1225 | lp->irq_en = IMR_NIM | IMR_AIM; |
@@ -1252,7 +1252,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1252 | if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) { | 1252 | if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) { |
1253 | mii_get_phy(dev); | 1253 | mii_get_phy(dev); |
1254 | } | 1254 | } |
1255 | 1255 | ||
1256 | #ifndef __sparc_v9__ | 1256 | #ifndef __sparc_v9__ |
1257 | printk(" and requires IRQ%d (provided by %s).\n", dev->irq, | 1257 | printk(" and requires IRQ%d (provided by %s).\n", dev->irq, |
1258 | #else | 1258 | #else |
@@ -1260,11 +1260,11 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1260 | #endif | 1260 | #endif |
1261 | ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); | 1261 | ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); |
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | if (de4x5_debug & DEBUG_VERSION) { | 1264 | if (de4x5_debug & DEBUG_VERSION) { |
1265 | printk(version); | 1265 | printk(version); |
1266 | } | 1266 | } |
1267 | 1267 | ||
1268 | /* The DE4X5-specific entries in the device structure. */ | 1268 | /* The DE4X5-specific entries in the device structure. */ |
1269 | SET_MODULE_OWNER(dev); | 1269 | SET_MODULE_OWNER(dev); |
1270 | SET_NETDEV_DEV(dev, gendev); | 1270 | SET_NETDEV_DEV(dev, gendev); |
@@ -1274,23 +1274,23 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | |||
1274 | dev->get_stats = &de4x5_get_stats; | 1274 | dev->get_stats = &de4x5_get_stats; |
1275 | dev->set_multicast_list = &set_multicast_list; | 1275 | dev->set_multicast_list = &set_multicast_list; |
1276 | dev->do_ioctl = &de4x5_ioctl; | 1276 | dev->do_ioctl = &de4x5_ioctl; |
1277 | 1277 | ||
1278 | dev->mem_start = 0; | 1278 | dev->mem_start = 0; |
1279 | 1279 | ||
1280 | /* Fill in the generic fields of the device structure. */ | 1280 | /* Fill in the generic fields of the device structure. */ |
1281 | if ((status = register_netdev (dev))) { | 1281 | if ((status = register_netdev (dev))) { |
1282 | dma_free_coherent (gendev, lp->dma_size, | 1282 | dma_free_coherent (gendev, lp->dma_size, |
1283 | lp->rx_ring, lp->dma_rings); | 1283 | lp->rx_ring, lp->dma_rings); |
1284 | return status; | 1284 | return status; |
1285 | } | 1285 | } |
1286 | 1286 | ||
1287 | /* Let the adapter sleep to save power */ | 1287 | /* Let the adapter sleep to save power */ |
1288 | yawn(dev, SLEEP); | 1288 | yawn(dev, SLEEP); |
1289 | 1289 | ||
1290 | return status; | 1290 | return status; |
1291 | } | 1291 | } |
1292 | 1292 | ||
1293 | 1293 | ||
1294 | static int | 1294 | static int |
1295 | de4x5_open(struct net_device *dev) | 1295 | de4x5_open(struct net_device *dev) |
1296 | { | 1296 | { |
@@ -1312,15 +1312,15 @@ de4x5_open(struct net_device *dev) | |||
1312 | */ | 1312 | */ |
1313 | yawn(dev, WAKEUP); | 1313 | yawn(dev, WAKEUP); |
1314 | 1314 | ||
1315 | /* | 1315 | /* |
1316 | ** Re-initialize the DE4X5... | 1316 | ** Re-initialize the DE4X5... |
1317 | */ | 1317 | */ |
1318 | status = de4x5_init(dev); | 1318 | status = de4x5_init(dev); |
1319 | spin_lock_init(&lp->lock); | 1319 | spin_lock_init(&lp->lock); |
1320 | lp->state = OPEN; | 1320 | lp->state = OPEN; |
1321 | de4x5_dbg_open(dev); | 1321 | de4x5_dbg_open(dev); |
1322 | 1322 | ||
1323 | if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ, | 1323 | if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ, |
1324 | lp->adapter_name, dev)) { | 1324 | lp->adapter_name, dev)) { |
1325 | printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); | 1325 | printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); |
1326 | if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ, | 1326 | if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ, |
@@ -1340,11 +1340,11 @@ de4x5_open(struct net_device *dev) | |||
1340 | 1340 | ||
1341 | lp->interrupt = UNMASK_INTERRUPTS; | 1341 | lp->interrupt = UNMASK_INTERRUPTS; |
1342 | dev->trans_start = jiffies; | 1342 | dev->trans_start = jiffies; |
1343 | 1343 | ||
1344 | START_DE4X5; | 1344 | START_DE4X5; |
1345 | 1345 | ||
1346 | de4x5_setup_intr(dev); | 1346 | de4x5_setup_intr(dev); |
1347 | 1347 | ||
1348 | if (de4x5_debug & DEBUG_OPEN) { | 1348 | if (de4x5_debug & DEBUG_OPEN) { |
1349 | printk("\tsts: 0x%08x\n", inl(DE4X5_STS)); | 1349 | printk("\tsts: 0x%08x\n", inl(DE4X5_STS)); |
1350 | printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR)); | 1350 | printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR)); |
@@ -1355,7 +1355,7 @@ de4x5_open(struct net_device *dev) | |||
1355 | printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR)); | 1355 | printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR)); |
1356 | printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR)); | 1356 | printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR)); |
1357 | } | 1357 | } |
1358 | 1358 | ||
1359 | return status; | 1359 | return status; |
1360 | } | 1360 | } |
1361 | 1361 | ||
@@ -1369,15 +1369,15 @@ de4x5_open(struct net_device *dev) | |||
1369 | */ | 1369 | */ |
1370 | static int | 1370 | static int |
1371 | de4x5_init(struct net_device *dev) | 1371 | de4x5_init(struct net_device *dev) |
1372 | { | 1372 | { |
1373 | /* Lock out other processes whilst setting up the hardware */ | 1373 | /* Lock out other processes whilst setting up the hardware */ |
1374 | netif_stop_queue(dev); | 1374 | netif_stop_queue(dev); |
1375 | 1375 | ||
1376 | de4x5_sw_reset(dev); | 1376 | de4x5_sw_reset(dev); |
1377 | 1377 | ||
1378 | /* Autoconfigure the connected port */ | 1378 | /* Autoconfigure the connected port */ |
1379 | autoconf_media(dev); | 1379 | autoconf_media(dev); |
1380 | 1380 | ||
1381 | return 0; | 1381 | return 0; |
1382 | } | 1382 | } |
1383 | 1383 | ||
@@ -1388,7 +1388,7 @@ de4x5_sw_reset(struct net_device *dev) | |||
1388 | u_long iobase = dev->base_addr; | 1388 | u_long iobase = dev->base_addr; |
1389 | int i, j, status = 0; | 1389 | int i, j, status = 0; |
1390 | s32 bmr, omr; | 1390 | s32 bmr, omr; |
1391 | 1391 | ||
1392 | /* Select the MII or SRL port now and RESET the MAC */ | 1392 | /* Select the MII or SRL port now and RESET the MAC */ |
1393 | if (!lp->useSROM) { | 1393 | if (!lp->useSROM) { |
1394 | if (lp->phy[lp->active].id != 0) { | 1394 | if (lp->phy[lp->active].id != 0) { |
@@ -1399,7 +1399,7 @@ de4x5_sw_reset(struct net_device *dev) | |||
1399 | de4x5_switch_mac_port(dev); | 1399 | de4x5_switch_mac_port(dev); |
1400 | } | 1400 | } |
1401 | 1401 | ||
1402 | /* | 1402 | /* |
1403 | ** Set the programmable burst length to 8 longwords for all the DC21140 | 1403 | ** Set the programmable burst length to 8 longwords for all the DC21140 |
1404 | ** Fasternet chips and 4 longwords for all others: DMA errors result | 1404 | ** Fasternet chips and 4 longwords for all others: DMA errors result |
1405 | ** without these values. Cache align 16 long. | 1405 | ** without these values. Cache align 16 long. |
@@ -1416,23 +1416,23 @@ de4x5_sw_reset(struct net_device *dev) | |||
1416 | outl(lp->dma_rings, DE4X5_RRBA); | 1416 | outl(lp->dma_rings, DE4X5_RRBA); |
1417 | outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), | 1417 | outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), |
1418 | DE4X5_TRBA); | 1418 | DE4X5_TRBA); |
1419 | 1419 | ||
1420 | lp->rx_new = lp->rx_old = 0; | 1420 | lp->rx_new = lp->rx_old = 0; |
1421 | lp->tx_new = lp->tx_old = 0; | 1421 | lp->tx_new = lp->tx_old = 0; |
1422 | 1422 | ||
1423 | for (i = 0; i < lp->rxRingSize; i++) { | 1423 | for (i = 0; i < lp->rxRingSize; i++) { |
1424 | lp->rx_ring[i].status = cpu_to_le32(R_OWN); | 1424 | lp->rx_ring[i].status = cpu_to_le32(R_OWN); |
1425 | } | 1425 | } |
1426 | 1426 | ||
1427 | for (i = 0; i < lp->txRingSize; i++) { | 1427 | for (i = 0; i < lp->txRingSize; i++) { |
1428 | lp->tx_ring[i].status = cpu_to_le32(0); | 1428 | lp->tx_ring[i].status = cpu_to_le32(0); |
1429 | } | 1429 | } |
1430 | 1430 | ||
1431 | barrier(); | 1431 | barrier(); |
1432 | 1432 | ||
1433 | /* Build the setup frame depending on filtering mode */ | 1433 | /* Build the setup frame depending on filtering mode */ |
1434 | SetMulticastFilter(dev); | 1434 | SetMulticastFilter(dev); |
1435 | 1435 | ||
1436 | load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1); | 1436 | load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1); |
1437 | outl(omr|OMR_ST, DE4X5_OMR); | 1437 | outl(omr|OMR_ST, DE4X5_OMR); |
1438 | 1438 | ||
@@ -1445,18 +1445,18 @@ de4x5_sw_reset(struct net_device *dev) | |||
1445 | outl(omr, DE4X5_OMR); /* Stop everything! */ | 1445 | outl(omr, DE4X5_OMR); /* Stop everything! */ |
1446 | 1446 | ||
1447 | if (j == 0) { | 1447 | if (j == 0) { |
1448 | printk("%s: Setup frame timed out, status %08x\n", dev->name, | 1448 | printk("%s: Setup frame timed out, status %08x\n", dev->name, |
1449 | inl(DE4X5_STS)); | 1449 | inl(DE4X5_STS)); |
1450 | status = -EIO; | 1450 | status = -EIO; |
1451 | } | 1451 | } |
1452 | 1452 | ||
1453 | lp->tx_new = (++lp->tx_new) % lp->txRingSize; | 1453 | lp->tx_new = (++lp->tx_new) % lp->txRingSize; |
1454 | lp->tx_old = lp->tx_new; | 1454 | lp->tx_old = lp->tx_new; |
1455 | 1455 | ||
1456 | return status; | 1456 | return status; |
1457 | } | 1457 | } |
1458 | 1458 | ||
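The ring indices in the routines above wrap modulo the ring size. As written, lp->tx_new = (++lp->tx_new) % lp->txRingSize assigns to tx_new twice in a single expression; a sketch of the intended, well-defined form of that update (illustrative only, not part of this patch):

    /* Advance a transmit ring index with wraparound; same intent as the
     * expression above, but the index is modified exactly once.
     */
    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;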
1459 | /* | 1459 | /* |
1460 | ** Writes a socket buffer address to the next available transmit descriptor. | 1460 | ** Writes a socket buffer address to the next available transmit descriptor. |
1461 | */ | 1461 | */ |
1462 | static int | 1462 | static int |
@@ -1469,9 +1469,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) | |||
1469 | 1469 | ||
1470 | netif_stop_queue(dev); | 1470 | netif_stop_queue(dev); |
1471 | if (lp->tx_enable == NO) { /* Cannot send for now */ | 1471 | if (lp->tx_enable == NO) { /* Cannot send for now */ |
1472 | return -1; | 1472 | return -1; |
1473 | } | 1473 | } |
1474 | 1474 | ||
1475 | /* | 1475 | /* |
1476 | ** Clean out the TX ring asynchronously to interrupts - sometimes the | 1476 | ** Clean out the TX ring asynchronously to interrupts - sometimes the |
1477 | ** interrupts are lost by delayed descriptor status updates relative to | 1477 | ** interrupts are lost by delayed descriptor status updates relative to |
@@ -1482,7 +1482,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) | |||
1482 | spin_unlock_irqrestore(&lp->lock, flags); | 1482 | spin_unlock_irqrestore(&lp->lock, flags); |
1483 | 1483 | ||
1484 | /* Test if cache is already locked - requeue skb if so */ | 1484 | /* Test if cache is already locked - requeue skb if so */ |
1485 | if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt) | 1485 | if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt) |
1486 | return -1; | 1486 | return -1; |
1487 | 1487 | ||
1488 | /* Transmit descriptor ring full or stale skb */ | 1488 | /* Transmit descriptor ring full or stale skb */ |
@@ -1509,10 +1509,10 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) | |||
1509 | load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); | 1509 | load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); |
1510 | lp->stats.tx_bytes += skb->len; | 1510 | lp->stats.tx_bytes += skb->len; |
1511 | outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */ | 1511 | outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */ |
1512 | 1512 | ||
1513 | lp->tx_new = (++lp->tx_new) % lp->txRingSize; | 1513 | lp->tx_new = (++lp->tx_new) % lp->txRingSize; |
1514 | dev->trans_start = jiffies; | 1514 | dev->trans_start = jiffies; |
1515 | 1515 | ||
1516 | if (TX_BUFFS_AVAIL) { | 1516 | if (TX_BUFFS_AVAIL) { |
1517 | netif_start_queue(dev); /* Another pkt may be queued */ | 1517 | netif_start_queue(dev); /* Another pkt may be queued */ |
1518 | } | 1518 | } |
@@ -1521,15 +1521,15 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) | |||
1521 | } | 1521 | } |
1522 | if (skb) de4x5_putb_cache(dev, skb); | 1522 | if (skb) de4x5_putb_cache(dev, skb); |
1523 | } | 1523 | } |
1524 | 1524 | ||
1525 | lp->cache.lock = 0; | 1525 | lp->cache.lock = 0; |
1526 | 1526 | ||
1527 | return status; | 1527 | return status; |
1528 | } | 1528 | } |
1529 | 1529 | ||
1530 | /* | 1530 | /* |
1531 | ** The DE4X5 interrupt handler. | 1531 | ** The DE4X5 interrupt handler. |
1532 | ** | 1532 | ** |
1533 | ** I/O Read/Writes through intermediate PCI bridges are never 'posted', | 1533 | ** I/O Read/Writes through intermediate PCI bridges are never 'posted', |
1534 | ** so that the asserted interrupt always has some real data to work with - | 1534 | ** so that the asserted interrupt always has some real data to work with - |
1535 | ** if these I/O accesses are ever changed to memory accesses, ensure the | 1535 | ** if these I/O accesses are ever changed to memory accesses, ensure the |
@@ -1546,7 +1546,7 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
1546 | s32 imr, omr, sts, limit; | 1546 | s32 imr, omr, sts, limit; |
1547 | u_long iobase; | 1547 | u_long iobase; |
1548 | unsigned int handled = 0; | 1548 | unsigned int handled = 0; |
1549 | 1549 | ||
1550 | if (dev == NULL) { | 1550 | if (dev == NULL) { |
1551 | printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq); | 1551 | printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq); |
1552 | return IRQ_NONE; | 1552 | return IRQ_NONE; |
@@ -1554,35 +1554,35 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
1554 | lp = netdev_priv(dev); | 1554 | lp = netdev_priv(dev); |
1555 | spin_lock(&lp->lock); | 1555 | spin_lock(&lp->lock); |
1556 | iobase = dev->base_addr; | 1556 | iobase = dev->base_addr; |
1557 | 1557 | ||
1558 | DISABLE_IRQs; /* Ensure non re-entrancy */ | 1558 | DISABLE_IRQs; /* Ensure non re-entrancy */ |
1559 | 1559 | ||
1560 | if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt)) | 1560 | if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt)) |
1561 | printk("%s: Re-entering the interrupt handler.\n", dev->name); | 1561 | printk("%s: Re-entering the interrupt handler.\n", dev->name); |
1562 | 1562 | ||
1563 | synchronize_irq(dev->irq); | 1563 | synchronize_irq(dev->irq); |
1564 | 1564 | ||
1565 | for (limit=0; limit<8; limit++) { | 1565 | for (limit=0; limit<8; limit++) { |
1566 | sts = inl(DE4X5_STS); /* Read IRQ status */ | 1566 | sts = inl(DE4X5_STS); /* Read IRQ status */ |
1567 | outl(sts, DE4X5_STS); /* Reset the board interrupts */ | 1567 | outl(sts, DE4X5_STS); /* Reset the board interrupts */ |
1568 | 1568 | ||
1569 | if (!(sts & lp->irq_mask)) break;/* All done */ | 1569 | if (!(sts & lp->irq_mask)) break;/* All done */ |
1570 | handled = 1; | 1570 | handled = 1; |
1571 | 1571 | ||
1572 | if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */ | 1572 | if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */ |
1573 | de4x5_rx(dev); | 1573 | de4x5_rx(dev); |
1574 | 1574 | ||
1575 | if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */ | 1575 | if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */ |
1576 | de4x5_tx(dev); | 1576 | de4x5_tx(dev); |
1577 | 1577 | ||
1578 | if (sts & STS_LNF) { /* TP Link has failed */ | 1578 | if (sts & STS_LNF) { /* TP Link has failed */ |
1579 | lp->irq_mask &= ~IMR_LFM; | 1579 | lp->irq_mask &= ~IMR_LFM; |
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | if (sts & STS_UNF) { /* Transmit underrun */ | 1582 | if (sts & STS_UNF) { /* Transmit underrun */ |
1583 | de4x5_txur(dev); | 1583 | de4x5_txur(dev); |
1584 | } | 1584 | } |
1585 | 1585 | ||
1586 | if (sts & STS_SE) { /* Bus Error */ | 1586 | if (sts & STS_SE) { /* Bus Error */ |
1587 | STOP_DE4X5; | 1587 | STOP_DE4X5; |
1588 | printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n", | 1588 | printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n", |
@@ -1603,7 +1603,7 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
1603 | lp->interrupt = UNMASK_INTERRUPTS; | 1603 | lp->interrupt = UNMASK_INTERRUPTS; |
1604 | ENABLE_IRQs; | 1604 | ENABLE_IRQs; |
1605 | spin_unlock(&lp->lock); | 1605 | spin_unlock(&lp->lock); |
1606 | 1606 | ||
1607 | return IRQ_RETVAL(handled); | 1607 | return IRQ_RETVAL(handled); |
1608 | } | 1608 | } |
1609 | 1609 | ||
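Each pass of the loop above latches the pending sources from DE4X5_STS and immediately writes the same value back, since the controller clears a status bit when a one is written to it; this is also why the preceding comment requires these CSR accesses to stay as un-posted I/O. The acknowledge step in isolation (illustrative only):

    sts = inl(DE4X5_STS);    /* latch the pending interrupt status  */
    outl(sts, DE4X5_STS);    /* writing the bits back clears them   */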
@@ -1614,11 +1614,11 @@ de4x5_rx(struct net_device *dev) | |||
1614 | u_long iobase = dev->base_addr; | 1614 | u_long iobase = dev->base_addr; |
1615 | int entry; | 1615 | int entry; |
1616 | s32 status; | 1616 | s32 status; |
1617 | 1617 | ||
1618 | for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0; | 1618 | for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0; |
1619 | entry=lp->rx_new) { | 1619 | entry=lp->rx_new) { |
1620 | status = (s32)le32_to_cpu(lp->rx_ring[entry].status); | 1620 | status = (s32)le32_to_cpu(lp->rx_ring[entry].status); |
1621 | 1621 | ||
1622 | if (lp->rx_ovf) { | 1622 | if (lp->rx_ovf) { |
1623 | if (inl(DE4X5_MFC) & MFC_FOCM) { | 1623 | if (inl(DE4X5_MFC) & MFC_FOCM) { |
1624 | de4x5_rx_ovfc(dev); | 1624 | de4x5_rx_ovfc(dev); |
@@ -1629,7 +1629,7 @@ de4x5_rx(struct net_device *dev) | |||
1629 | if (status & RD_FS) { /* Remember the start of frame */ | 1629 | if (status & RD_FS) { /* Remember the start of frame */ |
1630 | lp->rx_old = entry; | 1630 | lp->rx_old = entry; |
1631 | } | 1631 | } |
1632 | 1632 | ||
1633 | if (status & RD_LS) { /* Valid frame status */ | 1633 | if (status & RD_LS) { /* Valid frame status */ |
1634 | if (lp->tx_enable) lp->linkOK++; | 1634 | if (lp->tx_enable) lp->linkOK++; |
1635 | if (status & RD_ES) { /* There was an error. */ | 1635 | if (status & RD_ES) { /* There was an error. */ |
@@ -1646,9 +1646,9 @@ de4x5_rx(struct net_device *dev) | |||
1646 | struct sk_buff *skb; | 1646 | struct sk_buff *skb; |
1647 | short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status) | 1647 | short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status) |
1648 | >> 16) - 4; | 1648 | >> 16) - 4; |
1649 | 1649 | ||
1650 | if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) { | 1650 | if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) { |
1651 | printk("%s: Insufficient memory; nuking packet.\n", | 1651 | printk("%s: Insufficient memory; nuking packet.\n", |
1652 | dev->name); | 1652 | dev->name); |
1653 | lp->stats.rx_dropped++; | 1653 | lp->stats.rx_dropped++; |
1654 | } else { | 1654 | } else { |
@@ -1658,14 +1658,14 @@ de4x5_rx(struct net_device *dev) | |||
1658 | skb->protocol=eth_type_trans(skb,dev); | 1658 | skb->protocol=eth_type_trans(skb,dev); |
1659 | de4x5_local_stats(dev, skb->data, pkt_len); | 1659 | de4x5_local_stats(dev, skb->data, pkt_len); |
1660 | netif_rx(skb); | 1660 | netif_rx(skb); |
1661 | 1661 | ||
1662 | /* Update stats */ | 1662 | /* Update stats */ |
1663 | dev->last_rx = jiffies; | 1663 | dev->last_rx = jiffies; |
1664 | lp->stats.rx_packets++; | 1664 | lp->stats.rx_packets++; |
1665 | lp->stats.rx_bytes += pkt_len; | 1665 | lp->stats.rx_bytes += pkt_len; |
1666 | } | 1666 | } |
1667 | } | 1667 | } |
1668 | 1668 | ||
1669 | /* Change buffer ownership for this frame, back to the adapter */ | 1669 | /* Change buffer ownership for this frame, back to the adapter */ |
1670 | for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) { | 1670 | for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) { |
1671 | lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN); | 1671 | lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN); |
@@ -1674,13 +1674,13 @@ de4x5_rx(struct net_device *dev) | |||
1674 | lp->rx_ring[entry].status = cpu_to_le32(R_OWN); | 1674 | lp->rx_ring[entry].status = cpu_to_le32(R_OWN); |
1675 | barrier(); | 1675 | barrier(); |
1676 | } | 1676 | } |
1677 | 1677 | ||
1678 | /* | 1678 | /* |
1679 | ** Update entry information | 1679 | ** Update entry information |
1680 | */ | 1680 | */ |
1681 | lp->rx_new = (++lp->rx_new) % lp->rxRingSize; | 1681 | lp->rx_new = (++lp->rx_new) % lp->rxRingSize; |
1682 | } | 1682 | } |
1683 | 1683 | ||
1684 | return 0; | 1684 | return 0; |
1685 | } | 1685 | } |
1686 | 1686 | ||
@@ -1705,20 +1705,20 @@ de4x5_tx(struct net_device *dev) | |||
1705 | u_long iobase = dev->base_addr; | 1705 | u_long iobase = dev->base_addr; |
1706 | int entry; | 1706 | int entry; |
1707 | s32 status; | 1707 | s32 status; |
1708 | 1708 | ||
1709 | for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) { | 1709 | for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) { |
1710 | status = (s32)le32_to_cpu(lp->tx_ring[entry].status); | 1710 | status = (s32)le32_to_cpu(lp->tx_ring[entry].status); |
1711 | if (status < 0) { /* Buffer not sent yet */ | 1711 | if (status < 0) { /* Buffer not sent yet */ |
1712 | break; | 1712 | break; |
1713 | } else if (status != 0x7fffffff) { /* Not setup frame */ | 1713 | } else if (status != 0x7fffffff) { /* Not setup frame */ |
1714 | if (status & TD_ES) { /* An error happened */ | 1714 | if (status & TD_ES) { /* An error happened */ |
1715 | lp->stats.tx_errors++; | 1715 | lp->stats.tx_errors++; |
1716 | if (status & TD_NC) lp->stats.tx_carrier_errors++; | 1716 | if (status & TD_NC) lp->stats.tx_carrier_errors++; |
1717 | if (status & TD_LC) lp->stats.tx_window_errors++; | 1717 | if (status & TD_LC) lp->stats.tx_window_errors++; |
1718 | if (status & TD_UF) lp->stats.tx_fifo_errors++; | 1718 | if (status & TD_UF) lp->stats.tx_fifo_errors++; |
1719 | if (status & TD_EC) lp->pktStats.excessive_collisions++; | 1719 | if (status & TD_EC) lp->pktStats.excessive_collisions++; |
1720 | if (status & TD_DE) lp->stats.tx_aborted_errors++; | 1720 | if (status & TD_DE) lp->stats.tx_aborted_errors++; |
1721 | 1721 | ||
1722 | if (TX_PKT_PENDING) { | 1722 | if (TX_PKT_PENDING) { |
1723 | outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */ | 1723 | outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */ |
1724 | } | 1724 | } |
@@ -1727,14 +1727,14 @@ de4x5_tx(struct net_device *dev) | |||
1727 | if (lp->tx_enable) lp->linkOK++; | 1727 | if (lp->tx_enable) lp->linkOK++; |
1728 | } | 1728 | } |
1729 | /* Update the collision counter */ | 1729 | /* Update the collision counter */ |
1730 | lp->stats.collisions += ((status & TD_EC) ? 16 : | 1730 | lp->stats.collisions += ((status & TD_EC) ? 16 : |
1731 | ((status & TD_CC) >> 3)); | 1731 | ((status & TD_CC) >> 3)); |
1732 | 1732 | ||
1733 | /* Free the buffer. */ | 1733 | /* Free the buffer. */ |
1734 | if (lp->tx_skb[entry] != NULL) | 1734 | if (lp->tx_skb[entry] != NULL) |
1735 | de4x5_free_tx_buff(lp, entry); | 1735 | de4x5_free_tx_buff(lp, entry); |
1736 | } | 1736 | } |
1737 | 1737 | ||
1738 | /* Update all the pointers */ | 1738 | /* Update all the pointers */ |
1739 | lp->tx_old = (++lp->tx_old) % lp->txRingSize; | 1739 | lp->tx_old = (++lp->tx_old) % lp->txRingSize; |
1740 | } | 1740 | } |
@@ -1746,7 +1746,7 @@ de4x5_tx(struct net_device *dev) | |||
1746 | else | 1746 | else |
1747 | netif_start_queue(dev); | 1747 | netif_start_queue(dev); |
1748 | } | 1748 | } |
1749 | 1749 | ||
1750 | return 0; | 1750 | return 0; |
1751 | } | 1751 | } |
1752 | 1752 | ||
@@ -1755,9 +1755,9 @@ de4x5_ast(struct net_device *dev) | |||
1755 | { | 1755 | { |
1756 | struct de4x5_private *lp = netdev_priv(dev); | 1756 | struct de4x5_private *lp = netdev_priv(dev); |
1757 | int next_tick = DE4X5_AUTOSENSE_MS; | 1757 | int next_tick = DE4X5_AUTOSENSE_MS; |
1758 | 1758 | ||
1759 | disable_ast(dev); | 1759 | disable_ast(dev); |
1760 | 1760 | ||
1761 | if (lp->useSROM) { | 1761 | if (lp->useSROM) { |
1762 | next_tick = srom_autoconf(dev); | 1762 | next_tick = srom_autoconf(dev); |
1763 | } else if (lp->chipset == DC21140) { | 1763 | } else if (lp->chipset == DC21140) { |
@@ -1769,7 +1769,7 @@ de4x5_ast(struct net_device *dev) | |||
1769 | } | 1769 | } |
1770 | lp->linkOK = 0; | 1770 | lp->linkOK = 0; |
1771 | enable_ast(dev, next_tick); | 1771 | enable_ast(dev, next_tick); |
1772 | 1772 | ||
1773 | return 0; | 1773 | return 0; |
1774 | } | 1774 | } |
1775 | 1775 | ||
@@ -1792,11 +1792,11 @@ de4x5_txur(struct net_device *dev) | |||
1792 | } | 1792 | } |
1793 | outl(omr | OMR_ST | OMR_SR, DE4X5_OMR); | 1793 | outl(omr | OMR_ST | OMR_SR, DE4X5_OMR); |
1794 | } | 1794 | } |
1795 | 1795 | ||
1796 | return 0; | 1796 | return 0; |
1797 | } | 1797 | } |
1798 | 1798 | ||
1799 | static int | 1799 | static int |
1800 | de4x5_rx_ovfc(struct net_device *dev) | 1800 | de4x5_rx_ovfc(struct net_device *dev) |
1801 | { | 1801 | { |
1802 | struct de4x5_private *lp = netdev_priv(dev); | 1802 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -1813,7 +1813,7 @@ de4x5_rx_ovfc(struct net_device *dev) | |||
1813 | } | 1813 | } |
1814 | 1814 | ||
1815 | outl(omr, DE4X5_OMR); | 1815 | outl(omr, DE4X5_OMR); |
1816 | 1816 | ||
1817 | return 0; | 1817 | return 0; |
1818 | } | 1818 | } |
1819 | 1819 | ||
@@ -1823,22 +1823,22 @@ de4x5_close(struct net_device *dev) | |||
1823 | struct de4x5_private *lp = netdev_priv(dev); | 1823 | struct de4x5_private *lp = netdev_priv(dev); |
1824 | u_long iobase = dev->base_addr; | 1824 | u_long iobase = dev->base_addr; |
1825 | s32 imr, omr; | 1825 | s32 imr, omr; |
1826 | 1826 | ||
1827 | disable_ast(dev); | 1827 | disable_ast(dev); |
1828 | 1828 | ||
1829 | netif_stop_queue(dev); | 1829 | netif_stop_queue(dev); |
1830 | 1830 | ||
1831 | if (de4x5_debug & DEBUG_CLOSE) { | 1831 | if (de4x5_debug & DEBUG_CLOSE) { |
1832 | printk("%s: Shutting down ethercard, status was %8.8x.\n", | 1832 | printk("%s: Shutting down ethercard, status was %8.8x.\n", |
1833 | dev->name, inl(DE4X5_STS)); | 1833 | dev->name, inl(DE4X5_STS)); |
1834 | } | 1834 | } |
1835 | 1835 | ||
1836 | /* | 1836 | /* |
1837 | ** We stop the DE4X5 here... mask interrupts and stop TX & RX | 1837 | ** We stop the DE4X5 here... mask interrupts and stop TX & RX |
1838 | */ | 1838 | */ |
1839 | DISABLE_IRQs; | 1839 | DISABLE_IRQs; |
1840 | STOP_DE4X5; | 1840 | STOP_DE4X5; |
1841 | 1841 | ||
1842 | /* Free the associated irq */ | 1842 | /* Free the associated irq */ |
1843 | free_irq(dev->irq, dev); | 1843 | free_irq(dev->irq, dev); |
1844 | lp->state = CLOSED; | 1844 | lp->state = CLOSED; |
@@ -1846,10 +1846,10 @@ de4x5_close(struct net_device *dev) | |||
1846 | /* Free any socket buffers */ | 1846 | /* Free any socket buffers */ |
1847 | de4x5_free_rx_buffs(dev); | 1847 | de4x5_free_rx_buffs(dev); |
1848 | de4x5_free_tx_buffs(dev); | 1848 | de4x5_free_tx_buffs(dev); |
1849 | 1849 | ||
1850 | /* Put the adapter to sleep to save power */ | 1850 | /* Put the adapter to sleep to save power */ |
1851 | yawn(dev, SLEEP); | 1851 | yawn(dev, SLEEP); |
1852 | 1852 | ||
1853 | return 0; | 1853 | return 0; |
1854 | } | 1854 | } |
1855 | 1855 | ||
@@ -1858,9 +1858,9 @@ de4x5_get_stats(struct net_device *dev) | |||
1858 | { | 1858 | { |
1859 | struct de4x5_private *lp = netdev_priv(dev); | 1859 | struct de4x5_private *lp = netdev_priv(dev); |
1860 | u_long iobase = dev->base_addr; | 1860 | u_long iobase = dev->base_addr; |
1861 | 1861 | ||
1862 | lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR)); | 1862 | lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR)); |
1863 | 1863 | ||
1864 | return &lp->stats; | 1864 | return &lp->stats; |
1865 | } | 1865 | } |
1866 | 1866 | ||
@@ -1886,7 +1886,7 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len) | |||
1886 | (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) { | 1886 | (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) { |
1887 | lp->pktStats.unicast++; | 1887 | lp->pktStats.unicast++; |
1888 | } | 1888 | } |
1889 | 1889 | ||
1890 | lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ | 1890 | lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ |
1891 | if (lp->pktStats.bins[0] == 0) { /* Reset counters */ | 1891 | if (lp->pktStats.bins[0] == 0) { /* Reset counters */ |
1892 | memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats)); | 1892 | memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats)); |
@@ -1937,11 +1937,11 @@ set_multicast_list(struct net_device *dev) | |||
1937 | omr = inl(DE4X5_OMR); | 1937 | omr = inl(DE4X5_OMR); |
1938 | omr |= OMR_PR; | 1938 | omr |= OMR_PR; |
1939 | outl(omr, DE4X5_OMR); | 1939 | outl(omr, DE4X5_OMR); |
1940 | } else { | 1940 | } else { |
1941 | SetMulticastFilter(dev); | 1941 | SetMulticastFilter(dev); |
1942 | load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | | 1942 | load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | |
1943 | SETUP_FRAME_LEN, (struct sk_buff *)1); | 1943 | SETUP_FRAME_LEN, (struct sk_buff *)1); |
1944 | 1944 | ||
1945 | lp->tx_new = (++lp->tx_new) % lp->txRingSize; | 1945 | lp->tx_new = (++lp->tx_new) % lp->txRingSize; |
1946 | outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ | 1946 | outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ |
1947 | dev->trans_start = jiffies; | 1947 | dev->trans_start = jiffies; |
@@ -1969,20 +1969,20 @@ SetMulticastFilter(struct net_device *dev) | |||
1969 | omr = inl(DE4X5_OMR); | 1969 | omr = inl(DE4X5_OMR); |
1970 | omr &= ~(OMR_PR | OMR_PM); | 1970 | omr &= ~(OMR_PR | OMR_PM); |
1971 | pa = build_setup_frame(dev, ALL); /* Build the basic frame */ | 1971 | pa = build_setup_frame(dev, ALL); /* Build the basic frame */ |
1972 | 1972 | ||
1973 | if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) { | 1973 | if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) { |
1974 | omr |= OMR_PM; /* Pass all multicasts */ | 1974 | omr |= OMR_PM; /* Pass all multicasts */ |
1975 | } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ | 1975 | } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ |
1976 | for (i=0;i<dev->mc_count;i++) { /* for each address in the list */ | 1976 | for (i=0;i<dev->mc_count;i++) { /* for each address in the list */ |
1977 | addrs=dmi->dmi_addr; | 1977 | addrs=dmi->dmi_addr; |
1978 | dmi=dmi->next; | 1978 | dmi=dmi->next; |
1979 | if ((*addrs & 0x01) == 1) { /* multicast address? */ | 1979 | if ((*addrs & 0x01) == 1) { /* multicast address? */ |
1980 | crc = ether_crc_le(ETH_ALEN, addrs); | 1980 | crc = ether_crc_le(ETH_ALEN, addrs); |
1981 | hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ | 1981 | hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ |
1982 | 1982 | ||
1983 | byte = hashcode >> 3; /* bit[3-8] -> byte in filter */ | 1983 | byte = hashcode >> 3; /* bit[3-8] -> byte in filter */ |
1984 | bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */ | 1984 | bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */ |
1985 | 1985 | ||
1986 | byte <<= 1; /* calc offset into setup frame */ | 1986 | byte <<= 1; /* calc offset into setup frame */ |
1987 | if (byte & 0x02) { | 1987 | if (byte & 0x02) { |
1988 | byte -= 1; | 1988 | byte -= 1; |
@@ -1994,14 +1994,14 @@ SetMulticastFilter(struct net_device *dev) | |||
1994 | for (j=0; j<dev->mc_count; j++) { | 1994 | for (j=0; j<dev->mc_count; j++) { |
1995 | addrs=dmi->dmi_addr; | 1995 | addrs=dmi->dmi_addr; |
1996 | dmi=dmi->next; | 1996 | dmi=dmi->next; |
1997 | for (i=0; i<ETH_ALEN; i++) { | 1997 | for (i=0; i<ETH_ALEN; i++) { |
1998 | *(pa + (i&1)) = *addrs++; | 1998 | *(pa + (i&1)) = *addrs++; |
1999 | if (i & 0x01) pa += 4; | 1999 | if (i & 0x01) pa += 4; |
2000 | } | 2000 | } |
2001 | } | 2001 | } |
2002 | } | 2002 | } |
2003 | outl(omr, DE4X5_OMR); | 2003 | outl(omr, DE4X5_OMR); |
2004 | 2004 | ||
2005 | return; | 2005 | return; |
2006 | } | 2006 | } |
2007 | 2007 | ||
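The hash-filter arithmetic above takes the 9 least-significant bits of the little-endian CRC of each multicast address and turns them into a byte and bit position inside the setup frame, where only the low 16 bits of every 32-bit word carry filter data. A self-contained sketch of that mapping, assuming HASH_BITS masks those 9 bits as the comment states (the helper name is hypothetical; the driver does this inline):

    /* Illustrative only: locate the hash-filter bit for one multicast
     * address, following the steps shown in SetMulticastFilter().
     */
    static void de4x5_hash_pos(const u_char *addr, int *byte, u_char *bit)
    {
            u32 crc      = ether_crc_le(ETH_ALEN, addr);
            u16 hashcode = crc & HASH_BITS;     /* 9 LSbs of the CRC       */

            *byte = hashcode >> 3;              /* bits [3-8]: byte index  */
            *bit  = 1 << (hashcode & 0x07);     /* bits [0-2]: bit in byte */

            *byte <<= 1;                        /* map onto the first two  */
            if (*byte & 0x02)                   /* bytes of each 32-bit    */
                    *byte -= 1;                 /* setup-frame word        */
    }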
@@ -2031,18 +2031,18 @@ static int __init de4x5_eisa_probe (struct device *gendev) | |||
2031 | status = -EBUSY; | 2031 | status = -EBUSY; |
2032 | goto release_reg_1; | 2032 | goto release_reg_1; |
2033 | } | 2033 | } |
2034 | 2034 | ||
2035 | if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) { | 2035 | if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) { |
2036 | status = -ENOMEM; | 2036 | status = -ENOMEM; |
2037 | goto release_reg_2; | 2037 | goto release_reg_2; |
2038 | } | 2038 | } |
2039 | lp = netdev_priv(dev); | 2039 | lp = netdev_priv(dev); |
2040 | 2040 | ||
2041 | cfid = (u32) inl(PCI_CFID); | 2041 | cfid = (u32) inl(PCI_CFID); |
2042 | lp->cfrv = (u_short) inl(PCI_CFRV); | 2042 | lp->cfrv = (u_short) inl(PCI_CFRV); |
2043 | device = (cfid >> 8) & 0x00ffff00; | 2043 | device = (cfid >> 8) & 0x00ffff00; |
2044 | vendor = (u_short) cfid; | 2044 | vendor = (u_short) cfid; |
2045 | 2045 | ||
2046 | /* Read the EISA Configuration Registers */ | 2046 | /* Read the EISA Configuration Registers */ |
2047 | regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT); | 2047 | regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT); |
2048 | #ifdef CONFIG_ALPHA | 2048 | #ifdef CONFIG_ALPHA |
@@ -2050,7 +2050,7 @@ static int __init de4x5_eisa_probe (struct device *gendev) | |||
2050 | * care about the EISA configuration, and thus doesn't | 2050 | * care about the EISA configuration, and thus doesn't |
2051 | * configure the PLX bridge properly. Oh well... Simply mimic | 2051 | * configure the PLX bridge properly. Oh well... Simply mimic |
2052 | * the EISA config file to sort it out. */ | 2052 | * the EISA config file to sort it out. */ |
2053 | 2053 | ||
2054 | /* EISA REG1: Assert DecChip 21040 HW Reset */ | 2054 | /* EISA REG1: Assert DecChip 21040 HW Reset */ |
2055 | outb (ER1_IAM | 1, EISA_REG1); | 2055 | outb (ER1_IAM | 1, EISA_REG1); |
2056 | mdelay (1); | 2056 | mdelay (1); |
@@ -2061,12 +2061,12 @@ static int __init de4x5_eisa_probe (struct device *gendev) | |||
2061 | 2061 | ||
2062 | /* EISA REG3: R/W Burst Transfer Enable */ | 2062 | /* EISA REG3: R/W Burst Transfer Enable */ |
2063 | outb (ER3_BWE | ER3_BRE, EISA_REG3); | 2063 | outb (ER3_BWE | ER3_BRE, EISA_REG3); |
2064 | 2064 | ||
2065 | /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */ | 2065 | /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */ |
2066 | outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0); | 2066 | outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0); |
2067 | #endif | 2067 | #endif |
2068 | irq = de4x5_irq[(regval >> 1) & 0x03]; | 2068 | irq = de4x5_irq[(regval >> 1) & 0x03]; |
2069 | 2069 | ||
2070 | if (is_DC2114x) { | 2070 | if (is_DC2114x) { |
2071 | device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); | 2071 | device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); |
2072 | } | 2072 | } |
@@ -2077,7 +2077,7 @@ static int __init de4x5_eisa_probe (struct device *gendev) | |||
2077 | outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS); | 2077 | outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS); |
2078 | outl(0x00006000, PCI_CFLT); | 2078 | outl(0x00006000, PCI_CFLT); |
2079 | outl(iobase, PCI_CBIO); | 2079 | outl(iobase, PCI_CBIO); |
2080 | 2080 | ||
2081 | DevicePresent(dev, EISA_APROM); | 2081 | DevicePresent(dev, EISA_APROM); |
2082 | 2082 | ||
2083 | dev->irq = irq; | 2083 | dev->irq = irq; |
@@ -2102,7 +2102,7 @@ static int __devexit de4x5_eisa_remove (struct device *device) | |||
2102 | 2102 | ||
2103 | dev = device->driver_data; | 2103 | dev = device->driver_data; |
2104 | iobase = dev->base_addr; | 2104 | iobase = dev->base_addr; |
2105 | 2105 | ||
2106 | unregister_netdev (dev); | 2106 | unregister_netdev (dev); |
2107 | free_netdev (dev); | 2107 | free_netdev (dev); |
2108 | release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE); | 2108 | release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE); |
@@ -2131,11 +2131,11 @@ MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); | |||
2131 | 2131 | ||
2132 | /* | 2132 | /* |
2133 | ** This function searches the current bus (which is >0) for a DECchip with an | 2133 | ** This function searches the current bus (which is >0) for a DECchip with an |
2134 | ** SROM, so that in multiport cards that have one SROM shared between multiple | 2134 | ** SROM, so that in multiport cards that have one SROM shared between multiple |
2135 | ** DECchips, we can find the base SROM irrespective of the BIOS scan direction. | 2135 | ** DECchips, we can find the base SROM irrespective of the BIOS scan direction. |
2136 | ** For single port cards this is a time waster... | 2136 | ** For single port cards this is a time waster... |
2137 | */ | 2137 | */ |
2138 | static void __devinit | 2138 | static void __devinit |
2139 | srom_search(struct net_device *dev, struct pci_dev *pdev) | 2139 | srom_search(struct net_device *dev, struct pci_dev *pdev) |
2140 | { | 2140 | { |
2141 | u_char pb; | 2141 | u_char pb; |
@@ -2163,7 +2163,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev) | |||
2163 | /* Set the device number information */ | 2163 | /* Set the device number information */ |
2164 | lp->device = PCI_SLOT(this_dev->devfn); | 2164 | lp->device = PCI_SLOT(this_dev->devfn); |
2165 | lp->bus_num = pb; | 2165 | lp->bus_num = pb; |
2166 | 2166 | ||
2167 | /* Set the chipset information */ | 2167 | /* Set the chipset information */ |
2168 | if (is_DC2114x) { | 2168 | if (is_DC2114x) { |
2169 | device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); | 2169 | device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); |
@@ -2176,7 +2176,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev) | |||
2176 | /* Fetch the IRQ to be used */ | 2176 | /* Fetch the IRQ to be used */ |
2177 | irq = this_dev->irq; | 2177 | irq = this_dev->irq; |
2178 | if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue; | 2178 | if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue; |
2179 | 2179 | ||
2180 | /* Check if I/O accesses are enabled */ | 2180 | /* Check if I/O accesses are enabled */ |
2181 | pci_read_config_word(this_dev, PCI_COMMAND, &status); | 2181 | pci_read_config_word(this_dev, PCI_COMMAND, &status); |
2182 | if (!(status & PCI_COMMAND_IO)) continue; | 2182 | if (!(status & PCI_COMMAND_IO)) continue; |
@@ -2254,7 +2254,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev, | |||
2254 | lp = netdev_priv(dev); | 2254 | lp = netdev_priv(dev); |
2255 | lp->bus = PCI; | 2255 | lp->bus = PCI; |
2256 | lp->bus_num = 0; | 2256 | lp->bus_num = 0; |
2257 | 2257 | ||
2258 | /* Search for an SROM on this bus */ | 2258 | /* Search for an SROM on this bus */ |
2259 | if (lp->bus_num != pb) { | 2259 | if (lp->bus_num != pb) { |
2260 | lp->bus_num = pb; | 2260 | lp->bus_num = pb; |
@@ -2267,7 +2267,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev, | |||
2267 | /* Set the device number information */ | 2267 | /* Set the device number information */ |
2268 | lp->device = dev_num; | 2268 | lp->device = dev_num; |
2269 | lp->bus_num = pb; | 2269 | lp->bus_num = pb; |
2270 | 2270 | ||
2271 | /* Set the chipset information */ | 2271 | /* Set the chipset information */ |
2272 | if (is_DC2114x) { | 2272 | if (is_DC2114x) { |
2273 | device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); | 2273 | device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); |
@@ -2283,7 +2283,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev, | |||
2283 | error = -ENODEV; | 2283 | error = -ENODEV; |
2284 | goto free_dev; | 2284 | goto free_dev; |
2285 | } | 2285 | } |
2286 | 2286 | ||
2287 | /* Check if I/O accesses and Bus Mastering are enabled */ | 2287 | /* Check if I/O accesses and Bus Mastering are enabled */ |
2288 | pci_read_config_word(pdev, PCI_COMMAND, &status); | 2288 | pci_read_config_word(pdev, PCI_COMMAND, &status); |
2289 | #ifdef __powerpc__ | 2289 | #ifdef __powerpc__ |
@@ -2322,7 +2322,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev, | |||
2322 | } | 2322 | } |
2323 | 2323 | ||
2324 | dev->irq = irq; | 2324 | dev->irq = irq; |
2325 | 2325 | ||
2326 | if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) { | 2326 | if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) { |
2327 | goto release; | 2327 | goto release; |
2328 | } | 2328 | } |
@@ -2377,7 +2377,7 @@ static struct pci_driver de4x5_pci_driver = { | |||
2377 | ** Auto configure the media here rather than setting the port at compile | 2377 | ** Auto configure the media here rather than setting the port at compile |
2378 | ** time. This routine is called by de4x5_init() and when a loss of media is | 2378 | ** time. This routine is called by de4x5_init() and when a loss of media is |
2379 | ** detected (excessive collisions, loss of carrier, no carrier or link fail | 2379 | ** detected (excessive collisions, loss of carrier, no carrier or link fail |
2380 | ** [TP] or no recent receive activity) to check whether the user has been | 2380 | ** [TP] or no recent receive activity) to check whether the user has been |
2381 | ** sneaky and changed the port on us. | 2381 | ** sneaky and changed the port on us. |
2382 | */ | 2382 | */ |
2383 | static int | 2383 | static int |
@@ -2405,7 +2405,7 @@ autoconf_media(struct net_device *dev) | |||
2405 | } | 2405 | } |
2406 | 2406 | ||
2407 | enable_ast(dev, next_tick); | 2407 | enable_ast(dev, next_tick); |
2408 | 2408 | ||
2409 | return (lp->media); | 2409 | return (lp->media); |
2410 | } | 2410 | } |
2411 | 2411 | ||
@@ -2428,7 +2428,7 @@ dc21040_autoconf(struct net_device *dev) | |||
2428 | u_long iobase = dev->base_addr; | 2428 | u_long iobase = dev->base_addr; |
2429 | int next_tick = DE4X5_AUTOSENSE_MS; | 2429 | int next_tick = DE4X5_AUTOSENSE_MS; |
2430 | s32 imr; | 2430 | s32 imr; |
2431 | 2431 | ||
2432 | switch (lp->media) { | 2432 | switch (lp->media) { |
2433 | case INIT: | 2433 | case INIT: |
2434 | DISABLE_IRQs; | 2434 | DISABLE_IRQs; |
@@ -2447,36 +2447,36 @@ dc21040_autoconf(struct net_device *dev) | |||
2447 | lp->local_state = 0; | 2447 | lp->local_state = 0; |
2448 | next_tick = dc21040_autoconf(dev); | 2448 | next_tick = dc21040_autoconf(dev); |
2449 | break; | 2449 | break; |
2450 | 2450 | ||
2451 | case TP: | 2451 | case TP: |
2452 | next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI, | 2452 | next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI, |
2453 | TP_SUSPECT, test_tp); | 2453 | TP_SUSPECT, test_tp); |
2454 | break; | 2454 | break; |
2455 | 2455 | ||
2456 | case TP_SUSPECT: | 2456 | case TP_SUSPECT: |
2457 | next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf); | 2457 | next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf); |
2458 | break; | 2458 | break; |
2459 | 2459 | ||
2460 | case BNC: | 2460 | case BNC: |
2461 | case AUI: | 2461 | case AUI: |
2462 | case BNC_AUI: | 2462 | case BNC_AUI: |
2463 | next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA, | 2463 | next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA, |
2464 | BNC_AUI_SUSPECT, ping_media); | 2464 | BNC_AUI_SUSPECT, ping_media); |
2465 | break; | 2465 | break; |
2466 | 2466 | ||
2467 | case BNC_AUI_SUSPECT: | 2467 | case BNC_AUI_SUSPECT: |
2468 | next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf); | 2468 | next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf); |
2469 | break; | 2469 | break; |
2470 | 2470 | ||
2471 | case EXT_SIA: | 2471 | case EXT_SIA: |
2472 | next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000, | 2472 | next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000, |
2473 | NC, EXT_SIA_SUSPECT, ping_media); | 2473 | NC, EXT_SIA_SUSPECT, ping_media); |
2474 | break; | 2474 | break; |
2475 | 2475 | ||
2476 | case EXT_SIA_SUSPECT: | 2476 | case EXT_SIA_SUSPECT: |
2477 | next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf); | 2477 | next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf); |
2478 | break; | 2478 | break; |
2479 | 2479 | ||
2480 | case NC: | 2480 | case NC: |
2481 | /* default to TP for all */ | 2481 | /* default to TP for all */ |
2482 | reset_init_sia(dev, 0x8f01, 0xffff, 0x0000); | 2482 | reset_init_sia(dev, 0x8f01, 0xffff, 0x0000); |
@@ -2488,13 +2488,13 @@ dc21040_autoconf(struct net_device *dev) | |||
2488 | lp->tx_enable = NO; | 2488 | lp->tx_enable = NO; |
2489 | break; | 2489 | break; |
2490 | } | 2490 | } |
2491 | 2491 | ||
2492 | return next_tick; | 2492 | return next_tick; |
2493 | } | 2493 | } |
2494 | 2494 | ||
2495 | static int | 2495 | static int |
2496 | dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, | 2496 | dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, |
2497 | int next_state, int suspect_state, | 2497 | int next_state, int suspect_state, |
2498 | int (*fn)(struct net_device *, int)) | 2498 | int (*fn)(struct net_device *, int)) |
2499 | { | 2499 | { |
2500 | struct de4x5_private *lp = netdev_priv(dev); | 2500 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -2507,7 +2507,7 @@ dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeo | |||
2507 | lp->local_state++; | 2507 | lp->local_state++; |
2508 | next_tick = 500; | 2508 | next_tick = 500; |
2509 | break; | 2509 | break; |
2510 | 2510 | ||
2511 | case 1: | 2511 | case 1: |
2512 | if (!lp->tx_enable) { | 2512 | if (!lp->tx_enable) { |
2513 | linkBad = fn(dev, timeout); | 2513 | linkBad = fn(dev, timeout); |
@@ -2527,7 +2527,7 @@ dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeo | |||
2527 | } | 2527 | } |
2528 | break; | 2528 | break; |
2529 | } | 2529 | } |
2530 | 2530 | ||
2531 | return next_tick; | 2531 | return next_tick; |
2532 | } | 2532 | } |
2533 | 2533 | ||
@@ -2582,7 +2582,7 @@ dc21041_autoconf(struct net_device *dev) | |||
2582 | u_long iobase = dev->base_addr; | 2582 | u_long iobase = dev->base_addr; |
2583 | s32 sts, irqs, irq_mask, imr, omr; | 2583 | s32 sts, irqs, irq_mask, imr, omr; |
2584 | int next_tick = DE4X5_AUTOSENSE_MS; | 2584 | int next_tick = DE4X5_AUTOSENSE_MS; |
2585 | 2585 | ||
2586 | switch (lp->media) { | 2586 | switch (lp->media) { |
2587 | case INIT: | 2587 | case INIT: |
2588 | DISABLE_IRQs; | 2588 | DISABLE_IRQs; |
@@ -2603,7 +2603,7 @@ dc21041_autoconf(struct net_device *dev) | |||
2603 | lp->local_state = 0; | 2603 | lp->local_state = 0; |
2604 | next_tick = dc21041_autoconf(dev); | 2604 | next_tick = dc21041_autoconf(dev); |
2605 | break; | 2605 | break; |
2606 | 2606 | ||
2607 | case TP_NW: | 2607 | case TP_NW: |
2608 | if (lp->timeout < 0) { | 2608 | if (lp->timeout < 0) { |
2609 | omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */ | 2609 | omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */ |
@@ -2623,7 +2623,7 @@ dc21041_autoconf(struct net_device *dev) | |||
2623 | next_tick = dc21041_autoconf(dev); | 2623 | next_tick = dc21041_autoconf(dev); |
2624 | } | 2624 | } |
2625 | break; | 2625 | break; |
2626 | 2626 | ||
2627 | case ANS: | 2627 | case ANS: |
2628 | if (!lp->tx_enable) { | 2628 | if (!lp->tx_enable) { |
2629 | irqs = STS_LNP; | 2629 | irqs = STS_LNP; |
@@ -2645,11 +2645,11 @@ dc21041_autoconf(struct net_device *dev) | |||
2645 | next_tick = 3000; | 2645 | next_tick = 3000; |
2646 | } | 2646 | } |
2647 | break; | 2647 | break; |
2648 | 2648 | ||
2649 | case ANS_SUSPECT: | 2649 | case ANS_SUSPECT: |
2650 | next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf); | 2650 | next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf); |
2651 | break; | 2651 | break; |
2652 | 2652 | ||
2653 | case TP: | 2653 | case TP: |
2654 | if (!lp->tx_enable) { | 2654 | if (!lp->tx_enable) { |
2655 | if (lp->timeout < 0) { | 2655 | if (lp->timeout < 0) { |
@@ -2679,11 +2679,11 @@ dc21041_autoconf(struct net_device *dev) | |||
2679 | next_tick = 3000; | 2679 | next_tick = 3000; |
2680 | } | 2680 | } |
2681 | break; | 2681 | break; |
2682 | 2682 | ||
2683 | case TP_SUSPECT: | 2683 | case TP_SUSPECT: |
2684 | next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf); | 2684 | next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf); |
2685 | break; | 2685 | break; |
2686 | 2686 | ||
2687 | case AUI: | 2687 | case AUI: |
2688 | if (!lp->tx_enable) { | 2688 | if (!lp->tx_enable) { |
2689 | if (lp->timeout < 0) { | 2689 | if (lp->timeout < 0) { |
@@ -2709,11 +2709,11 @@ dc21041_autoconf(struct net_device *dev) | |||
2709 | next_tick = 3000; | 2709 | next_tick = 3000; |
2710 | } | 2710 | } |
2711 | break; | 2711 | break; |
2712 | 2712 | ||
2713 | case AUI_SUSPECT: | 2713 | case AUI_SUSPECT: |
2714 | next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf); | 2714 | next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf); |
2715 | break; | 2715 | break; |
2716 | 2716 | ||
2717 | case BNC: | 2717 | case BNC: |
2718 | switch (lp->local_state) { | 2718 | switch (lp->local_state) { |
2719 | case 0: | 2719 | case 0: |
@@ -2731,7 +2731,7 @@ dc21041_autoconf(struct net_device *dev) | |||
2731 | next_tick = dc21041_autoconf(dev); | 2731 | next_tick = dc21041_autoconf(dev); |
2732 | } | 2732 | } |
2733 | break; | 2733 | break; |
2734 | 2734 | ||
2735 | case 1: | 2735 | case 1: |
2736 | if (!lp->tx_enable) { | 2736 | if (!lp->tx_enable) { |
2737 | if ((sts = ping_media(dev, 3000)) < 0) { | 2737 | if ((sts = ping_media(dev, 3000)) < 0) { |
@@ -2751,11 +2751,11 @@ dc21041_autoconf(struct net_device *dev) | |||
2751 | break; | 2751 | break; |
2752 | } | 2752 | } |
2753 | break; | 2753 | break; |
2754 | 2754 | ||
2755 | case BNC_SUSPECT: | 2755 | case BNC_SUSPECT: |
2756 | next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf); | 2756 | next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf); |
2757 | break; | 2757 | break; |
2758 | 2758 | ||
2759 | case NC: | 2759 | case NC: |
2760 | omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */ | 2760 | omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */ |
2761 | outl(omr | OMR_FDX, DE4X5_OMR); | 2761 | outl(omr | OMR_FDX, DE4X5_OMR); |
@@ -2768,7 +2768,7 @@ dc21041_autoconf(struct net_device *dev) | |||
2768 | lp->tx_enable = NO; | 2768 | lp->tx_enable = NO; |
2769 | break; | 2769 | break; |
2770 | } | 2770 | } |
2771 | 2771 | ||
2772 | return next_tick; | 2772 | return next_tick; |
2773 | } | 2773 | } |
2774 | 2774 | ||
@@ -2784,9 +2784,9 @@ dc21140m_autoconf(struct net_device *dev) | |||
2784 | int ana, anlpa, cap, cr, slnk, sr; | 2784 | int ana, anlpa, cap, cr, slnk, sr; |
2785 | int next_tick = DE4X5_AUTOSENSE_MS; | 2785 | int next_tick = DE4X5_AUTOSENSE_MS; |
2786 | u_long imr, omr, iobase = dev->base_addr; | 2786 | u_long imr, omr, iobase = dev->base_addr; |
2787 | 2787 | ||
2788 | switch(lp->media) { | 2788 | switch(lp->media) { |
2789 | case INIT: | 2789 | case INIT: |
2790 | if (lp->timeout < 0) { | 2790 | if (lp->timeout < 0) { |
2791 | DISABLE_IRQs; | 2791 | DISABLE_IRQs; |
2792 | lp->tx_enable = FALSE; | 2792 | lp->tx_enable = FALSE; |
@@ -2813,7 +2813,7 @@ dc21140m_autoconf(struct net_device *dev) | |||
2813 | lp->media = _100Mb; | 2813 | lp->media = _100Mb; |
2814 | } else if (lp->autosense == _10Mb) { | 2814 | } else if (lp->autosense == _10Mb) { |
2815 | lp->media = _10Mb; | 2815 | lp->media = _10Mb; |
2816 | } else if ((lp->autosense == AUTO) && | 2816 | } else if ((lp->autosense == AUTO) && |
2817 | ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { | 2817 | ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { |
2818 | ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); | 2818 | ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); |
2819 | ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM); | 2819 | ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM); |
@@ -2831,7 +2831,7 @@ dc21140m_autoconf(struct net_device *dev) | |||
2831 | next_tick = dc21140m_autoconf(dev); | 2831 | next_tick = dc21140m_autoconf(dev); |
2832 | } | 2832 | } |
2833 | break; | 2833 | break; |
2834 | 2834 | ||
2835 | case ANS: | 2835 | case ANS: |
2836 | switch (lp->local_state) { | 2836 | switch (lp->local_state) { |
2837 | case 0: | 2837 | case 0: |
@@ -2851,7 +2851,7 @@ dc21140m_autoconf(struct net_device *dev) | |||
2851 | next_tick = dc21140m_autoconf(dev); | 2851 | next_tick = dc21140m_autoconf(dev); |
2852 | } | 2852 | } |
2853 | break; | 2853 | break; |
2854 | 2854 | ||
2855 | case 1: | 2855 | case 1: |
2856 | if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) { | 2856 | if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) { |
2857 | next_tick = sr & ~TIMER_CB; | 2857 | next_tick = sr & ~TIMER_CB; |
@@ -2862,7 +2862,7 @@ dc21140m_autoconf(struct net_device *dev) | |||
2862 | lp->tmp = MII_SR_ASSC; | 2862 | lp->tmp = MII_SR_ASSC; |
2863 | anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); | 2863 | anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); |
2864 | ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); | 2864 | ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); |
2865 | if (!(anlpa & MII_ANLPA_RF) && | 2865 | if (!(anlpa & MII_ANLPA_RF) && |
2866 | (cap = anlpa & MII_ANLPA_TAF & ana)) { | 2866 | (cap = anlpa & MII_ANLPA_TAF & ana)) { |
2867 | if (cap & MII_ANA_100M) { | 2867 | if (cap & MII_ANA_100M) { |
2868 | lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE); | 2868 | lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE); |
@@ -2879,10 +2879,10 @@ dc21140m_autoconf(struct net_device *dev) | |||
2879 | break; | 2879 | break; |
2880 | } | 2880 | } |
2881 | break; | 2881 | break; |
2882 | 2882 | ||
2883 | case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ | 2883 | case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ |
2884 | if (lp->timeout < 0) { | 2884 | if (lp->timeout < 0) { |
2885 | lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS : | 2885 | lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS : |
2886 | (~gep_rd(dev) & GEP_LNP)); | 2886 | (~gep_rd(dev) & GEP_LNP)); |
2887 | SET_100Mb_PDET; | 2887 | SET_100Mb_PDET; |
2888 | } | 2888 | } |
@@ -2899,7 +2899,7 @@ dc21140m_autoconf(struct net_device *dev) | |||
2899 | next_tick = dc21140m_autoconf(dev); | 2899 | next_tick = dc21140m_autoconf(dev); |
2900 | } | 2900 | } |
2901 | break; | 2901 | break; |
2902 | 2902 | ||
2903 | case _100Mb: /* Set 100Mb/s */ | 2903 | case _100Mb: /* Set 100Mb/s */ |
2904 | next_tick = 3000; | 2904 | next_tick = 3000; |
2905 | if (!lp->tx_enable) { | 2905 | if (!lp->tx_enable) { |
@@ -2933,7 +2933,7 @@ dc21140m_autoconf(struct net_device *dev) | |||
2933 | } | 2933 | } |
2934 | } | 2934 | } |
2935 | break; | 2935 | break; |
2936 | 2936 | ||
2937 | case NC: | 2937 | case NC: |
2938 | if (lp->media != lp->c_media) { | 2938 | if (lp->media != lp->c_media) { |
2939 | de4x5_dbg_media(dev); | 2939 | de4x5_dbg_media(dev); |
@@ -2943,7 +2943,7 @@ dc21140m_autoconf(struct net_device *dev) | |||
2943 | lp->tx_enable = FALSE; | 2943 | lp->tx_enable = FALSE; |
2944 | break; | 2944 | break; |
2945 | } | 2945 | } |
2946 | 2946 | ||
2947 | return next_tick; | 2947 | return next_tick; |
2948 | } | 2948 | } |
2949 | 2949 | ||
@@ -3002,7 +3002,7 @@ dc2114x_autoconf(struct net_device *dev) | |||
3002 | lp->media = AUI; | 3002 | lp->media = AUI; |
3003 | } else { | 3003 | } else { |
3004 | lp->media = SPD_DET; | 3004 | lp->media = SPD_DET; |
3005 | if ((lp->infoblock_media == ANS) && | 3005 | if ((lp->infoblock_media == ANS) && |
3006 | ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { | 3006 | ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { |
3007 | ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); | 3007 | ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); |
3008 | ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM); | 3008 | ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM); |
@@ -3014,7 +3014,7 @@ dc2114x_autoconf(struct net_device *dev) | |||
3014 | next_tick = dc2114x_autoconf(dev); | 3014 | next_tick = dc2114x_autoconf(dev); |
3015 | } | 3015 | } |
3016 | break; | 3016 | break; |
3017 | 3017 | ||
3018 | case ANS: | 3018 | case ANS: |
3019 | switch (lp->local_state) { | 3019 | switch (lp->local_state) { |
3020 | case 0: | 3020 | case 0: |
@@ -3034,7 +3034,7 @@ dc2114x_autoconf(struct net_device *dev) | |||
3034 | next_tick = dc2114x_autoconf(dev); | 3034 | next_tick = dc2114x_autoconf(dev); |
3035 | } | 3035 | } |
3036 | break; | 3036 | break; |
3037 | 3037 | ||
3038 | case 1: | 3038 | case 1: |
3039 | if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) { | 3039 | if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) { |
3040 | next_tick = sr & ~TIMER_CB; | 3040 | next_tick = sr & ~TIMER_CB; |
@@ -3045,7 +3045,7 @@ dc2114x_autoconf(struct net_device *dev) | |||
3045 | lp->tmp = MII_SR_ASSC; | 3045 | lp->tmp = MII_SR_ASSC; |
3046 | anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); | 3046 | anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); |
3047 | ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); | 3047 | ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); |
3048 | if (!(anlpa & MII_ANLPA_RF) && | 3048 | if (!(anlpa & MII_ANLPA_RF) && |
3049 | (cap = anlpa & MII_ANLPA_TAF & ana)) { | 3049 | (cap = anlpa & MII_ANLPA_TAF & ana)) { |
3050 | if (cap & MII_ANA_100M) { | 3050 | if (cap & MII_ANA_100M) { |
3051 | lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE); | 3051 | lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE); |
@@ -3087,11 +3087,11 @@ dc2114x_autoconf(struct net_device *dev) | |||
3087 | next_tick = 3000; | 3087 | next_tick = 3000; |
3088 | } | 3088 | } |
3089 | break; | 3089 | break; |
3090 | 3090 | ||
3091 | case AUI_SUSPECT: | 3091 | case AUI_SUSPECT: |
3092 | next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf); | 3092 | next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf); |
3093 | break; | 3093 | break; |
3094 | 3094 | ||
3095 | case BNC: | 3095 | case BNC: |
3096 | switch (lp->local_state) { | 3096 | switch (lp->local_state) { |
3097 | case 0: | 3097 | case 0: |
@@ -3109,7 +3109,7 @@ dc2114x_autoconf(struct net_device *dev) | |||
3109 | next_tick = dc2114x_autoconf(dev); | 3109 | next_tick = dc2114x_autoconf(dev); |
3110 | } | 3110 | } |
3111 | break; | 3111 | break; |
3112 | 3112 | ||
3113 | case 1: | 3113 | case 1: |
3114 | if (!lp->tx_enable) { | 3114 | if (!lp->tx_enable) { |
3115 | if ((sts = ping_media(dev, 3000)) < 0) { | 3115 | if ((sts = ping_media(dev, 3000)) < 0) { |
@@ -3130,11 +3130,11 @@ dc2114x_autoconf(struct net_device *dev) | |||
3130 | break; | 3130 | break; |
3131 | } | 3131 | } |
3132 | break; | 3132 | break; |
3133 | 3133 | ||
3134 | case BNC_SUSPECT: | 3134 | case BNC_SUSPECT: |
3135 | next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf); | 3135 | next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf); |
3136 | break; | 3136 | break; |
3137 | 3137 | ||
3138 | case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ | 3138 | case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ |
3139 | if (srom_map_media(dev) < 0) { | 3139 | if (srom_map_media(dev) < 0) { |
3140 | lp->tcount++; | 3140 | lp->tcount++; |
@@ -3161,7 +3161,7 @@ dc2114x_autoconf(struct net_device *dev) | |||
3161 | next_tick = dc2114x_autoconf(dev); | 3161 | next_tick = dc2114x_autoconf(dev); |
3162 | } else if (((lp->media == _100Mb) && is_100_up(dev)) || | 3162 | } else if (((lp->media == _100Mb) && is_100_up(dev)) || |
3163 | (((lp->media == _10Mb) || (lp->media == TP) || | 3163 | (((lp->media == _10Mb) || (lp->media == TP) || |
3164 | (lp->media == BNC) || (lp->media == AUI)) && | 3164 | (lp->media == BNC) || (lp->media == AUI)) && |
3165 | is_10_up(dev))) { | 3165 | is_10_up(dev))) { |
3166 | next_tick = dc2114x_autoconf(dev); | 3166 | next_tick = dc2114x_autoconf(dev); |
3167 | } else { | 3167 | } else { |
@@ -3169,7 +3169,7 @@ dc2114x_autoconf(struct net_device *dev) | |||
3169 | lp->media = INIT; | 3169 | lp->media = INIT; |
3170 | } | 3170 | } |
3171 | break; | 3171 | break; |
3172 | 3172 | ||
3173 | case _10Mb: | 3173 | case _10Mb: |
3174 | next_tick = 3000; | 3174 | next_tick = 3000; |
3175 | if (!lp->tx_enable) { | 3175 | if (!lp->tx_enable) { |
@@ -3208,7 +3208,7 @@ printk("Huh?: media:%02x\n", lp->media); | |||
3208 | lp->media = INIT; | 3208 | lp->media = INIT; |
3209 | break; | 3209 | break; |
3210 | } | 3210 | } |
3211 | 3211 | ||
3212 | return next_tick; | 3212 | return next_tick; |
3213 | } | 3213 | } |
3214 | 3214 | ||
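The dc21140m_autoconf() and dc2114x_autoconf() hunks above share one pattern: each call services a single media-selection state and returns the number of milliseconds the driver's timer should wait before re-entering the function. The following is a minimal sketch of that timer-driven state-machine shape only, with invented state names rather than the driver's own symbols:

/* Sketch only: invented states, not the driver's own symbols. */
enum media_state { PROBE, WAIT_LINK, LINK_UP };

struct autosense {
	enum media_state state;
	int timeout;			/* countdown in polls */
};

/* One step of a timer-driven autosense loop: handle the current state
 * and return the delay in ms before the timer should re-enter it. */
static int autosense_step(struct autosense *as, int link_up)
{
	int next_tick = 100;		/* default polling interval */

	switch (as->state) {
	case PROBE:
		as->timeout = 10;	/* allow ~10 polls before retrying */
		as->state = WAIT_LINK;
		break;
	case WAIT_LINK:
		if (link_up)
			as->state = LINK_UP;
		else if (--as->timeout == 0)
			as->state = PROBE;	/* give up, start over */
		break;
	case LINK_UP:
		next_tick = 3000;	/* settled: poll slowly */
		break;
	}

	return next_tick;
}

Returning the next delay instead of sleeping keeps the real driver's autoconf functions re-entrant from timer context, which is why nearly every case above ends by computing next_tick.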
@@ -3231,7 +3231,7 @@ srom_map_media(struct net_device *dev) | |||
3231 | struct de4x5_private *lp = netdev_priv(dev); | 3231 | struct de4x5_private *lp = netdev_priv(dev); |
3232 | 3232 | ||
3233 | lp->fdx = 0; | 3233 | lp->fdx = 0; |
3234 | if (lp->infoblock_media == lp->media) | 3234 | if (lp->infoblock_media == lp->media) |
3235 | return 0; | 3235 | return 0; |
3236 | 3236 | ||
3237 | switch(lp->infoblock_media) { | 3237 | switch(lp->infoblock_media) { |
@@ -3270,7 +3270,7 @@ srom_map_media(struct net_device *dev) | |||
3270 | case SROM_100BASEFF: | 3270 | case SROM_100BASEFF: |
3271 | if (!lp->params.fdx) return -1; | 3271 | if (!lp->params.fdx) return -1; |
3272 | lp->fdx = TRUE; | 3272 | lp->fdx = TRUE; |
3273 | case SROM_100BASEF: | 3273 | case SROM_100BASEF: |
3274 | if (lp->params.fdx && !lp->fdx) return -1; | 3274 | if (lp->params.fdx && !lp->fdx) return -1; |
3275 | lp->media = _100Mb; | 3275 | lp->media = _100Mb; |
3276 | break; | 3276 | break; |
@@ -3280,8 +3280,8 @@ srom_map_media(struct net_device *dev) | |||
3280 | lp->fdx = lp->params.fdx; | 3280 | lp->fdx = lp->params.fdx; |
3281 | break; | 3281 | break; |
3282 | 3282 | ||
3283 | default: | 3283 | default: |
3284 | printk("%s: Bad media code [%d] detected in SROM!\n", dev->name, | 3284 | printk("%s: Bad media code [%d] detected in SROM!\n", dev->name, |
3285 | lp->infoblock_media); | 3285 | lp->infoblock_media); |
3286 | return -1; | 3286 | return -1; |
3287 | break; | 3287 | break; |
@@ -3359,7 +3359,7 @@ test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, | |||
3359 | struct de4x5_private *lp = netdev_priv(dev); | 3359 | struct de4x5_private *lp = netdev_priv(dev); |
3360 | u_long iobase = dev->base_addr; | 3360 | u_long iobase = dev->base_addr; |
3361 | s32 sts, csr12; | 3361 | s32 sts, csr12; |
3362 | 3362 | ||
3363 | if (lp->timeout < 0) { | 3363 | if (lp->timeout < 0) { |
3364 | lp->timeout = msec/100; | 3364 | lp->timeout = msec/100; |
3365 | if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */ | 3365 | if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */ |
@@ -3372,22 +3372,22 @@ test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, | |||
3372 | /* clear all pending interrupts */ | 3372 | /* clear all pending interrupts */ |
3373 | sts = inl(DE4X5_STS); | 3373 | sts = inl(DE4X5_STS); |
3374 | outl(sts, DE4X5_STS); | 3374 | outl(sts, DE4X5_STS); |
3375 | 3375 | ||
3376 | /* clear csr12 NRA and SRA bits */ | 3376 | /* clear csr12 NRA and SRA bits */ |
3377 | if ((lp->chipset == DC21041) || lp->useSROM) { | 3377 | if ((lp->chipset == DC21041) || lp->useSROM) { |
3378 | csr12 = inl(DE4X5_SISR); | 3378 | csr12 = inl(DE4X5_SISR); |
3379 | outl(csr12, DE4X5_SISR); | 3379 | outl(csr12, DE4X5_SISR); |
3380 | } | 3380 | } |
3381 | } | 3381 | } |
3382 | 3382 | ||
3383 | sts = inl(DE4X5_STS) & ~TIMER_CB; | 3383 | sts = inl(DE4X5_STS) & ~TIMER_CB; |
3384 | 3384 | ||
3385 | if (!(sts & irqs) && --lp->timeout) { | 3385 | if (!(sts & irqs) && --lp->timeout) { |
3386 | sts = 100 | TIMER_CB; | 3386 | sts = 100 | TIMER_CB; |
3387 | } else { | 3387 | } else { |
3388 | lp->timeout = -1; | 3388 | lp->timeout = -1; |
3389 | } | 3389 | } |
3390 | 3390 | ||
3391 | return sts; | 3391 | return sts; |
3392 | } | 3392 | } |
3393 | 3393 | ||
@@ -3397,11 +3397,11 @@ test_tp(struct net_device *dev, s32 msec) | |||
3397 | struct de4x5_private *lp = netdev_priv(dev); | 3397 | struct de4x5_private *lp = netdev_priv(dev); |
3398 | u_long iobase = dev->base_addr; | 3398 | u_long iobase = dev->base_addr; |
3399 | int sisr; | 3399 | int sisr; |
3400 | 3400 | ||
3401 | if (lp->timeout < 0) { | 3401 | if (lp->timeout < 0) { |
3402 | lp->timeout = msec/100; | 3402 | lp->timeout = msec/100; |
3403 | } | 3403 | } |
3404 | 3404 | ||
3405 | sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR); | 3405 | sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR); |
3406 | 3406 | ||
3407 | if (sisr && --lp->timeout) { | 3407 | if (sisr && --lp->timeout) { |
@@ -3409,7 +3409,7 @@ test_tp(struct net_device *dev, s32 msec) | |||
3409 | } else { | 3409 | } else { |
3410 | lp->timeout = -1; | 3410 | lp->timeout = -1; |
3411 | } | 3411 | } |
3412 | 3412 | ||
3413 | return sisr; | 3413 | return sisr; |
3414 | } | 3414 | } |
3415 | 3415 | ||
@@ -3436,7 +3436,7 @@ test_for_100Mb(struct net_device *dev, int msec) | |||
3436 | lp->timeout = msec/SAMPLE_INTERVAL; | 3436 | lp->timeout = msec/SAMPLE_INTERVAL; |
3437 | } | 3437 | } |
3438 | } | 3438 | } |
3439 | 3439 | ||
3440 | if (lp->phy[lp->active].id || lp->useSROM) { | 3440 | if (lp->phy[lp->active].id || lp->useSROM) { |
3441 | gep = is_100_up(dev) | is_spd_100(dev); | 3441 | gep = is_100_up(dev) | is_spd_100(dev); |
3442 | } else { | 3442 | } else { |
@@ -3447,7 +3447,7 @@ test_for_100Mb(struct net_device *dev, int msec) | |||
3447 | } else { | 3447 | } else { |
3448 | lp->timeout = -1; | 3448 | lp->timeout = -1; |
3449 | } | 3449 | } |
3450 | 3450 | ||
3451 | return gep; | 3451 | return gep; |
3452 | } | 3452 | } |
3453 | 3453 | ||
@@ -3459,13 +3459,13 @@ wait_for_link(struct net_device *dev) | |||
3459 | if (lp->timeout < 0) { | 3459 | if (lp->timeout < 0) { |
3460 | lp->timeout = 1; | 3460 | lp->timeout = 1; |
3461 | } | 3461 | } |
3462 | 3462 | ||
3463 | if (lp->timeout--) { | 3463 | if (lp->timeout--) { |
3464 | return TIMER_CB; | 3464 | return TIMER_CB; |
3465 | } else { | 3465 | } else { |
3466 | lp->timeout = -1; | 3466 | lp->timeout = -1; |
3467 | } | 3467 | } |
3468 | 3468 | ||
3469 | return 0; | 3469 | return 0; |
3470 | } | 3470 | } |
3471 | 3471 | ||
@@ -3479,21 +3479,21 @@ test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec) | |||
3479 | struct de4x5_private *lp = netdev_priv(dev); | 3479 | struct de4x5_private *lp = netdev_priv(dev); |
3480 | int test; | 3480 | int test; |
3481 | u_long iobase = dev->base_addr; | 3481 | u_long iobase = dev->base_addr; |
3482 | 3482 | ||
3483 | if (lp->timeout < 0) { | 3483 | if (lp->timeout < 0) { |
3484 | lp->timeout = msec/100; | 3484 | lp->timeout = msec/100; |
3485 | } | 3485 | } |
3486 | 3486 | ||
3487 | if (pol) pol = ~0; | 3487 | if (pol) pol = ~0; |
3488 | reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask; | 3488 | reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask; |
3489 | test = (reg ^ pol) & mask; | 3489 | test = (reg ^ pol) & mask; |
3490 | 3490 | ||
3491 | if (test && --lp->timeout) { | 3491 | if (test && --lp->timeout) { |
3492 | reg = 100 | TIMER_CB; | 3492 | reg = 100 | TIMER_CB; |
3493 | } else { | 3493 | } else { |
3494 | lp->timeout = -1; | 3494 | lp->timeout = -1; |
3495 | } | 3495 | } |
3496 | 3496 | ||
3497 | return reg; | 3497 | return reg; |
3498 | } | 3498 | } |
3499 | 3499 | ||
@@ -3503,7 +3503,7 @@ is_spd_100(struct net_device *dev) | |||
3503 | struct de4x5_private *lp = netdev_priv(dev); | 3503 | struct de4x5_private *lp = netdev_priv(dev); |
3504 | u_long iobase = dev->base_addr; | 3504 | u_long iobase = dev->base_addr; |
3505 | int spd; | 3505 | int spd; |
3506 | 3506 | ||
3507 | if (lp->useMII) { | 3507 | if (lp->useMII) { |
3508 | spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII); | 3508 | spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII); |
3509 | spd = ~(spd ^ lp->phy[lp->active].spd.value); | 3509 | spd = ~(spd ^ lp->phy[lp->active].spd.value); |
@@ -3517,7 +3517,7 @@ is_spd_100(struct net_device *dev) | |||
3517 | spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) | | 3517 | spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) | |
3518 | (lp->linkOK & ~lp->asBitValid); | 3518 | (lp->linkOK & ~lp->asBitValid); |
3519 | } | 3519 | } |
3520 | 3520 | ||
3521 | return spd; | 3521 | return spd; |
3522 | } | 3522 | } |
3523 | 3523 | ||
@@ -3526,7 +3526,7 @@ is_100_up(struct net_device *dev) | |||
3526 | { | 3526 | { |
3527 | struct de4x5_private *lp = netdev_priv(dev); | 3527 | struct de4x5_private *lp = netdev_priv(dev); |
3528 | u_long iobase = dev->base_addr; | 3528 | u_long iobase = dev->base_addr; |
3529 | 3529 | ||
3530 | if (lp->useMII) { | 3530 | if (lp->useMII) { |
3531 | /* Double read for sticky bits & temporary drops */ | 3531 | /* Double read for sticky bits & temporary drops */ |
3532 | mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); | 3532 | mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); |
@@ -3547,7 +3547,7 @@ is_10_up(struct net_device *dev) | |||
3547 | { | 3547 | { |
3548 | struct de4x5_private *lp = netdev_priv(dev); | 3548 | struct de4x5_private *lp = netdev_priv(dev); |
3549 | u_long iobase = dev->base_addr; | 3549 | u_long iobase = dev->base_addr; |
3550 | 3550 | ||
3551 | if (lp->useMII) { | 3551 | if (lp->useMII) { |
3552 | /* Double read for sticky bits & temporary drops */ | 3552 | /* Double read for sticky bits & temporary drops */ |
3553 | mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); | 3553 | mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); |
@@ -3570,7 +3570,7 @@ is_anc_capable(struct net_device *dev) | |||
3570 | { | 3570 | { |
3571 | struct de4x5_private *lp = netdev_priv(dev); | 3571 | struct de4x5_private *lp = netdev_priv(dev); |
3572 | u_long iobase = dev->base_addr; | 3572 | u_long iobase = dev->base_addr; |
3573 | 3573 | ||
3574 | if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { | 3574 | if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { |
3575 | return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII)); | 3575 | return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII)); |
3576 | } else if ((lp->chipset & ~0x00ff) == DC2114x) { | 3576 | } else if ((lp->chipset & ~0x00ff) == DC2114x) { |
@@ -3590,24 +3590,24 @@ ping_media(struct net_device *dev, int msec) | |||
3590 | struct de4x5_private *lp = netdev_priv(dev); | 3590 | struct de4x5_private *lp = netdev_priv(dev); |
3591 | u_long iobase = dev->base_addr; | 3591 | u_long iobase = dev->base_addr; |
3592 | int sisr; | 3592 | int sisr; |
3593 | 3593 | ||
3594 | if (lp->timeout < 0) { | 3594 | if (lp->timeout < 0) { |
3595 | lp->timeout = msec/100; | 3595 | lp->timeout = msec/100; |
3596 | 3596 | ||
3597 | lp->tmp = lp->tx_new; /* Remember the ring position */ | 3597 | lp->tmp = lp->tx_new; /* Remember the ring position */ |
3598 | load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1); | 3598 | load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1); |
3599 | lp->tx_new = (++lp->tx_new) % lp->txRingSize; | 3599 | lp->tx_new = (++lp->tx_new) % lp->txRingSize; |
3600 | outl(POLL_DEMAND, DE4X5_TPD); | 3600 | outl(POLL_DEMAND, DE4X5_TPD); |
3601 | } | 3601 | } |
3602 | 3602 | ||
3603 | sisr = inl(DE4X5_SISR); | 3603 | sisr = inl(DE4X5_SISR); |
3604 | 3604 | ||
3605 | if ((!(sisr & SISR_NCR)) && | 3605 | if ((!(sisr & SISR_NCR)) && |
3606 | ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) && | 3606 | ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) && |
3607 | (--lp->timeout)) { | 3607 | (--lp->timeout)) { |
3608 | sisr = 100 | TIMER_CB; | 3608 | sisr = 100 | TIMER_CB; |
3609 | } else { | 3609 | } else { |
3610 | if ((!(sisr & SISR_NCR)) && | 3610 | if ((!(sisr & SISR_NCR)) && |
3611 | !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) && | 3611 | !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) && |
3612 | lp->timeout) { | 3612 | lp->timeout) { |
3613 | sisr = 0; | 3613 | sisr = 0; |
@@ -3616,7 +3616,7 @@ ping_media(struct net_device *dev, int msec) | |||
3616 | } | 3616 | } |
3617 | lp->timeout = -1; | 3617 | lp->timeout = -1; |
3618 | } | 3618 | } |
3619 | 3619 | ||
3620 | return sisr; | 3620 | return sisr; |
3621 | } | 3621 | } |
3622 | 3622 | ||
@@ -3668,7 +3668,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len) | |||
3668 | } else { /* Linear buffer */ | 3668 | } else { /* Linear buffer */ |
3669 | memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len); | 3669 | memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len); |
3670 | } | 3670 | } |
3671 | 3671 | ||
3672 | return p; | 3672 | return p; |
3673 | #endif | 3673 | #endif |
3674 | } | 3674 | } |
@@ -3751,23 +3751,23 @@ de4x5_rst_desc_ring(struct net_device *dev) | |||
3751 | outl(lp->dma_rings, DE4X5_RRBA); | 3751 | outl(lp->dma_rings, DE4X5_RRBA); |
3752 | outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), | 3752 | outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), |
3753 | DE4X5_TRBA); | 3753 | DE4X5_TRBA); |
3754 | 3754 | ||
3755 | lp->rx_new = lp->rx_old = 0; | 3755 | lp->rx_new = lp->rx_old = 0; |
3756 | lp->tx_new = lp->tx_old = 0; | 3756 | lp->tx_new = lp->tx_old = 0; |
3757 | 3757 | ||
3758 | for (i = 0; i < lp->rxRingSize; i++) { | 3758 | for (i = 0; i < lp->rxRingSize; i++) { |
3759 | lp->rx_ring[i].status = cpu_to_le32(R_OWN); | 3759 | lp->rx_ring[i].status = cpu_to_le32(R_OWN); |
3760 | } | 3760 | } |
3761 | 3761 | ||
3762 | for (i = 0; i < lp->txRingSize; i++) { | 3762 | for (i = 0; i < lp->txRingSize; i++) { |
3763 | lp->tx_ring[i].status = cpu_to_le32(0); | 3763 | lp->tx_ring[i].status = cpu_to_le32(0); |
3764 | } | 3764 | } |
3765 | 3765 | ||
3766 | barrier(); | 3766 | barrier(); |
3767 | lp->cache.save_cnt--; | 3767 | lp->cache.save_cnt--; |
3768 | START_DE4X5; | 3768 | START_DE4X5; |
3769 | } | 3769 | } |
3770 | 3770 | ||
3771 | return; | 3771 | return; |
3772 | } | 3772 | } |
3773 | 3773 | ||
@@ -3792,7 +3792,7 @@ de4x5_cache_state(struct net_device *dev, int flag) | |||
3792 | gep_wr(lp->cache.gepc, dev); | 3792 | gep_wr(lp->cache.gepc, dev); |
3793 | gep_wr(lp->cache.gep, dev); | 3793 | gep_wr(lp->cache.gep, dev); |
3794 | } else { | 3794 | } else { |
3795 | reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, | 3795 | reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, |
3796 | lp->cache.csr15); | 3796 | lp->cache.csr15); |
3797 | } | 3797 | } |
3798 | break; | 3798 | break; |
@@ -3854,25 +3854,25 @@ test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec) | |||
3854 | struct de4x5_private *lp = netdev_priv(dev); | 3854 | struct de4x5_private *lp = netdev_priv(dev); |
3855 | u_long iobase = dev->base_addr; | 3855 | u_long iobase = dev->base_addr; |
3856 | s32 sts, ans; | 3856 | s32 sts, ans; |
3857 | 3857 | ||
3858 | if (lp->timeout < 0) { | 3858 | if (lp->timeout < 0) { |
3859 | lp->timeout = msec/100; | 3859 | lp->timeout = msec/100; |
3860 | outl(irq_mask, DE4X5_IMR); | 3860 | outl(irq_mask, DE4X5_IMR); |
3861 | 3861 | ||
3862 | /* clear all pending interrupts */ | 3862 | /* clear all pending interrupts */ |
3863 | sts = inl(DE4X5_STS); | 3863 | sts = inl(DE4X5_STS); |
3864 | outl(sts, DE4X5_STS); | 3864 | outl(sts, DE4X5_STS); |
3865 | } | 3865 | } |
3866 | 3866 | ||
3867 | ans = inl(DE4X5_SISR) & SISR_ANS; | 3867 | ans = inl(DE4X5_SISR) & SISR_ANS; |
3868 | sts = inl(DE4X5_STS) & ~TIMER_CB; | 3868 | sts = inl(DE4X5_STS) & ~TIMER_CB; |
3869 | 3869 | ||
3870 | if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) { | 3870 | if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) { |
3871 | sts = 100 | TIMER_CB; | 3871 | sts = 100 | TIMER_CB; |
3872 | } else { | 3872 | } else { |
3873 | lp->timeout = -1; | 3873 | lp->timeout = -1; |
3874 | } | 3874 | } |
3875 | 3875 | ||
3876 | return sts; | 3876 | return sts; |
3877 | } | 3877 | } |
3878 | 3878 | ||
@@ -3882,7 +3882,7 @@ de4x5_setup_intr(struct net_device *dev) | |||
3882 | struct de4x5_private *lp = netdev_priv(dev); | 3882 | struct de4x5_private *lp = netdev_priv(dev); |
3883 | u_long iobase = dev->base_addr; | 3883 | u_long iobase = dev->base_addr; |
3884 | s32 imr, sts; | 3884 | s32 imr, sts; |
3885 | 3885 | ||
3886 | if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */ | 3886 | if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */ |
3887 | imr = 0; | 3887 | imr = 0; |
3888 | UNMASK_IRQs; | 3888 | UNMASK_IRQs; |
@@ -3890,7 +3890,7 @@ de4x5_setup_intr(struct net_device *dev) | |||
3890 | outl(sts, DE4X5_STS); | 3890 | outl(sts, DE4X5_STS); |
3891 | ENABLE_IRQs; | 3891 | ENABLE_IRQs; |
3892 | } | 3892 | } |
3893 | 3893 | ||
3894 | return; | 3894 | return; |
3895 | } | 3895 | } |
3896 | 3896 | ||
@@ -3936,17 +3936,17 @@ create_packet(struct net_device *dev, char *frame, int len) | |||
3936 | { | 3936 | { |
3937 | int i; | 3937 | int i; |
3938 | char *buf = frame; | 3938 | char *buf = frame; |
3939 | 3939 | ||
3940 | for (i=0; i<ETH_ALEN; i++) { /* Use this source address */ | 3940 | for (i=0; i<ETH_ALEN; i++) { /* Use this source address */ |
3941 | *buf++ = dev->dev_addr[i]; | 3941 | *buf++ = dev->dev_addr[i]; |
3942 | } | 3942 | } |
3943 | for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */ | 3943 | for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */ |
3944 | *buf++ = dev->dev_addr[i]; | 3944 | *buf++ = dev->dev_addr[i]; |
3945 | } | 3945 | } |
3946 | 3946 | ||
3947 | *buf++ = 0; /* Packet length (2 bytes) */ | 3947 | *buf++ = 0; /* Packet length (2 bytes) */ |
3948 | *buf++ = 1; | 3948 | *buf++ = 1; |
3949 | 3949 | ||
3950 | return; | 3950 | return; |
3951 | } | 3951 | } |
3952 | 3952 | ||
@@ -3978,7 +3978,7 @@ static int | |||
3978 | PCI_signature(char *name, struct de4x5_private *lp) | 3978 | PCI_signature(char *name, struct de4x5_private *lp) |
3979 | { | 3979 | { |
3980 | int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *); | 3980 | int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *); |
3981 | 3981 | ||
3982 | if (lp->chipset == DC21040) { | 3982 | if (lp->chipset == DC21040) { |
3983 | strcpy(name, "DE434/5"); | 3983 | strcpy(name, "DE434/5"); |
3984 | return status; | 3984 | return status; |
@@ -4007,7 +4007,7 @@ PCI_signature(char *name, struct de4x5_private *lp) | |||
4007 | } else if ((lp->chipset & ~0x00ff) == DC2114x) { | 4007 | } else if ((lp->chipset & ~0x00ff) == DC2114x) { |
4008 | lp->useSROM = TRUE; | 4008 | lp->useSROM = TRUE; |
4009 | } | 4009 | } |
4010 | 4010 | ||
4011 | return status; | 4011 | return status; |
4012 | } | 4012 | } |
4013 | 4013 | ||
@@ -4024,7 +4024,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr) | |||
4024 | { | 4024 | { |
4025 | int i, j=0; | 4025 | int i, j=0; |
4026 | struct de4x5_private *lp = netdev_priv(dev); | 4026 | struct de4x5_private *lp = netdev_priv(dev); |
4027 | 4027 | ||
4028 | if (lp->chipset == DC21040) { | 4028 | if (lp->chipset == DC21040) { |
4029 | if (lp->bus == EISA) { | 4029 | if (lp->bus == EISA) { |
4030 | enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */ | 4030 | enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */ |
@@ -4049,7 +4049,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr) | |||
4049 | } | 4049 | } |
4050 | de4x5_dbg_srom((struct de4x5_srom *)&lp->srom); | 4050 | de4x5_dbg_srom((struct de4x5_srom *)&lp->srom); |
4051 | } | 4051 | } |
4052 | 4052 | ||
4053 | return; | 4053 | return; |
4054 | } | 4054 | } |
4055 | 4055 | ||
@@ -4071,11 +4071,11 @@ enet_addr_rst(u_long aprom_addr) | |||
4071 | short sigLength=0; | 4071 | short sigLength=0; |
4072 | s8 data; | 4072 | s8 data; |
4073 | int i, j; | 4073 | int i, j; |
4074 | 4074 | ||
4075 | dev.llsig.a = ETH_PROM_SIG; | 4075 | dev.llsig.a = ETH_PROM_SIG; |
4076 | dev.llsig.b = ETH_PROM_SIG; | 4076 | dev.llsig.b = ETH_PROM_SIG; |
4077 | sigLength = sizeof(u32) << 1; | 4077 | sigLength = sizeof(u32) << 1; |
4078 | 4078 | ||
4079 | for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) { | 4079 | for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) { |
4080 | data = inb(aprom_addr); | 4080 | data = inb(aprom_addr); |
4081 | if (dev.Sig[j] == data) { /* track signature */ | 4081 | if (dev.Sig[j] == data) { /* track signature */ |
@@ -4088,7 +4088,7 @@ enet_addr_rst(u_long aprom_addr) | |||
4088 | } | 4088 | } |
4089 | } | 4089 | } |
4090 | } | 4090 | } |
4091 | 4091 | ||
4092 | return; | 4092 | return; |
4093 | } | 4093 | } |
4094 | 4094 | ||
@@ -4111,7 +4111,7 @@ get_hw_addr(struct net_device *dev) | |||
4111 | for (i=0,k=0,j=0;j<3;j++) { | 4111 | for (i=0,k=0,j=0;j<3;j++) { |
4112 | k <<= 1; | 4112 | k <<= 1; |
4113 | if (k > 0xffff) k-=0xffff; | 4113 | if (k > 0xffff) k-=0xffff; |
4114 | 4114 | ||
4115 | if (lp->bus == PCI) { | 4115 | if (lp->bus == PCI) { |
4116 | if (lp->chipset == DC21040) { | 4116 | if (lp->chipset == DC21040) { |
4117 | while ((tmp = inl(DE4X5_APROM)) < 0); | 4117 | while ((tmp = inl(DE4X5_APROM)) < 0); |
@@ -4133,11 +4133,11 @@ get_hw_addr(struct net_device *dev) | |||
4133 | k += (u_short) ((tmp = inb(EISA_APROM)) << 8); | 4133 | k += (u_short) ((tmp = inb(EISA_APROM)) << 8); |
4134 | dev->dev_addr[i++] = (u_char) tmp; | 4134 | dev->dev_addr[i++] = (u_char) tmp; |
4135 | } | 4135 | } |
4136 | 4136 | ||
4137 | if (k > 0xffff) k-=0xffff; | 4137 | if (k > 0xffff) k-=0xffff; |
4138 | } | 4138 | } |
4139 | if (k == 0xffff) k=0; | 4139 | if (k == 0xffff) k=0; |
4140 | 4140 | ||
4141 | if (lp->bus == PCI) { | 4141 | if (lp->bus == PCI) { |
4142 | if (lp->chipset == DC21040) { | 4142 | if (lp->chipset == DC21040) { |
4143 | while ((tmp = inl(DE4X5_APROM)) < 0); | 4143 | while ((tmp = inl(DE4X5_APROM)) < 0); |
@@ -4156,7 +4156,7 @@ get_hw_addr(struct net_device *dev) | |||
4156 | srom_repair(dev, broken); | 4156 | srom_repair(dev, broken); |
4157 | 4157 | ||
4158 | #ifdef CONFIG_PPC_MULTIPLATFORM | 4158 | #ifdef CONFIG_PPC_MULTIPLATFORM |
4159 | /* | 4159 | /* |
4160 | ** If the address starts with 00 a0, we have to bit-reverse | 4160 | ** If the address starts with 00 a0, we have to bit-reverse |
4161 | ** each byte of the address. | 4161 | ** each byte of the address. |
4162 | */ | 4162 | */ |
@@ -4245,7 +4245,7 @@ test_bad_enet(struct net_device *dev, int status) | |||
4245 | 4245 | ||
4246 | for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i]; | 4246 | for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i]; |
4247 | if ((tmp == 0) || (tmp == 0x5fa)) { | 4247 | if ((tmp == 0) || (tmp == 0x5fa)) { |
4248 | if ((lp->chipset == last.chipset) && | 4248 | if ((lp->chipset == last.chipset) && |
4249 | (lp->bus_num == last.bus) && (lp->bus_num > 0)) { | 4249 | (lp->bus_num == last.bus) && (lp->bus_num > 0)) { |
4250 | for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i]; | 4250 | for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i]; |
4251 | for (i=ETH_ALEN-1; i>2; --i) { | 4251 | for (i=ETH_ALEN-1; i>2; --i) { |
@@ -4275,7 +4275,7 @@ test_bad_enet(struct net_device *dev, int status) | |||
4275 | static int | 4275 | static int |
4276 | an_exception(struct de4x5_private *lp) | 4276 | an_exception(struct de4x5_private *lp) |
4277 | { | 4277 | { |
4278 | if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) && | 4278 | if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) && |
4279 | (*(u_short *)lp->srom.sub_system_id == 0x95e0)) { | 4279 | (*(u_short *)lp->srom.sub_system_id == 0x95e0)) { |
4280 | return -1; | 4280 | return -1; |
4281 | } | 4281 | } |
@@ -4290,11 +4290,11 @@ static short | |||
4290 | srom_rd(u_long addr, u_char offset) | 4290 | srom_rd(u_long addr, u_char offset) |
4291 | { | 4291 | { |
4292 | sendto_srom(SROM_RD | SROM_SR, addr); | 4292 | sendto_srom(SROM_RD | SROM_SR, addr); |
4293 | 4293 | ||
4294 | srom_latch(SROM_RD | SROM_SR | DT_CS, addr); | 4294 | srom_latch(SROM_RD | SROM_SR | DT_CS, addr); |
4295 | srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr); | 4295 | srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr); |
4296 | srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset); | 4296 | srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset); |
4297 | 4297 | ||
4298 | return srom_data(SROM_RD | SROM_SR | DT_CS, addr); | 4298 | return srom_data(SROM_RD | SROM_SR | DT_CS, addr); |
4299 | } | 4299 | } |
4300 | 4300 | ||
@@ -4304,7 +4304,7 @@ srom_latch(u_int command, u_long addr) | |||
4304 | sendto_srom(command, addr); | 4304 | sendto_srom(command, addr); |
4305 | sendto_srom(command | DT_CLK, addr); | 4305 | sendto_srom(command | DT_CLK, addr); |
4306 | sendto_srom(command, addr); | 4306 | sendto_srom(command, addr); |
4307 | 4307 | ||
4308 | return; | 4308 | return; |
4309 | } | 4309 | } |
4310 | 4310 | ||
@@ -4314,7 +4314,7 @@ srom_command(u_int command, u_long addr) | |||
4314 | srom_latch(command, addr); | 4314 | srom_latch(command, addr); |
4315 | srom_latch(command, addr); | 4315 | srom_latch(command, addr); |
4316 | srom_latch((command & 0x0000ff00) | DT_CS, addr); | 4316 | srom_latch((command & 0x0000ff00) | DT_CS, addr); |
4317 | 4317 | ||
4318 | return; | 4318 | return; |
4319 | } | 4319 | } |
4320 | 4320 | ||
@@ -4322,15 +4322,15 @@ static void | |||
4322 | srom_address(u_int command, u_long addr, u_char offset) | 4322 | srom_address(u_int command, u_long addr, u_char offset) |
4323 | { | 4323 | { |
4324 | int i, a; | 4324 | int i, a; |
4325 | 4325 | ||
4326 | a = offset << 2; | 4326 | a = offset << 2; |
4327 | for (i=0; i<6; i++, a <<= 1) { | 4327 | for (i=0; i<6; i++, a <<= 1) { |
4328 | srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr); | 4328 | srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr); |
4329 | } | 4329 | } |
4330 | udelay(1); | 4330 | udelay(1); |
4331 | 4331 | ||
4332 | i = (getfrom_srom(addr) >> 3) & 0x01; | 4332 | i = (getfrom_srom(addr) >> 3) & 0x01; |
4333 | 4333 | ||
4334 | return; | 4334 | return; |
4335 | } | 4335 | } |
4336 | 4336 | ||
@@ -4340,17 +4340,17 @@ srom_data(u_int command, u_long addr) | |||
4340 | int i; | 4340 | int i; |
4341 | short word = 0; | 4341 | short word = 0; |
4342 | s32 tmp; | 4342 | s32 tmp; |
4343 | 4343 | ||
4344 | for (i=0; i<16; i++) { | 4344 | for (i=0; i<16; i++) { |
4345 | sendto_srom(command | DT_CLK, addr); | 4345 | sendto_srom(command | DT_CLK, addr); |
4346 | tmp = getfrom_srom(addr); | 4346 | tmp = getfrom_srom(addr); |
4347 | sendto_srom(command, addr); | 4347 | sendto_srom(command, addr); |
4348 | 4348 | ||
4349 | word = (word << 1) | ((tmp >> 3) & 0x01); | 4349 | word = (word << 1) | ((tmp >> 3) & 0x01); |
4350 | } | 4350 | } |
4351 | 4351 | ||
4352 | sendto_srom(command & 0x0000ff00, addr); | 4352 | sendto_srom(command & 0x0000ff00, addr); |
4353 | 4353 | ||
4354 | return word; | 4354 | return word; |
4355 | } | 4355 | } |
4356 | 4356 | ||
@@ -4359,13 +4359,13 @@ static void | |||
4359 | srom_busy(u_int command, u_long addr) | 4359 | srom_busy(u_int command, u_long addr) |
4360 | { | 4360 | { |
4361 | sendto_srom((command & 0x0000ff00) | DT_CS, addr); | 4361 | sendto_srom((command & 0x0000ff00) | DT_CS, addr); |
4362 | 4362 | ||
4363 | while (!((getfrom_srom(addr) >> 3) & 0x01)) { | 4363 | while (!((getfrom_srom(addr) >> 3) & 0x01)) { |
4364 | mdelay(1); | 4364 | mdelay(1); |
4365 | } | 4365 | } |
4366 | 4366 | ||
4367 | sendto_srom(command & 0x0000ff00, addr); | 4367 | sendto_srom(command & 0x0000ff00, addr); |
4368 | 4368 | ||
4369 | return; | 4369 | return; |
4370 | } | 4370 | } |
4371 | */ | 4371 | */ |
@@ -4375,7 +4375,7 @@ sendto_srom(u_int command, u_long addr) | |||
4375 | { | 4375 | { |
4376 | outl(command, addr); | 4376 | outl(command, addr); |
4377 | udelay(1); | 4377 | udelay(1); |
4378 | 4378 | ||
4379 | return; | 4379 | return; |
4380 | } | 4380 | } |
4381 | 4381 | ||
@@ -4383,10 +4383,10 @@ static int | |||
4383 | getfrom_srom(u_long addr) | 4383 | getfrom_srom(u_long addr) |
4384 | { | 4384 | { |
4385 | s32 tmp; | 4385 | s32 tmp; |
4386 | 4386 | ||
4387 | tmp = inl(addr); | 4387 | tmp = inl(addr); |
4388 | udelay(1); | 4388 | udelay(1); |
4389 | 4389 | ||
4390 | return tmp; | 4390 | return tmp; |
4391 | } | 4391 | } |
4392 | 4392 | ||
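The srom_* hunks above bit-bang a 93C46-style serial EEPROM through the chip's SROM interface register: select the part, shift out a start bit and the read opcode, shift out six address bits, then clock sixteen data bits back in. A rough standalone sketch of that read sequence follows, with hypothetical emit/sample callbacks standing in for sendto_srom()/getfrom_srom():

/*
 * Hedged sketch (not part of the patch): generic 93C46-style read.
 * emit_bit()/sample_bit() are hypothetical stand-ins for the driver's
 * register accesses.
 */
#include <stdint.h>

struct eeprom_ops {
	void (*emit_bit)(void *ctx, int bit);	/* drive DI, pulse CLK */
	int  (*sample_bit)(void *ctx);		/* pulse CLK, read DO  */
};

static uint16_t eeprom_read_word(const struct eeprom_ops *ops, void *ctx,
				 uint8_t offset)
{
	uint16_t word = 0;
	int i;

	ops->emit_bit(ctx, 1);			/* start bit          */
	ops->emit_bit(ctx, 1);			/* READ opcode = 10   */
	ops->emit_bit(ctx, 0);

	for (i = 5; i >= 0; i--)		/* 6 address bits, MSB first */
		ops->emit_bit(ctx, (offset >> i) & 1);

	for (i = 0; i < 16; i++)		/* 16 data bits, MSB first   */
		word = (word << 1) | (ops->sample_bit(ctx) & 1);

	return word;
}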
@@ -4403,7 +4403,7 @@ srom_infoleaf_info(struct net_device *dev) | |||
4403 | } | 4403 | } |
4404 | if (i == INFOLEAF_SIZE) { | 4404 | if (i == INFOLEAF_SIZE) { |
4405 | lp->useSROM = FALSE; | 4405 | lp->useSROM = FALSE; |
4406 | printk("%s: Cannot find correct chipset for SROM decoding!\n", | 4406 | printk("%s: Cannot find correct chipset for SROM decoding!\n", |
4407 | dev->name); | 4407 | dev->name); |
4408 | return -ENXIO; | 4408 | return -ENXIO; |
4409 | } | 4409 | } |
@@ -4420,7 +4420,7 @@ srom_infoleaf_info(struct net_device *dev) | |||
4420 | } | 4420 | } |
4421 | if (i == 0) { | 4421 | if (i == 0) { |
4422 | lp->useSROM = FALSE; | 4422 | lp->useSROM = FALSE; |
4423 | printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n", | 4423 | printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n", |
4424 | dev->name, lp->device); | 4424 | dev->name, lp->device); |
4425 | return -ENXIO; | 4425 | return -ENXIO; |
4426 | } | 4426 | } |
@@ -4494,9 +4494,9 @@ srom_exec(struct net_device *dev, u_char *p) | |||
4494 | if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return; | 4494 | if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return; |
4495 | 4495 | ||
4496 | if (lp->chipset != DC21140) RESET_SIA; | 4496 | if (lp->chipset != DC21140) RESET_SIA; |
4497 | 4497 | ||
4498 | while (count--) { | 4498 | while (count--) { |
4499 | gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? | 4499 | gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? |
4500 | *p++ : TWIDDLE(w++)), dev); | 4500 | *p++ : TWIDDLE(w++)), dev); |
4501 | mdelay(2); /* 2ms per action */ | 4501 | mdelay(2); /* 2ms per action */ |
4502 | } | 4502 | } |
@@ -4514,13 +4514,13 @@ srom_exec(struct net_device *dev, u_char *p) | |||
4514 | ** unless I implement the DC21041 SROM functions. There's no need | 4514 | ** unless I implement the DC21041 SROM functions. There's no need |
4515 | ** since the existing code will be satisfactory for all boards. | 4515 | ** since the existing code will be satisfactory for all boards. |
4516 | */ | 4516 | */ |
4517 | static int | 4517 | static int |
4518 | dc21041_infoleaf(struct net_device *dev) | 4518 | dc21041_infoleaf(struct net_device *dev) |
4519 | { | 4519 | { |
4520 | return DE4X5_AUTOSENSE_MS; | 4520 | return DE4X5_AUTOSENSE_MS; |
4521 | } | 4521 | } |
4522 | 4522 | ||
4523 | static int | 4523 | static int |
4524 | dc21140_infoleaf(struct net_device *dev) | 4524 | dc21140_infoleaf(struct net_device *dev) |
4525 | { | 4525 | { |
4526 | struct de4x5_private *lp = netdev_priv(dev); | 4526 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -4558,7 +4558,7 @@ dc21140_infoleaf(struct net_device *dev) | |||
4558 | return next_tick & ~TIMER_CB; | 4558 | return next_tick & ~TIMER_CB; |
4559 | } | 4559 | } |
4560 | 4560 | ||
4561 | static int | 4561 | static int |
4562 | dc21142_infoleaf(struct net_device *dev) | 4562 | dc21142_infoleaf(struct net_device *dev) |
4563 | { | 4563 | { |
4564 | struct de4x5_private *lp = netdev_priv(dev); | 4564 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -4593,7 +4593,7 @@ dc21142_infoleaf(struct net_device *dev) | |||
4593 | return next_tick & ~TIMER_CB; | 4593 | return next_tick & ~TIMER_CB; |
4594 | } | 4594 | } |
4595 | 4595 | ||
4596 | static int | 4596 | static int |
4597 | dc21143_infoleaf(struct net_device *dev) | 4597 | dc21143_infoleaf(struct net_device *dev) |
4598 | { | 4598 | { |
4599 | struct de4x5_private *lp = netdev_priv(dev); | 4599 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -4631,7 +4631,7 @@ dc21143_infoleaf(struct net_device *dev) | |||
4631 | ** The compact infoblock is only designed for DC21140[A] chips, so | 4631 | ** The compact infoblock is only designed for DC21140[A] chips, so |
4632 | ** we'll reuse the dc21140m_autoconf function. Non MII media only. | 4632 | ** we'll reuse the dc21140m_autoconf function. Non MII media only. |
4633 | */ | 4633 | */ |
4634 | static int | 4634 | static int |
4635 | compact_infoblock(struct net_device *dev, u_char count, u_char *p) | 4635 | compact_infoblock(struct net_device *dev, u_char count, u_char *p) |
4636 | { | 4636 | { |
4637 | struct de4x5_private *lp = netdev_priv(dev); | 4637 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -4671,7 +4671,7 @@ compact_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4671 | /* | 4671 | /* |
4672 | ** This block describes non MII media for the DC21140[A] only. | 4672 | ** This block describes non MII media for the DC21140[A] only. |
4673 | */ | 4673 | */ |
4674 | static int | 4674 | static int |
4675 | type0_infoblock(struct net_device *dev, u_char count, u_char *p) | 4675 | type0_infoblock(struct net_device *dev, u_char count, u_char *p) |
4676 | { | 4676 | { |
4677 | struct de4x5_private *lp = netdev_priv(dev); | 4677 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -4711,7 +4711,7 @@ type0_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4711 | 4711 | ||
4712 | /* These functions are under construction! */ | 4712 | /* These functions are under construction! */ |
4713 | 4713 | ||
4714 | static int | 4714 | static int |
4715 | type1_infoblock(struct net_device *dev, u_char count, u_char *p) | 4715 | type1_infoblock(struct net_device *dev, u_char count, u_char *p) |
4716 | { | 4716 | { |
4717 | struct de4x5_private *lp = netdev_priv(dev); | 4717 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -4750,7 +4750,7 @@ type1_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4750 | return dc21140m_autoconf(dev); | 4750 | return dc21140m_autoconf(dev); |
4751 | } | 4751 | } |
4752 | 4752 | ||
4753 | static int | 4753 | static int |
4754 | type2_infoblock(struct net_device *dev, u_char count, u_char *p) | 4754 | type2_infoblock(struct net_device *dev, u_char count, u_char *p) |
4755 | { | 4755 | { |
4756 | struct de4x5_private *lp = netdev_priv(dev); | 4756 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -4791,7 +4791,7 @@ type2_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4791 | return dc2114x_autoconf(dev); | 4791 | return dc2114x_autoconf(dev); |
4792 | } | 4792 | } |
4793 | 4793 | ||
4794 | static int | 4794 | static int |
4795 | type3_infoblock(struct net_device *dev, u_char count, u_char *p) | 4795 | type3_infoblock(struct net_device *dev, u_char count, u_char *p) |
4796 | { | 4796 | { |
4797 | struct de4x5_private *lp = netdev_priv(dev); | 4797 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -4833,7 +4833,7 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4833 | return dc2114x_autoconf(dev); | 4833 | return dc2114x_autoconf(dev); |
4834 | } | 4834 | } |
4835 | 4835 | ||
4836 | static int | 4836 | static int |
4837 | type4_infoblock(struct net_device *dev, u_char count, u_char *p) | 4837 | type4_infoblock(struct net_device *dev, u_char count, u_char *p) |
4838 | { | 4838 | { |
4839 | struct de4x5_private *lp = netdev_priv(dev); | 4839 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -4878,7 +4878,7 @@ type4_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4878 | ** This block type provides information for resetting external devices | 4878 | ** This block type provides information for resetting external devices |
4879 | ** (chips) through the General Purpose Register. | 4879 | ** (chips) through the General Purpose Register. |
4880 | */ | 4880 | */ |
4881 | static int | 4881 | static int |
4882 | type5_infoblock(struct net_device *dev, u_char count, u_char *p) | 4882 | type5_infoblock(struct net_device *dev, u_char count, u_char *p) |
4883 | { | 4883 | { |
4884 | struct de4x5_private *lp = netdev_priv(dev); | 4884 | struct de4x5_private *lp = netdev_priv(dev); |
@@ -4916,7 +4916,7 @@ mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr) | |||
4916 | mii_address(phyaddr, ioaddr); /* PHY address to be accessed */ | 4916 | mii_address(phyaddr, ioaddr); /* PHY address to be accessed */ |
4917 | mii_address(phyreg, ioaddr); /* PHY Register to read */ | 4917 | mii_address(phyreg, ioaddr); /* PHY Register to read */ |
4918 | mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */ | 4918 | mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */ |
4919 | 4919 | ||
4920 | return mii_rdata(ioaddr); /* Read data */ | 4920 | return mii_rdata(ioaddr); /* Read data */ |
4921 | } | 4921 | } |
4922 | 4922 | ||
@@ -4931,7 +4931,7 @@ mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr) | |||
4931 | mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */ | 4931 | mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */ |
4932 | data = mii_swap(data, 16); /* Swap data bit ordering */ | 4932 | data = mii_swap(data, 16); /* Swap data bit ordering */ |
4933 | mii_wdata(data, 16, ioaddr); /* Write data */ | 4933 | mii_wdata(data, 16, ioaddr); /* Write data */ |
4934 | 4934 | ||
4935 | return; | 4935 | return; |
4936 | } | 4936 | } |
4937 | 4937 | ||
@@ -4940,12 +4940,12 @@ mii_rdata(u_long ioaddr) | |||
4940 | { | 4940 | { |
4941 | int i; | 4941 | int i; |
4942 | s32 tmp = 0; | 4942 | s32 tmp = 0; |
4943 | 4943 | ||
4944 | for (i=0; i<16; i++) { | 4944 | for (i=0; i<16; i++) { |
4945 | tmp <<= 1; | 4945 | tmp <<= 1; |
4946 | tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr); | 4946 | tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr); |
4947 | } | 4947 | } |
4948 | 4948 | ||
4949 | return tmp; | 4949 | return tmp; |
4950 | } | 4950 | } |
4951 | 4951 | ||
@@ -4953,12 +4953,12 @@ static void | |||
4953 | mii_wdata(int data, int len, u_long ioaddr) | 4953 | mii_wdata(int data, int len, u_long ioaddr) |
4954 | { | 4954 | { |
4955 | int i; | 4955 | int i; |
4956 | 4956 | ||
4957 | for (i=0; i<len; i++) { | 4957 | for (i=0; i<len; i++) { |
4958 | sendto_mii(MII_MWR | MII_WR, data, ioaddr); | 4958 | sendto_mii(MII_MWR | MII_WR, data, ioaddr); |
4959 | data >>= 1; | 4959 | data >>= 1; |
4960 | } | 4960 | } |
4961 | 4961 | ||
4962 | return; | 4962 | return; |
4963 | } | 4963 | } |
4964 | 4964 | ||
@@ -4966,13 +4966,13 @@ static void | |||
4966 | mii_address(u_char addr, u_long ioaddr) | 4966 | mii_address(u_char addr, u_long ioaddr) |
4967 | { | 4967 | { |
4968 | int i; | 4968 | int i; |
4969 | 4969 | ||
4970 | addr = mii_swap(addr, 5); | 4970 | addr = mii_swap(addr, 5); |
4971 | for (i=0; i<5; i++) { | 4971 | for (i=0; i<5; i++) { |
4972 | sendto_mii(MII_MWR | MII_WR, addr, ioaddr); | 4972 | sendto_mii(MII_MWR | MII_WR, addr, ioaddr); |
4973 | addr >>= 1; | 4973 | addr >>= 1; |
4974 | } | 4974 | } |
4975 | 4975 | ||
4976 | return; | 4976 | return; |
4977 | } | 4977 | } |
4978 | 4978 | ||
@@ -4980,12 +4980,12 @@ static void | |||
4980 | mii_ta(u_long rw, u_long ioaddr) | 4980 | mii_ta(u_long rw, u_long ioaddr) |
4981 | { | 4981 | { |
4982 | if (rw == MII_STWR) { | 4982 | if (rw == MII_STWR) { |
4983 | sendto_mii(MII_MWR | MII_WR, 1, ioaddr); | 4983 | sendto_mii(MII_MWR | MII_WR, 1, ioaddr); |
4984 | sendto_mii(MII_MWR | MII_WR, 0, ioaddr); | 4984 | sendto_mii(MII_MWR | MII_WR, 0, ioaddr); |
4985 | } else { | 4985 | } else { |
4986 | getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */ | 4986 | getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */ |
4987 | } | 4987 | } |
4988 | 4988 | ||
4989 | return; | 4989 | return; |
4990 | } | 4990 | } |
4991 | 4991 | ||
@@ -4993,13 +4993,13 @@ static int | |||
4993 | mii_swap(int data, int len) | 4993 | mii_swap(int data, int len) |
4994 | { | 4994 | { |
4995 | int i, tmp = 0; | 4995 | int i, tmp = 0; |
4996 | 4996 | ||
4997 | for (i=0; i<len; i++) { | 4997 | for (i=0; i<len; i++) { |
4998 | tmp <<= 1; | 4998 | tmp <<= 1; |
4999 | tmp |= (data & 1); | 4999 | tmp |= (data & 1); |
5000 | data >>= 1; | 5000 | data >>= 1; |
5001 | } | 5001 | } |
5002 | 5002 | ||
5003 | return tmp; | 5003 | return tmp; |
5004 | } | 5004 | } |
5005 | 5005 | ||
@@ -5007,13 +5007,13 @@ static void | |||
5007 | sendto_mii(u32 command, int data, u_long ioaddr) | 5007 | sendto_mii(u32 command, int data, u_long ioaddr) |
5008 | { | 5008 | { |
5009 | u32 j; | 5009 | u32 j; |
5010 | 5010 | ||
5011 | j = (data & 1) << 17; | 5011 | j = (data & 1) << 17; |
5012 | outl(command | j, ioaddr); | 5012 | outl(command | j, ioaddr); |
5013 | udelay(1); | 5013 | udelay(1); |
5014 | outl(command | MII_MDC | j, ioaddr); | 5014 | outl(command | MII_MDC | j, ioaddr); |
5015 | udelay(1); | 5015 | udelay(1); |
5016 | 5016 | ||
5017 | return; | 5017 | return; |
5018 | } | 5018 | } |
5019 | 5019 | ||
@@ -5024,7 +5024,7 @@ getfrom_mii(u32 command, u_long ioaddr) | |||
5024 | udelay(1); | 5024 | udelay(1); |
5025 | outl(command | MII_MDC, ioaddr); | 5025 | outl(command | MII_MDC, ioaddr); |
5026 | udelay(1); | 5026 | udelay(1); |
5027 | 5027 | ||
5028 | return ((inl(ioaddr) >> 19) & 1); | 5028 | return ((inl(ioaddr) >> 19) & 1); |
5029 | } | 5029 | } |
5030 | 5030 | ||
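mii_rd() and mii_wr() above bit-bang the standard IEEE 802.3 clause-22 MDIO management frame: a preamble of ones, start bits 01, a 2-bit opcode (10 for read, 01 for write), the 5-bit PHY address, the 5-bit register address, a turnaround, and 16 data bits. A compact sketch of the read side, with hypothetical bit-level callbacks standing in for sendto_mii()/getfrom_mii():

/* Hedged sketch of a clause-22 MDIO read frame; illustration only. */
#include <stdint.h>

typedef void (*mdio_out_fn)(void *ctx, int bit);	/* MDIO=bit, pulse MDC */
typedef int  (*mdio_in_fn)(void *ctx);			/* pulse MDC, sample MDIO */

static uint16_t mdio_read(void *ctx, mdio_out_fn out, mdio_in_fn in,
			  uint8_t phy, uint8_t reg)
{
	uint32_t cmd = (0x6u << 10) | (phy << 5) | reg;	/* 01 10 PPPPP RRRRR */
	uint16_t val = 0;
	int i;

	for (i = 0; i < 32; i++)		/* preamble: 32 ones         */
		out(ctx, 1);
	for (i = 13; i >= 0; i--)		/* ST, OP, PHY, REG, MSB first */
		out(ctx, (cmd >> i) & 1);
	(void)in(ctx);				/* turnaround; clock counts vary */
	for (i = 0; i < 16; i++)		/* 16 data bits, MSB first   */
		val = (val << 1) | (in(ctx) & 1);

	return val;
}

The driver builds the same bit stream the other way round (mii_swap() reverses the field, then bits go out LSB first), which is why the hunks above pair each field with a swap or a fixed-length shift loop.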
@@ -5085,7 +5085,7 @@ mii_get_phy(struct net_device *dev) | |||
5085 | u_long iobase = dev->base_addr; | 5085 | u_long iobase = dev->base_addr; |
5086 | int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table); | 5086 | int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table); |
5087 | int id; | 5087 | int id; |
5088 | 5088 | ||
5089 | lp->active = 0; | 5089 | lp->active = 0; |
5090 | lp->useMII = TRUE; | 5090 | lp->useMII = TRUE; |
5091 | 5091 | ||
@@ -5094,7 +5094,7 @@ mii_get_phy(struct net_device *dev) | |||
5094 | lp->phy[lp->active].addr = i; | 5094 | lp->phy[lp->active].addr = i; |
5095 | if (i==0) n++; /* Count cycles */ | 5095 | if (i==0) n++; /* Count cycles */ |
5096 | while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */ | 5096 | while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */ |
5097 | id = mii_get_oui(i, DE4X5_MII); | 5097 | id = mii_get_oui(i, DE4X5_MII); |
5098 | if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ | 5098 | if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ |
5099 | for (j=0; j<limit; j++) { /* Search PHY table */ | 5099 | for (j=0; j<limit; j++) { /* Search PHY table */ |
5100 | if (id != phy_info[j].id) continue; /* ID match? */ | 5100 | if (id != phy_info[j].id) continue; /* ID match? */ |
@@ -5133,7 +5133,7 @@ mii_get_phy(struct net_device *dev) | |||
5133 | for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/ | 5133 | for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/ |
5134 | mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); | 5134 | mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); |
5135 | while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); | 5135 | while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); |
5136 | 5136 | ||
5137 | de4x5_dbg_mii(dev, k); | 5137 | de4x5_dbg_mii(dev, k); |
5138 | } | 5138 | } |
5139 | } | 5139 | } |
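mii_get_phy() above probes each of the 32 possible MII addresses and matches responding PHYs against phy_info[] by an identifier read over MDIO. On a clause-22 PHY the full 32-bit identifier is split across registers 2 and 3 (PHYSID1/PHYSID2); a hedged sketch of recombining it and extracting the OUI-derived portion, with read_reg() as a hypothetical accessor rather than the driver's mii_get_oui():

#include <stdint.h>

/* Hypothetical register accessor; stands in for an MDIO read. */
typedef uint16_t (*mii_read_fn)(void *ctx, uint8_t phyaddr, uint8_t reg);

#define MII_PHYSID1	2	/* high 16 bits of the identifier */
#define MII_PHYSID2	3	/* low 16 bits of the identifier  */

/* Sketch: rebuild the 32-bit PHY identifier and drop the 6-bit model
 * and 4-bit revision fields, leaving the 22 OUI-derived bits. */
static uint32_t phy_oui(mii_read_fn read_reg, void *ctx, uint8_t phyaddr)
{
	uint32_t id = ((uint32_t)read_reg(ctx, phyaddr, MII_PHYSID1) << 16) |
		      read_reg(ctx, phyaddr, MII_PHYSID2);

	return id >> 10;
}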
@@ -5148,12 +5148,12 @@ build_setup_frame(struct net_device *dev, int mode) | |||
5148 | struct de4x5_private *lp = netdev_priv(dev); | 5148 | struct de4x5_private *lp = netdev_priv(dev); |
5149 | int i; | 5149 | int i; |
5150 | char *pa = lp->setup_frame; | 5150 | char *pa = lp->setup_frame; |
5151 | 5151 | ||
5152 | /* Initialise the setup frame */ | 5152 | /* Initialise the setup frame */ |
5153 | if (mode == ALL) { | 5153 | if (mode == ALL) { |
5154 | memset(lp->setup_frame, 0, SETUP_FRAME_LEN); | 5154 | memset(lp->setup_frame, 0, SETUP_FRAME_LEN); |
5155 | } | 5155 | } |
5156 | 5156 | ||
5157 | if (lp->setup_f == HASH_PERF) { | 5157 | if (lp->setup_f == HASH_PERF) { |
5158 | for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) { | 5158 | for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) { |
5159 | *(pa + i) = dev->dev_addr[i]; /* Host address */ | 5159 | *(pa + i) = dev->dev_addr[i]; /* Host address */ |
@@ -5170,7 +5170,7 @@ build_setup_frame(struct net_device *dev, int mode) | |||
5170 | if (i & 0x01) pa += 4; | 5170 | if (i & 0x01) pa += 4; |
5171 | } | 5171 | } |
5172 | } | 5172 | } |
5173 | 5173 | ||
5174 | return pa; /* Points to the next entry */ | 5174 | return pa; /* Points to the next entry */ |
5175 | } | 5175 | } |
5176 | 5176 | ||
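build_setup_frame() above fills the setup frame that the driver later transmits with TD_SET to program the receive address filter. In perfect-filtering mode the 192-byte frame holds 16 station-address slots, each stored as three 16-bit words in the low halves of three consecutive 32-bit longwords; the sketch below shows that assumed packing for one slot, independent of the driver's own buffer handling:

#include <stdint.h>

#define SETUP_FRAME_LEN		192	/* 16 slots * 3 longwords * 4 bytes */

/* Sketch (assumed layout): write one MAC address into perfect-filter
 * slot 'slot'.  Each 16-bit chunk of the address sits in the low half
 * of its own little-endian longword; the upper halves are ignored. */
static void pack_perfect_entry(uint8_t frame[SETUP_FRAME_LEN], int slot,
			       const uint8_t mac[6])
{
	uint8_t *p = frame + slot * 12;
	int i;

	for (i = 0; i < 3; i++) {
		p[4 * i]     = mac[2 * i];
		p[4 * i + 1] = mac[2 * i + 1];
		p[4 * i + 2] = 0;
		p[4 * i + 3] = 0;
	}
}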
@@ -5178,7 +5178,7 @@ static void | |||
5178 | enable_ast(struct net_device *dev, u32 time_out) | 5178 | enable_ast(struct net_device *dev, u32 time_out) |
5179 | { | 5179 | { |
5180 | timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out); | 5180 | timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out); |
5181 | 5181 | ||
5182 | return; | 5182 | return; |
5183 | } | 5183 | } |
5184 | 5184 | ||
@@ -5186,9 +5186,9 @@ static void | |||
5186 | disable_ast(struct net_device *dev) | 5186 | disable_ast(struct net_device *dev) |
5187 | { | 5187 | { |
5188 | struct de4x5_private *lp = netdev_priv(dev); | 5188 | struct de4x5_private *lp = netdev_priv(dev); |
5189 | 5189 | ||
5190 | del_timer(&lp->timer); | 5190 | del_timer(&lp->timer); |
5191 | 5191 | ||
5192 | return; | 5192 | return; |
5193 | } | 5193 | } |
5194 | 5194 | ||
@@ -5207,10 +5207,10 @@ de4x5_switch_mac_port(struct net_device *dev) | |||
5207 | omr |= lp->infoblock_csr6; | 5207 | omr |= lp->infoblock_csr6; |
5208 | if (omr & OMR_PS) omr |= OMR_HBD; | 5208 | if (omr & OMR_PS) omr |= OMR_HBD; |
5209 | outl(omr, DE4X5_OMR); | 5209 | outl(omr, DE4X5_OMR); |
5210 | 5210 | ||
5211 | /* Soft Reset */ | 5211 | /* Soft Reset */ |
5212 | RESET_DE4X5; | 5212 | RESET_DE4X5; |
5213 | 5213 | ||
5214 | /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */ | 5214 | /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */ |
5215 | if (lp->chipset == DC21140) { | 5215 | if (lp->chipset == DC21140) { |
5216 | gep_wr(lp->cache.gepc, dev); | 5216 | gep_wr(lp->cache.gepc, dev); |
@@ -5263,21 +5263,21 @@ timeout(struct net_device *dev, void (*fn)(u_long data), u_long data, u_long mse | |||
5263 | { | 5263 | { |
5264 | struct de4x5_private *lp = netdev_priv(dev); | 5264 | struct de4x5_private *lp = netdev_priv(dev); |
5265 | int dt; | 5265 | int dt; |
5266 | 5266 | ||
5267 | /* First, cancel any pending timer events */ | 5267 | /* First, cancel any pending timer events */ |
5268 | del_timer(&lp->timer); | 5268 | del_timer(&lp->timer); |
5269 | 5269 | ||
5270 | /* Convert msec to ticks */ | 5270 | /* Convert msec to ticks */ |
5271 | dt = (msec * HZ) / 1000; | 5271 | dt = (msec * HZ) / 1000; |
5272 | if (dt==0) dt=1; | 5272 | if (dt==0) dt=1; |
5273 | 5273 | ||
5274 | /* Set up timer */ | 5274 | /* Set up timer */ |
5275 | init_timer(&lp->timer); | 5275 | init_timer(&lp->timer); |
5276 | lp->timer.expires = jiffies + dt; | 5276 | lp->timer.expires = jiffies + dt; |
5277 | lp->timer.function = fn; | 5277 | lp->timer.function = fn; |
5278 | lp->timer.data = data; | 5278 | lp->timer.data = data; |
5279 | add_timer(&lp->timer); | 5279 | add_timer(&lp->timer); |
5280 | 5280 | ||
5281 | return; | 5281 | return; |
5282 | } | 5282 | } |
5283 | 5283 | ||
@@ -5375,7 +5375,7 @@ de4x5_dbg_open(struct net_device *dev) | |||
5375 | { | 5375 | { |
5376 | struct de4x5_private *lp = netdev_priv(dev); | 5376 | struct de4x5_private *lp = netdev_priv(dev); |
5377 | int i; | 5377 | int i; |
5378 | 5378 | ||
5379 | if (de4x5_debug & DEBUG_OPEN) { | 5379 | if (de4x5_debug & DEBUG_OPEN) { |
5380 | printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq); | 5380 | printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq); |
5381 | printk("\tphysical address: "); | 5381 | printk("\tphysical address: "); |
@@ -5413,11 +5413,11 @@ de4x5_dbg_open(struct net_device *dev) | |||
5413 | } | 5413 | } |
5414 | } | 5414 | } |
5415 | printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf)); | 5415 | printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf)); |
5416 | printk("Ring size: \nRX: %d\nTX: %d\n", | 5416 | printk("Ring size: \nRX: %d\nTX: %d\n", |
5417 | (short)lp->rxRingSize, | 5417 | (short)lp->rxRingSize, |
5418 | (short)lp->txRingSize); | 5418 | (short)lp->txRingSize); |
5419 | } | 5419 | } |
5420 | 5420 | ||
5421 | return; | 5421 | return; |
5422 | } | 5422 | } |
5423 | 5423 | ||
@@ -5426,7 +5426,7 @@ de4x5_dbg_mii(struct net_device *dev, int k) | |||
5426 | { | 5426 | { |
5427 | struct de4x5_private *lp = netdev_priv(dev); | 5427 | struct de4x5_private *lp = netdev_priv(dev); |
5428 | u_long iobase = dev->base_addr; | 5428 | u_long iobase = dev->base_addr; |
5429 | 5429 | ||
5430 | if (de4x5_debug & DEBUG_MII) { | 5430 | if (de4x5_debug & DEBUG_MII) { |
5431 | printk("\nMII device address: %d\n", lp->phy[k].addr); | 5431 | printk("\nMII device address: %d\n", lp->phy[k].addr); |
5432 | printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII)); | 5432 | printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII)); |
@@ -5445,7 +5445,7 @@ de4x5_dbg_mii(struct net_device *dev, int k) | |||
5445 | printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII)); | 5445 | printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII)); |
5446 | } | 5446 | } |
5447 | } | 5447 | } |
5448 | 5448 | ||
5449 | return; | 5449 | return; |
5450 | } | 5450 | } |
5451 | 5451 | ||
@@ -5453,17 +5453,17 @@ static void | |||
5453 | de4x5_dbg_media(struct net_device *dev) | 5453 | de4x5_dbg_media(struct net_device *dev) |
5454 | { | 5454 | { |
5455 | struct de4x5_private *lp = netdev_priv(dev); | 5455 | struct de4x5_private *lp = netdev_priv(dev); |
5456 | 5456 | ||
5457 | if (lp->media != lp->c_media) { | 5457 | if (lp->media != lp->c_media) { |
5458 | if (de4x5_debug & DEBUG_MEDIA) { | 5458 | if (de4x5_debug & DEBUG_MEDIA) { |
5459 | printk("%s: media is %s%s\n", dev->name, | 5459 | printk("%s: media is %s%s\n", dev->name, |
5460 | (lp->media == NC ? "unconnected, link down or incompatible connection" : | 5460 | (lp->media == NC ? "unconnected, link down or incompatible connection" : |
5461 | (lp->media == TP ? "TP" : | 5461 | (lp->media == TP ? "TP" : |
5462 | (lp->media == ANS ? "TP/Nway" : | 5462 | (lp->media == ANS ? "TP/Nway" : |
5463 | (lp->media == BNC ? "BNC" : | 5463 | (lp->media == BNC ? "BNC" : |
5464 | (lp->media == AUI ? "AUI" : | 5464 | (lp->media == AUI ? "AUI" : |
5465 | (lp->media == BNC_AUI ? "BNC/AUI" : | 5465 | (lp->media == BNC_AUI ? "BNC/AUI" : |
5466 | (lp->media == EXT_SIA ? "EXT SIA" : | 5466 | (lp->media == EXT_SIA ? "EXT SIA" : |
5467 | (lp->media == _100Mb ? "100Mb/s" : | 5467 | (lp->media == _100Mb ? "100Mb/s" : |
5468 | (lp->media == _10Mb ? "10Mb/s" : | 5468 | (lp->media == _10Mb ? "10Mb/s" : |
5469 | "???" | 5469 | "???" |
@@ -5471,7 +5471,7 @@ de4x5_dbg_media(struct net_device *dev) | |||
5471 | } | 5471 | } |
5472 | lp->c_media = lp->media; | 5472 | lp->c_media = lp->media; |
5473 | } | 5473 | } |
5474 | 5474 | ||
5475 | return; | 5475 | return; |
5476 | } | 5476 | } |
5477 | 5477 | ||
@@ -5554,7 +5554,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5554 | u32 lval[36]; | 5554 | u32 lval[36]; |
5555 | } tmp; | 5555 | } tmp; |
5556 | u_long flags = 0; | 5556 | u_long flags = 0; |
5557 | 5557 | ||
5558 | switch(ioc->cmd) { | 5558 | switch(ioc->cmd) { |
5559 | case DE4X5_GET_HWADDR: /* Get the hardware address */ | 5559 | case DE4X5_GET_HWADDR: /* Get the hardware address */ |
5560 | ioc->len = ETH_ALEN; | 5560 | ioc->len = ETH_ALEN; |
@@ -5575,7 +5575,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5575 | } | 5575 | } |
5576 | build_setup_frame(dev, PHYS_ADDR_ONLY); | 5576 | build_setup_frame(dev, PHYS_ADDR_ONLY); |
5577 | /* Set up the descriptor and give ownership to the card */ | 5577 | /* Set up the descriptor and give ownership to the card */ |
5578 | load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | | 5578 | load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | |
5579 | SETUP_FRAME_LEN, (struct sk_buff *)1); | 5579 | SETUP_FRAME_LEN, (struct sk_buff *)1); |
5580 | lp->tx_new = (++lp->tx_new) % lp->txRingSize; | 5580 | lp->tx_new = (++lp->tx_new) % lp->txRingSize; |
5581 | outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ | 5581 | outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ |
@@ -5617,8 +5617,8 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5617 | spin_lock_irqsave(&lp->lock, flags); | 5617 | spin_lock_irqsave(&lp->lock, flags); |
5618 | memcpy(&statbuf, &lp->pktStats, ioc->len); | 5618 | memcpy(&statbuf, &lp->pktStats, ioc->len); |
5619 | spin_unlock_irqrestore(&lp->lock, flags); | 5619 | spin_unlock_irqrestore(&lp->lock, flags); |
5620 | if (copy_to_user(ioc->data, &statbuf, ioc->len)) | 5620 | if (copy_to_user(ioc->data, &statbuf, ioc->len)) |
5621 | return -EFAULT; | 5621 | return -EFAULT; |
5622 | break; | 5622 | break; |
5623 | } | 5623 | } |
5624 | case DE4X5_CLR_STATS: /* Zero out the driver statistics */ | 5624 | case DE4X5_CLR_STATS: /* Zero out the driver statistics */ |
@@ -5652,9 +5652,9 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5652 | ioc->len = j; | 5652 | ioc->len = j; |
5653 | if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; | 5653 | if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; |
5654 | break; | 5654 | break; |
5655 | 5655 | ||
5656 | #define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */ | 5656 | #define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */ |
5657 | /* | 5657 | /* |
5658 | case DE4X5_DUMP: | 5658 | case DE4X5_DUMP: |
5659 | j = 0; | 5659 | j = 0; |
5660 | tmp.addr[j++] = dev->irq; | 5660 | tmp.addr[j++] = dev->irq; |
@@ -5664,7 +5664,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5664 | tmp.addr[j++] = lp->rxRingSize; | 5664 | tmp.addr[j++] = lp->rxRingSize; |
5665 | tmp.lval[j>>2] = (long)lp->rx_ring; j+=4; | 5665 | tmp.lval[j>>2] = (long)lp->rx_ring; j+=4; |
5666 | tmp.lval[j>>2] = (long)lp->tx_ring; j+=4; | 5666 | tmp.lval[j>>2] = (long)lp->tx_ring; j+=4; |
5667 | 5667 | ||
5668 | for (i=0;i<lp->rxRingSize-1;i++){ | 5668 | for (i=0;i<lp->rxRingSize-1;i++){ |
5669 | if (i < 3) { | 5669 | if (i < 3) { |
5670 | tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4; | 5670 | tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4; |
@@ -5677,7 +5677,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5677 | } | 5677 | } |
5678 | } | 5678 | } |
5679 | tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4; | 5679 | tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4; |
5680 | 5680 | ||
5681 | for (i=0;i<lp->rxRingSize-1;i++){ | 5681 | for (i=0;i<lp->rxRingSize-1;i++){ |
5682 | if (i < 3) { | 5682 | if (i < 3) { |
5683 | tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4; | 5683 | tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4; |
@@ -5690,14 +5690,14 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5690 | } | 5690 | } |
5691 | } | 5691 | } |
5692 | tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4; | 5692 | tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4; |
5693 | 5693 | ||
5694 | for (i=0;i<lp->rxRingSize;i++){ | 5694 | for (i=0;i<lp->rxRingSize;i++){ |
5695 | tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4; | 5695 | tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4; |
5696 | } | 5696 | } |
5697 | for (i=0;i<lp->txRingSize;i++){ | 5697 | for (i=0;i<lp->txRingSize;i++){ |
5698 | tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4; | 5698 | tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4; |
5699 | } | 5699 | } |
5700 | 5700 | ||
5701 | tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4; | 5701 | tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4; |
5702 | tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4; | 5702 | tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4; |
5703 | tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4; | 5703 | tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4; |
@@ -5706,18 +5706,18 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5706 | tmp.lval[j>>2] = inl(DE4X5_STS); j+=4; | 5706 | tmp.lval[j>>2] = inl(DE4X5_STS); j+=4; |
5707 | tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4; | 5707 | tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4; |
5708 | tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4; | 5708 | tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4; |
5709 | tmp.lval[j>>2] = lp->chipset; j+=4; | 5709 | tmp.lval[j>>2] = lp->chipset; j+=4; |
5710 | if (lp->chipset == DC21140) { | 5710 | if (lp->chipset == DC21140) { |
5711 | tmp.lval[j>>2] = gep_rd(dev); j+=4; | 5711 | tmp.lval[j>>2] = gep_rd(dev); j+=4; |
5712 | } else { | 5712 | } else { |
5713 | tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4; | 5713 | tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4; |
5714 | tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4; | 5714 | tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4; |
5715 | tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4; | 5715 | tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4; |
5716 | tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4; | 5716 | tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4; |
5717 | } | 5717 | } |
5718 | tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4; | 5718 | tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4; |
5719 | if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { | 5719 | if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { |
5720 | tmp.lval[j>>2] = lp->active; j+=4; | 5720 | tmp.lval[j>>2] = lp->active; j+=4; |
5721 | tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | 5721 | tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; |
5722 | tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | 5722 | tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; |
5723 | tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | 5723 | tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4; |
@@ -5734,10 +5734,10 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5734 | tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | 5734 | tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4; |
5735 | } | 5735 | } |
5736 | } | 5736 | } |
5737 | 5737 | ||
5738 | tmp.addr[j++] = lp->txRingSize; | 5738 | tmp.addr[j++] = lp->txRingSize; |
5739 | tmp.addr[j++] = netif_queue_stopped(dev); | 5739 | tmp.addr[j++] = netif_queue_stopped(dev); |
5740 | 5740 | ||
5741 | ioc->len = j; | 5741 | ioc->len = j; |
5742 | if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; | 5742 | if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; |
5743 | break; | 5743 | break; |
@@ -5746,7 +5746,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5746 | default: | 5746 | default: |
5747 | return -EOPNOTSUPP; | 5747 | return -EOPNOTSUPP; |
5748 | } | 5748 | } |
5749 | 5749 | ||
5750 | return status; | 5750 | return status; |
5751 | } | 5751 | } |
5752 | 5752 | ||
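The statistics branch of the ioctl switch above (just before DE4X5_CLR_STATS) takes the driver spinlock, memcpy()s the counters into a stack buffer, drops the lock, and only then calls copy_to_user(), which may fault and sleep. A minimal kernel-style sketch of that snapshot-then-copy pattern follows; struct my_priv, struct my_stats and the field names are hypothetical stand-ins rather than the de4x5 structures, and the fragment assumes a kernel build environment.

#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct my_stats { unsigned long rx_packets, tx_packets, rx_errors; };

struct my_priv {
	spinlock_t lock;
	struct my_stats stats;
};

/* Snapshot under the lock, copy to user space outside it. */
static int my_get_stats(struct my_priv *lp, void __user *ubuf, size_t len)
{
	struct my_stats snap;
	unsigned long flags;

	if (len > sizeof(snap))
		len = sizeof(snap);		/* never copy past the snapshot */

	spin_lock_irqsave(&lp->lock, flags);
	memcpy(&snap, &lp->stats, len);		/* consistent view of the counters */
	spin_unlock_irqrestore(&lp->lock, flags);

	if (copy_to_user(ubuf, &snap, len))	/* may fault: must not hold the lock */
		return -EFAULT;
	return 0;
}

Keeping copy_to_user() outside the spinlock is the point of the stack buffer: a faulting user pointer can sleep, which is not allowed while a spinlock is held.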
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h index ad37a4074302..57226e5eb8a6 100644 --- a/drivers/net/tulip/de4x5.h +++ b/drivers/net/tulip/de4x5.h | |||
@@ -38,11 +38,11 @@ | |||
38 | /* | 38 | /* |
39 | ** EISA Register Address Map | 39 | ** EISA Register Address Map |
40 | */ | 40 | */ |
41 | #define EISA_ID iobase+0x0c80 /* EISA ID Registers */ | 41 | #define EISA_ID iobase+0x0c80 /* EISA ID Registers */ |
42 | #define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */ | 42 | #define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */ |
43 | #define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */ | 43 | #define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */ |
44 | #define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */ | 44 | #define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */ |
45 | #define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */ | 45 | #define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */ |
46 | #define EISA_CR iobase+0x0c84 /* EISA Control Register */ | 46 | #define EISA_CR iobase+0x0c84 /* EISA Control Register */ |
47 | #define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */ | 47 | #define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */ |
48 | #define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */ | 48 | #define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */ |
@@ -1008,8 +1008,8 @@ struct de4x5_ioctl { | |||
1008 | unsigned char __user *data; /* Pointer to the data buffer */ | 1008 | unsigned char __user *data; /* Pointer to the data buffer */ |
1009 | }; | 1009 | }; |
1010 | 1010 | ||
1011 | /* | 1011 | /* |
1012 | ** Recognised commands for the driver | 1012 | ** Recognised commands for the driver |
1013 | */ | 1013 | */ |
1014 | #define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */ | 1014 | #define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */ |
1015 | #define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */ | 1015 | #define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */ |
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index 74e9075d9c48..ba5b112093f4 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c | |||
@@ -50,7 +50,7 @@ | |||
50 | forget to unmap PCI mapped skbs. | 50 | forget to unmap PCI mapped skbs. |
51 | 51 | ||
52 | Alan Cox <alan@redhat.com> | 52 | Alan Cox <alan@redhat.com> |
53 | Added new PCI identifiers provided by Clear Zhang at ALi | 53 | Added new PCI identifiers provided by Clear Zhang at ALi |
54 | for their 1563 ethernet device. | 54 | for their 1563 ethernet device. |
55 | 55 | ||
56 | TODO | 56 | TODO |
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c index fbd9ab60b052..5ffbd5b300c0 100644 --- a/drivers/net/tulip/eeprom.c +++ b/drivers/net/tulip/eeprom.c | |||
@@ -96,11 +96,11 @@ static const char *block_name[] __devinitdata = { | |||
96 | * tulip_build_fake_mediatable - Build a fake mediatable entry. | 96 | * tulip_build_fake_mediatable - Build a fake mediatable entry. |
97 | * @tp: Ptr to the tulip private data. | 97 | * @tp: Ptr to the tulip private data. |
98 | * | 98 | * |
99 | * Some cards like the 3x5 HSC cards (J3514A) do not have a standard | 99 | * Some cards like the 3x5 HSC cards (J3514A) do not have a standard |
100 | * srom and can not be handled under the fixup routine. These cards | 100 | * srom and can not be handled under the fixup routine. These cards |
101 | * still need a valid mediatable entry for correct csr12 setup and | 101 | * still need a valid mediatable entry for correct csr12 setup and |
102 | * mii handling. | 102 | * mii handling. |
103 | * | 103 | * |
104 | * Since this is currently a parisc-linux specific function, the | 104 | * Since this is currently a parisc-linux specific function, the |
105 | * #ifdef __hppa__ should completely optimize this function away for | 105 | * #ifdef __hppa__ should completely optimize this function away for |
106 | * non-parisc hardware. | 106 | * non-parisc hardware. |
@@ -140,7 +140,7 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp) | |||
140 | tp->flags |= HAS_PHY_IRQ; | 140 | tp->flags |= HAS_PHY_IRQ; |
141 | tp->csr12_shadow = -1; | 141 | tp->csr12_shadow = -1; |
142 | } | 142 | } |
143 | #endif | 143 | #endif |
144 | } | 144 | } |
145 | 145 | ||
146 | void __devinit tulip_parse_eeprom(struct net_device *dev) | 146 | void __devinit tulip_parse_eeprom(struct net_device *dev) |
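The kernel-doc above spells out the trick: because the whole body of tulip_build_fake_mediatable() sits under #ifdef __hppa__, the function is empty on every other architecture and the compiler can optimize the call away completely. A tiny standalone illustration of the idiom, with invented names (this is not the tulip code):

#include <stdio.h>

/* The body only exists when the platform guard is defined; elsewhere the
 * function is empty and calls to it compile down to nothing. */
static void build_fake_entry(int *csr12_shadow)
{
#ifdef __hppa__
	*csr12_shadow = 0x3f;	/* pretend platform-specific fixup */
#else
	(void)csr12_shadow;	/* no-op on every other architecture */
#endif
}

int main(void)
{
	int csr12_shadow = -1;

	build_fake_entry(&csr12_shadow);
	printf("csr12_shadow = %d\n", csr12_shadow);
	return 0;
}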
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index bb3558164a5b..da4f7593c50f 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c | |||
@@ -139,22 +139,22 @@ int tulip_poll(struct net_device *dev, int *budget) | |||
139 | } | 139 | } |
140 | /* Acknowledge current RX interrupt sources. */ | 140 | /* Acknowledge current RX interrupt sources. */ |
141 | iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5); | 141 | iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5); |
142 | 142 | ||
143 | 143 | ||
144 | /* If we own the next entry, it is a new packet. Send it up. */ | 144 | /* If we own the next entry, it is a new packet. Send it up. */ |
145 | while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { | 145 | while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { |
146 | s32 status = le32_to_cpu(tp->rx_ring[entry].status); | 146 | s32 status = le32_to_cpu(tp->rx_ring[entry].status); |
147 | 147 | ||
148 | 148 | ||
149 | if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) | 149 | if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) |
150 | break; | 150 | break; |
151 | 151 | ||
152 | if (tulip_debug > 5) | 152 | if (tulip_debug > 5) |
153 | printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", | 153 | printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", |
154 | dev->name, entry, status); | 154 | dev->name, entry, status); |
155 | if (--rx_work_limit < 0) | 155 | if (--rx_work_limit < 0) |
156 | goto not_done; | 156 | goto not_done; |
157 | 157 | ||
158 | if ((status & 0x38008300) != 0x0300) { | 158 | if ((status & 0x38008300) != 0x0300) { |
159 | if ((status & 0x38000300) != 0x0300) { | 159 | if ((status & 0x38000300) != 0x0300) { |
160 | /* Ignore earlier buffers. */ | 160 | /* Ignore earlier buffers. */ |
@@ -180,7 +180,7 @@ int tulip_poll(struct net_device *dev, int *budget) | |||
180 | /* Omit the four octet CRC from the length. */ | 180 | /* Omit the four octet CRC from the length. */ |
181 | short pkt_len = ((status >> 16) & 0x7ff) - 4; | 181 | short pkt_len = ((status >> 16) & 0x7ff) - 4; |
182 | struct sk_buff *skb; | 182 | struct sk_buff *skb; |
183 | 183 | ||
184 | #ifndef final_version | 184 | #ifndef final_version |
185 | if (pkt_len > 1518) { | 185 | if (pkt_len > 1518) { |
186 | printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n", | 186 | printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n", |
@@ -213,7 +213,7 @@ int tulip_poll(struct net_device *dev, int *budget) | |||
213 | } else { /* Pass up the skb already on the Rx ring. */ | 213 | } else { /* Pass up the skb already on the Rx ring. */ |
214 | char *temp = skb_put(skb = tp->rx_buffers[entry].skb, | 214 | char *temp = skb_put(skb = tp->rx_buffers[entry].skb, |
215 | pkt_len); | 215 | pkt_len); |
216 | 216 | ||
217 | #ifndef final_version | 217 | #ifndef final_version |
218 | if (tp->rx_buffers[entry].mapping != | 218 | if (tp->rx_buffers[entry].mapping != |
219 | le32_to_cpu(tp->rx_ring[entry].buffer1)) { | 219 | le32_to_cpu(tp->rx_ring[entry].buffer1)) { |
@@ -225,17 +225,17 @@ int tulip_poll(struct net_device *dev, int *budget) | |||
225 | skb->head, temp); | 225 | skb->head, temp); |
226 | } | 226 | } |
227 | #endif | 227 | #endif |
228 | 228 | ||
229 | pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, | 229 | pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, |
230 | PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | 230 | PKT_BUF_SZ, PCI_DMA_FROMDEVICE); |
231 | 231 | ||
232 | tp->rx_buffers[entry].skb = NULL; | 232 | tp->rx_buffers[entry].skb = NULL; |
233 | tp->rx_buffers[entry].mapping = 0; | 233 | tp->rx_buffers[entry].mapping = 0; |
234 | } | 234 | } |
235 | skb->protocol = eth_type_trans(skb, dev); | 235 | skb->protocol = eth_type_trans(skb, dev); |
236 | 236 | ||
237 | netif_receive_skb(skb); | 237 | netif_receive_skb(skb); |
238 | 238 | ||
239 | dev->last_rx = jiffies; | 239 | dev->last_rx = jiffies; |
240 | tp->stats.rx_packets++; | 240 | tp->stats.rx_packets++; |
241 | tp->stats.rx_bytes += pkt_len; | 241 | tp->stats.rx_bytes += pkt_len; |
@@ -245,12 +245,12 @@ int tulip_poll(struct net_device *dev, int *budget) | |||
245 | entry = (++tp->cur_rx) % RX_RING_SIZE; | 245 | entry = (++tp->cur_rx) % RX_RING_SIZE; |
246 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) | 246 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) |
247 | tulip_refill_rx(dev); | 247 | tulip_refill_rx(dev); |
248 | 248 | ||
249 | } | 249 | } |
250 | 250 | ||
251 | /* New ack strategy... irq does not ack Rx any longer | 251 | /* New ack strategy... irq does not ack Rx any longer |
252 | hopefully this helps */ | 252 | hopefully this helps */ |
253 | 253 | ||
254 | /* Really bad things can happen here... If new packet arrives | 254 | /* Really bad things can happen here... If new packet arrives |
255 | * and an irq arrives (tx or just due to occasionally unset | 255 | * and an irq arrives (tx or just due to occasionally unset |
256 | * mask), it will be acked by irq handler, but new thread | 256 | * mask), it will be acked by irq handler, but new thread |
@@ -259,28 +259,28 @@ int tulip_poll(struct net_device *dev, int *budget) | |||
259 | * tomorrow (night 011029). If it will not fail, we won | 259 | * tomorrow (night 011029). If it will not fail, we won |
260 | * finally: amount of IO did not increase at all. */ | 260 | * finally: amount of IO did not increase at all. */ |
261 | } while ((ioread32(tp->base_addr + CSR5) & RxIntr)); | 261 | } while ((ioread32(tp->base_addr + CSR5) & RxIntr)); |
262 | 262 | ||
263 | done: | 263 | done: |
264 | 264 | ||
265 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION | 265 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION |
266 | 266 | ||
267 | /* We use this simplistic scheme for IM. It's proven by | 267 | /* We use this simplistic scheme for IM. It's proven by |
268 | real life installations. We can have IM enabled | 268 | real life installations. We can have IM enabled |
269 | continuously but this would cause unnecessary latency. | 269 | continuously but this would cause unnecessary latency. |
270 | Unfortunately we can't use all the NET_RX_* feedback here. | 270 | Unfortunately we can't use all the NET_RX_* feedback here. |
271 | This would turn on IM for devices that are not contributing | 271 | This would turn on IM for devices that are not contributing |
272 | to backlog congestion with unnecessary latency. | 272 | to backlog congestion with unnecessary latency. |
273 | 273 | ||
274 | We monitor the device RX-ring and have: | 274 | We monitor the device RX-ring and have: |
275 | 275 | ||
276 | HW Interrupt Mitigation either ON or OFF. | 276 | HW Interrupt Mitigation either ON or OFF. |
277 | 277 | ||
278 | ON: More than 1 pkt received (per intr.) OR we are dropping | 278 | ON: More than 1 pkt received (per intr.) OR we are dropping |
279 | OFF: Only 1 pkt received | 279 | OFF: Only 1 pkt received |
280 | 280 | ||
281 | Note. We only use min and max (0, 15) settings from mit_table */ | 281 | Note. We only use min and max (0, 15) settings from mit_table */ |
282 | 282 | ||
283 | 283 | ||
284 | if( tp->flags & HAS_INTR_MITIGATION) { | 284 | if( tp->flags & HAS_INTR_MITIGATION) { |
285 | if( received > 1 ) { | 285 | if( received > 1 ) { |
286 | if( ! tp->mit_on ) { | 286 | if( ! tp->mit_on ) { |
@@ -297,20 +297,20 @@ done: | |||
297 | } | 297 | } |
298 | 298 | ||
299 | #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ | 299 | #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ |
300 | 300 | ||
301 | dev->quota -= received; | 301 | dev->quota -= received; |
302 | *budget -= received; | 302 | *budget -= received; |
303 | 303 | ||
304 | tulip_refill_rx(dev); | 304 | tulip_refill_rx(dev); |
305 | 305 | ||
306 | /* If RX ring is not full we are out of memory. */ | 306 | /* If RX ring is not full we are out of memory. */ |
307 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; | 307 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; |
308 | 308 | ||
309 | /* Remove us from polling list and enable RX intr. */ | 309 | /* Remove us from polling list and enable RX intr. */ |
310 | 310 | ||
311 | netif_rx_complete(dev); | 311 | netif_rx_complete(dev); |
312 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); | 312 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); |
313 | 313 | ||
314 | /* The last op happens after poll completion. Which means the following: | 314 | /* The last op happens after poll completion. Which means the following: |
315 | * 1. it can race with disabling irqs in irq handler | 315 | * 1. it can race with disabling irqs in irq handler |
316 | * 2. it can race with disabling/enabling irqs in other poll threads | 316 | * 2. it can race with disabling/enabling irqs in other poll threads |
@@ -321,9 +321,9 @@ done: | |||
321 | * due to races in masking and due to too late acking of already | 321 | * due to races in masking and due to too late acking of already |
322 | * processed irqs. But it must not result in losing events. | 322 | * processed irqs. But it must not result in losing events. |
323 | */ | 323 | */ |
324 | 324 | ||
325 | return 0; | 325 | return 0; |
326 | 326 | ||
327 | not_done: | 327 | not_done: |
328 | if (!received) { | 328 | if (!received) { |
329 | 329 | ||
@@ -331,29 +331,29 @@ done: | |||
331 | } | 331 | } |
332 | dev->quota -= received; | 332 | dev->quota -= received; |
333 | *budget -= received; | 333 | *budget -= received; |
334 | 334 | ||
335 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || | 335 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || |
336 | tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) | 336 | tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) |
337 | tulip_refill_rx(dev); | 337 | tulip_refill_rx(dev); |
338 | 338 | ||
339 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; | 339 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; |
340 | 340 | ||
341 | return 1; | 341 | return 1; |
342 | 342 | ||
343 | 343 | ||
344 | oom: /* Executed with RX ints disabled */ | 344 | oom: /* Executed with RX ints disabled */ |
345 | 345 | ||
346 | 346 | ||
347 | /* Start timer, stop polling, but do not enable rx interrupts. */ | 347 | /* Start timer, stop polling, but do not enable rx interrupts. */ |
348 | mod_timer(&tp->oom_timer, jiffies+1); | 348 | mod_timer(&tp->oom_timer, jiffies+1); |
349 | 349 | ||
350 | /* Think: timer_pending() was an explicit signature of bug. | 350 | /* Think: timer_pending() was an explicit signature of bug. |
351 | * Timer can be pending now but fired and completed | 351 | * Timer can be pending now but fired and completed |
352 | * before we did netif_rx_complete(). See? We would lose it. */ | 352 | * before we did netif_rx_complete(). See? We would lose it. */ |
353 | 353 | ||
354 | /* remove ourselves from the polling list */ | 354 | /* remove ourselves from the polling list */ |
355 | netif_rx_complete(dev); | 355 | netif_rx_complete(dev); |
356 | 356 | ||
357 | return 0; | 357 | return 0; |
358 | } | 358 | } |
359 | 359 | ||
@@ -521,9 +521,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs) | |||
521 | /* Let's see whether the interrupt really is for us */ | 521 | /* Let's see whether the interrupt really is for us */ |
522 | csr5 = ioread32(ioaddr + CSR5); | 522 | csr5 = ioread32(ioaddr + CSR5); |
523 | 523 | ||
524 | if (tp->flags & HAS_PHY_IRQ) | 524 | if (tp->flags & HAS_PHY_IRQ) |
525 | handled = phy_interrupt (dev); | 525 | handled = phy_interrupt (dev); |
526 | 526 | ||
527 | if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) | 527 | if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) |
528 | return IRQ_RETVAL(handled); | 528 | return IRQ_RETVAL(handled); |
529 | 529 | ||
@@ -538,17 +538,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs) | |||
538 | /* Mask RX intrs and add the device to poll list. */ | 538 | /* Mask RX intrs and add the device to poll list. */ |
539 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); | 539 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); |
540 | netif_rx_schedule(dev); | 540 | netif_rx_schedule(dev); |
541 | 541 | ||
542 | if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) | 542 | if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) |
543 | break; | 543 | break; |
544 | } | 544 | } |
545 | 545 | ||
546 | /* Acknowledge the interrupt sources we handle here ASAP | 546 | /* Acknowledge the interrupt sources we handle here ASAP |
547 | the poll function does Rx and RxNoBuf acking */ | 547 | the poll function does Rx and RxNoBuf acking */ |
548 | 548 | ||
549 | iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5); | 549 | iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5); |
550 | 550 | ||
551 | #else | 551 | #else |
552 | /* Acknowledge all of the current interrupt sources ASAP. */ | 552 | /* Acknowledge all of the current interrupt sources ASAP. */ |
553 | iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5); | 553 | iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5); |
554 | 554 | ||
@@ -559,11 +559,11 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs) | |||
559 | } | 559 | } |
560 | 560 | ||
561 | #endif /* CONFIG_TULIP_NAPI */ | 561 | #endif /* CONFIG_TULIP_NAPI */ |
562 | 562 | ||
563 | if (tulip_debug > 4) | 563 | if (tulip_debug > 4) |
564 | printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n", | 564 | printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n", |
565 | dev->name, csr5, ioread32(ioaddr + CSR5)); | 565 | dev->name, csr5, ioread32(ioaddr + CSR5)); |
566 | 566 | ||
567 | 567 | ||
568 | if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) { | 568 | if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) { |
569 | unsigned int dirty_tx; | 569 | unsigned int dirty_tx; |
@@ -737,17 +737,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs) | |||
737 | #ifdef CONFIG_TULIP_NAPI | 737 | #ifdef CONFIG_TULIP_NAPI |
738 | if (rxd) | 738 | if (rxd) |
739 | csr5 &= ~RxPollInt; | 739 | csr5 &= ~RxPollInt; |
740 | } while ((csr5 & (TxNoBuf | | 740 | } while ((csr5 & (TxNoBuf | |
741 | TxDied | | 741 | TxDied | |
742 | TxIntr | | 742 | TxIntr | |
743 | TimerInt | | 743 | TimerInt | |
744 | /* Abnormal intr. */ | 744 | /* Abnormal intr. */ |
745 | RxDied | | 745 | RxDied | |
746 | TxFIFOUnderflow | | 746 | TxFIFOUnderflow | |
747 | TxJabber | | 747 | TxJabber | |
748 | TPLnkFail | | 748 | TPLnkFail | |
749 | SytemError )) != 0); | 749 | SytemError )) != 0); |
750 | #else | 750 | #else |
751 | } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0); | 751 | } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0); |
752 | 752 | ||
753 | tulip_refill_rx(dev); | 753 | tulip_refill_rx(dev); |
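The tulip_poll() comment above lays out the interrupt-mitigation heuristic in words: switch hardware mitigation on when one poll services more than one packet (or packets are being dropped), switch it off again when a poll services exactly one, and use only the minimum and maximum mit_table settings. The standalone model below mirrors just that on/off decision; the register field and the two constants are placeholders, not the real CSR11 encoding.

#include <stdbool.h>
#include <stdio.h>

struct nic_state {
	bool mit_on;		/* is hardware mitigation currently enabled? */
	unsigned int mit_reg;	/* stand-in for the chip's mitigation register */
};

#define MIT_OFF   0x0u		/* "mit_table[0]": one interrupt per packet */
#define MIT_FULL  0xffffu	/* "mit_table[15]": maximum batching (made up) */

static void update_mitigation(struct nic_state *nic, int received)
{
	if (received > 1) {
		if (!nic->mit_on) {		/* bursty: batch interrupts */
			nic->mit_reg = MIT_FULL;
			nic->mit_on = true;
		}
	} else {
		if (nic->mit_on) {		/* quiet again: back to low latency */
			nic->mit_reg = MIT_OFF;
			nic->mit_on = false;
		}
	}
}

int main(void)
{
	struct nic_state nic = { .mit_on = false, .mit_reg = MIT_OFF };
	int per_poll[] = { 1, 4, 7, 1, 1, 3 };
	size_t i;

	for (i = 0; i < sizeof(per_poll) / sizeof(per_poll[0]); i++) {
		update_mitigation(&nic, per_poll[i]);
		printf("received=%d -> mitigation %s (reg=0x%x)\n",
		       per_poll[i], nic.mit_on ? "ON" : "OFF", nic.mit_reg);
	}
	return 0;
}

The trade-off the comment describes is latency versus interrupt rate: leaving mitigation on for a lightly loaded device only adds delay, so it is enabled strictly in response to observed bursts.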
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c index f53396fe79c9..e9bc2a958c14 100644 --- a/drivers/net/tulip/media.c +++ b/drivers/net/tulip/media.c | |||
@@ -140,7 +140,7 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val) | |||
140 | spin_unlock_irqrestore(&tp->mii_lock, flags); | 140 | spin_unlock_irqrestore(&tp->mii_lock, flags); |
141 | return; | 141 | return; |
142 | } | 142 | } |
143 | 143 | ||
144 | /* Establish sync by sending 32 logic ones. */ | 144 | /* Establish sync by sending 32 logic ones. */ |
145 | for (i = 32; i >= 0; i--) { | 145 | for (i = 32; i >= 0; i--) { |
146 | iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); | 146 | iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); |
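The media.c hunk above sits right after the "Establish sync by sending 32 logic ones" step, the standard clause-22 MDIO preamble. As a rough sketch of the frame a bit-banged tulip_mdio_write() then clocks out, here is the layout in standalone C; mdio_out_bit() is a hypothetical stand-in for the CSR9 pin wiggling, and the frame constant is my reading of the usual start/opcode/turnaround encoding, not a quote of the driver.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: a real driver would toggle the MDIO data and clock
 * lines in a device register here instead of printing. */
static void mdio_out_bit(int bit)
{
	putchar(bit ? '1' : '0');
}

/* IEEE 802.3 clause-22 write: preamble of ones, then
 * ST(01) OP(01=write) PHYAD(5) REGAD(5) TA(10) DATA(16), MSB first. */
static void mdio_write_frame(unsigned int phy, unsigned int reg, uint16_t val)
{
	uint32_t frame = (0x5002u << 16) | (phy << 23) | (reg << 18) | val;
	int i;

	for (i = 0; i < 32; i++)	/* sync: let the PHY find bit alignment */
		mdio_out_bit(1);
	for (i = 31; i >= 0; i--)	/* the 32-bit frame itself, MSB first */
		mdio_out_bit((frame >> i) & 1);
	putchar('\n');
}

int main(void)
{
	/* e.g. restart autonegotiation: BMCR (register 0) = 0x1200 on PHY 1 */
	mdio_write_frame(1, 0, 0x1200);
	return 0;
}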
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 05d2d96f7be2..d25020da6798 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h | |||
@@ -259,7 +259,7 @@ enum t21143_csr6_bits { | |||
259 | There are no ill effects from too-large receive rings. */ | 259 | There are no ill effects from too-large receive rings. */ |
260 | 260 | ||
261 | #define TX_RING_SIZE 32 | 261 | #define TX_RING_SIZE 32 |
262 | #define RX_RING_SIZE 128 | 262 | #define RX_RING_SIZE 128 |
263 | #define MEDIA_MASK 31 | 263 | #define MEDIA_MASK 31 |
264 | 264 | ||
265 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ | 265 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ |
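The ring sizes above are powers of two, which is what makes the driver's free-running indices cheap and wrap-safe: cur_rx and dirty_rx only ever increase, the slot is "index % RX_RING_SIZE", the unsigned difference of the two counters is the number of outstanding descriptors even across overflow, and the slot sequence stays continuous through the wrap because the ring size divides 2^32. A small standalone demonstration with invented counter values:

#include <stdio.h>

#define RX_RING_SIZE 128u	/* power of two, as in tulip.h */

int main(void)
{
	/* Free-running producer/consumer counters, started near UINT_MAX so
	 * the loop runs straight through the unsigned wraparound. */
	unsigned int cur_rx = 4294967292u;	/* UINT_MAX - 3 */
	unsigned int dirty_rx = cur_rx - 3;	/* three descriptors outstanding */
	int step;

	for (step = 0; step < 8; step++) {
		unsigned int slot = cur_rx % RX_RING_SIZE;

		printf("slot %3u  outstanding %u\n", slot, cur_rx - dirty_rx);
		cur_rx++;			/* producer fills one more slot */
		if (step & 1)
			dirty_rx++;		/* consumer catches up more slowly */
	}
	return 0;
}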
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index c67c91251d04..b3cf11d32e24 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -1224,7 +1224,7 @@ out: | |||
1224 | * Chips that have the MRM/reserved bit quirk and the burst quirk. That | 1224 | * Chips that have the MRM/reserved bit quirk and the burst quirk. That |
1225 | * is the DM910X and the on chip ULi devices | 1225 | * is the DM910X and the on chip ULi devices |
1226 | */ | 1226 | */ |
1227 | 1227 | ||
1228 | static int tulip_uli_dm_quirk(struct pci_dev *pdev) | 1228 | static int tulip_uli_dm_quirk(struct pci_dev *pdev) |
1229 | { | 1229 | { |
1230 | if (pdev->vendor == 0x1282 && pdev->device == 0x9102) | 1230 | if (pdev->vendor == 0x1282 && pdev->device == 0x9102) |
@@ -1297,7 +1297,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1297 | */ | 1297 | */ |
1298 | 1298 | ||
1299 | /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache | 1299 | /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache |
1300 | aligned. Aries might need this too. The Saturn errata are not | 1300 | aligned. Aries might need this too. The Saturn errata are not |
1301 | pretty reading but thankfully it's an old 486 chipset. | 1301 | pretty reading but thankfully it's an old 486 chipset. |
1302 | 1302 | ||
1303 | 2. The dreaded SiS496 486 chipset. Same workaround as Intel | 1303 | 2. The dreaded SiS496 486 chipset. Same workaround as Intel |
@@ -1500,7 +1500,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1500 | } | 1500 | } |
1501 | #endif | 1501 | #endif |
1502 | #ifdef CONFIG_MIPS_COBALT | 1502 | #ifdef CONFIG_MIPS_COBALT |
1503 | if ((pdev->bus->number == 0) && | 1503 | if ((pdev->bus->number == 0) && |
1504 | ((PCI_SLOT(pdev->devfn) == 7) || | 1504 | ((PCI_SLOT(pdev->devfn) == 7) || |
1505 | (PCI_SLOT(pdev->devfn) == 12))) { | 1505 | (PCI_SLOT(pdev->devfn) == 12))) { |
1506 | /* Cobalt MAC address in first EEPROM locations. */ | 1506 | /* Cobalt MAC address in first EEPROM locations. */ |
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 238e9c72cb3a..8b3a28f53c3d 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c | |||
@@ -9,7 +9,7 @@ | |||
9 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 9 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | GNU General Public License for more details. | 10 | GNU General Public License for more details. |
11 | 11 | ||
12 | 12 | ||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define DRV_NAME "uli526x" | 15 | #define DRV_NAME "uli526x" |
@@ -185,7 +185,7 @@ struct uli526x_board_info { | |||
185 | 185 | ||
186 | /* NIC SROM data */ | 186 | /* NIC SROM data */ |
187 | unsigned char srom[128]; | 187 | unsigned char srom[128]; |
188 | u8 init; | 188 | u8 init; |
189 | }; | 189 | }; |
190 | 190 | ||
191 | enum uli526x_offsets { | 191 | enum uli526x_offsets { |
@@ -258,7 +258,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, | |||
258 | struct uli526x_board_info *db; /* board information structure */ | 258 | struct uli526x_board_info *db; /* board information structure */ |
259 | struct net_device *dev; | 259 | struct net_device *dev; |
260 | int i, err; | 260 | int i, err; |
261 | 261 | ||
262 | ULI526X_DBUG(0, "uli526x_init_one()", 0); | 262 | ULI526X_DBUG(0, "uli526x_init_one()", 0); |
263 | 263 | ||
264 | if (!printed_version++) | 264 | if (!printed_version++) |
@@ -316,7 +316,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, | |||
316 | err = -ENOMEM; | 316 | err = -ENOMEM; |
317 | goto err_out_nomem; | 317 | goto err_out_nomem; |
318 | } | 318 | } |
319 | 319 | ||
320 | db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; | 320 | db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; |
321 | db->first_tx_desc_dma = db->desc_pool_dma_ptr; | 321 | db->first_tx_desc_dma = db->desc_pool_dma_ptr; |
322 | db->buf_pool_start = db->buf_pool_ptr; | 322 | db->buf_pool_start = db->buf_pool_ptr; |
@@ -324,14 +324,14 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, | |||
324 | 324 | ||
325 | db->chip_id = ent->driver_data; | 325 | db->chip_id = ent->driver_data; |
326 | db->ioaddr = pci_resource_start(pdev, 0); | 326 | db->ioaddr = pci_resource_start(pdev, 0); |
327 | 327 | ||
328 | db->pdev = pdev; | 328 | db->pdev = pdev; |
329 | db->init = 1; | 329 | db->init = 1; |
330 | 330 | ||
331 | dev->base_addr = db->ioaddr; | 331 | dev->base_addr = db->ioaddr; |
332 | dev->irq = pdev->irq; | 332 | dev->irq = pdev->irq; |
333 | pci_set_drvdata(pdev, dev); | 333 | pci_set_drvdata(pdev, dev); |
334 | 334 | ||
335 | /* Register some necessary functions */ | 335 | /* Register some necessary functions */ |
336 | dev->open = &uli526x_open; | 336 | dev->open = &uli526x_open; |
337 | dev->hard_start_xmit = &uli526x_start_xmit; | 337 | dev->hard_start_xmit = &uli526x_start_xmit; |
@@ -341,7 +341,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, | |||
341 | dev->ethtool_ops = &netdev_ethtool_ops; | 341 | dev->ethtool_ops = &netdev_ethtool_ops; |
342 | spin_lock_init(&db->lock); | 342 | spin_lock_init(&db->lock); |
343 | 343 | ||
344 | 344 | ||
345 | /* read 64 word srom data */ | 345 | /* read 64 word srom data */ |
346 | for (i = 0; i < 64; i++) | 346 | for (i = 0; i < 64; i++) |
347 | ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); | 347 | ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); |
@@ -374,7 +374,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, | |||
374 | goto err_out_res; | 374 | goto err_out_res; |
375 | 375 | ||
376 | printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev)); | 376 | printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev)); |
377 | 377 | ||
378 | for (i = 0; i < 6; i++) | 378 | for (i = 0; i < 6; i++) |
379 | printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]); | 379 | printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]); |
380 | printk(", irq %d.\n", dev->irq); | 380 | printk(", irq %d.\n", dev->irq); |
@@ -389,7 +389,7 @@ err_out_nomem: | |||
389 | if(db->desc_pool_ptr) | 389 | if(db->desc_pool_ptr) |
390 | pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, | 390 | pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, |
391 | db->desc_pool_ptr, db->desc_pool_dma_ptr); | 391 | db->desc_pool_ptr, db->desc_pool_dma_ptr); |
392 | 392 | ||
393 | if(db->buf_pool_ptr != NULL) | 393 | if(db->buf_pool_ptr != NULL) |
394 | pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, | 394 | pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, |
395 | db->buf_pool_ptr, db->buf_pool_dma_ptr); | 395 | db->buf_pool_ptr, db->buf_pool_dma_ptr); |
@@ -433,7 +433,7 @@ static int uli526x_open(struct net_device *dev) | |||
433 | { | 433 | { |
434 | int ret; | 434 | int ret; |
435 | struct uli526x_board_info *db = netdev_priv(dev); | 435 | struct uli526x_board_info *db = netdev_priv(dev); |
436 | 436 | ||
437 | ULI526X_DBUG(0, "uli526x_open", 0); | 437 | ULI526X_DBUG(0, "uli526x_open", 0); |
438 | 438 | ||
439 | ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev); | 439 | ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev); |
@@ -454,7 +454,7 @@ static int uli526x_open(struct net_device *dev) | |||
454 | /* CR6 operation mode decision */ | 454 | /* CR6 operation mode decision */ |
455 | db->cr6_data |= ULI526X_TXTH_256; | 455 | db->cr6_data |= ULI526X_TXTH_256; |
456 | db->cr0_data = CR0_DEFAULT; | 456 | db->cr0_data = CR0_DEFAULT; |
457 | 457 | ||
458 | /* Initialize ULI526X board */ | 458 | /* Initialize ULI526X board */ |
459 | uli526x_init(dev); | 459 | uli526x_init(dev); |
460 | 460 | ||
@@ -604,7 +604,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
604 | /* Restore CR7 to enable interrupt */ | 604 | /* Restore CR7 to enable interrupt */ |
605 | spin_unlock_irqrestore(&db->lock, flags); | 605 | spin_unlock_irqrestore(&db->lock, flags); |
606 | outl(db->cr7_data, dev->base_addr + DCR7); | 606 | outl(db->cr7_data, dev->base_addr + DCR7); |
607 | 607 | ||
608 | /* free this SKB */ | 608 | /* free this SKB */ |
609 | dev_kfree_skb(skb); | 609 | dev_kfree_skb(skb); |
610 | 610 | ||
@@ -782,7 +782,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info | |||
782 | struct sk_buff *skb; | 782 | struct sk_buff *skb; |
783 | int rxlen; | 783 | int rxlen; |
784 | u32 rdes0; | 784 | u32 rdes0; |
785 | 785 | ||
786 | rxptr = db->rx_ready_ptr; | 786 | rxptr = db->rx_ready_ptr; |
787 | 787 | ||
788 | while(db->rx_avail_cnt) { | 788 | while(db->rx_avail_cnt) { |
@@ -821,7 +821,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info | |||
821 | if ( !(rdes0 & 0x8000) || | 821 | if ( !(rdes0 & 0x8000) || |
822 | ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { | 822 | ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { |
823 | skb = rxptr->rx_skb_ptr; | 823 | skb = rxptr->rx_skb_ptr; |
824 | 824 | ||
825 | /* Good packet, send to upper layer */ | 825 | /* Good packet, send to upper layer */ |
826 | /* Short packets use a new SKB */ | 826 | /* Short packets use a new SKB */ |
827 | if ( (rxlen < RX_COPY_SIZE) && | 827 | if ( (rxlen < RX_COPY_SIZE) && |
@@ -841,7 +841,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info | |||
841 | dev->last_rx = jiffies; | 841 | dev->last_rx = jiffies; |
842 | db->stats.rx_packets++; | 842 | db->stats.rx_packets++; |
843 | db->stats.rx_bytes += rxlen; | 843 | db->stats.rx_bytes += rxlen; |
844 | 844 | ||
845 | } else { | 845 | } else { |
846 | /* Reuse SKB buffer when the packet is error */ | 846 | /* Reuse SKB buffer when the packet is error */ |
847 | ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0); | 847 | ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0); |
@@ -911,7 +911,7 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd) | |||
911 | SUPPORTED_100baseT_Full | | 911 | SUPPORTED_100baseT_Full | |
912 | SUPPORTED_Autoneg | | 912 | SUPPORTED_Autoneg | |
913 | SUPPORTED_MII); | 913 | SUPPORTED_MII); |
914 | 914 | ||
915 | ecmd->advertising = (ADVERTISED_10baseT_Half | | 915 | ecmd->advertising = (ADVERTISED_10baseT_Half | |
916 | ADVERTISED_10baseT_Full | | 916 | ADVERTISED_10baseT_Full | |
917 | ADVERTISED_100baseT_Half | | 917 | ADVERTISED_100baseT_Half | |
@@ -924,13 +924,13 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd) | |||
924 | ecmd->phy_address = db->phy_addr; | 924 | ecmd->phy_address = db->phy_addr; |
925 | 925 | ||
926 | ecmd->transceiver = XCVR_EXTERNAL; | 926 | ecmd->transceiver = XCVR_EXTERNAL; |
927 | 927 | ||
928 | ecmd->speed = 10; | 928 | ecmd->speed = 10; |
929 | ecmd->duplex = DUPLEX_HALF; | 929 | ecmd->duplex = DUPLEX_HALF; |
930 | 930 | ||
931 | if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) | 931 | if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) |
932 | { | 932 | { |
933 | ecmd->speed = 100; | 933 | ecmd->speed = 100; |
934 | } | 934 | } |
935 | if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) | 935 | if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) |
936 | { | 936 | { |
@@ -939,11 +939,11 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd) | |||
939 | if(db->link_failed) | 939 | if(db->link_failed) |
940 | { | 940 | { |
941 | ecmd->speed = -1; | 941 | ecmd->speed = -1; |
942 | ecmd->duplex = -1; | 942 | ecmd->duplex = -1; |
943 | } | 943 | } |
944 | 944 | ||
945 | if (db->media_mode & ULI526X_AUTO) | 945 | if (db->media_mode & ULI526X_AUTO) |
946 | { | 946 | { |
947 | ecmd->autoneg = AUTONEG_ENABLE; | 947 | ecmd->autoneg = AUTONEG_ENABLE; |
948 | } | 948 | } |
949 | } | 949 | } |
@@ -964,15 +964,15 @@ static void netdev_get_drvinfo(struct net_device *dev, | |||
964 | 964 | ||
965 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { | 965 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { |
966 | struct uli526x_board_info *np = netdev_priv(dev); | 966 | struct uli526x_board_info *np = netdev_priv(dev); |
967 | 967 | ||
968 | ULi_ethtool_gset(np, cmd); | 968 | ULi_ethtool_gset(np, cmd); |
969 | 969 | ||
970 | return 0; | 970 | return 0; |
971 | } | 971 | } |
972 | 972 | ||
973 | static u32 netdev_get_link(struct net_device *dev) { | 973 | static u32 netdev_get_link(struct net_device *dev) { |
974 | struct uli526x_board_info *np = netdev_priv(dev); | 974 | struct uli526x_board_info *np = netdev_priv(dev); |
975 | 975 | ||
976 | if(np->link_failed) | 976 | if(np->link_failed) |
977 | return 0; | 977 | return 0; |
978 | else | 978 | else |
@@ -1005,11 +1005,11 @@ static void uli526x_timer(unsigned long data) | |||
1005 | struct uli526x_board_info *db = netdev_priv(dev); | 1005 | struct uli526x_board_info *db = netdev_priv(dev); |
1006 | unsigned long flags; | 1006 | unsigned long flags; |
1007 | u8 TmpSpeed=10; | 1007 | u8 TmpSpeed=10; |
1008 | 1008 | ||
1009 | //ULI526X_DBUG(0, "uli526x_timer()", 0); | 1009 | //ULI526X_DBUG(0, "uli526x_timer()", 0); |
1010 | spin_lock_irqsave(&db->lock, flags); | 1010 | spin_lock_irqsave(&db->lock, flags); |
1011 | 1011 | ||
1012 | 1012 | ||
1013 | /* Dynamic reset ULI526X : system error or transmit time-out */ | 1013 | /* Dynamic reset ULI526X : system error or transmit time-out */ |
1014 | tmp_cr8 = inl(db->ioaddr + DCR8); | 1014 | tmp_cr8 = inl(db->ioaddr + DCR8); |
1015 | if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { | 1015 | if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { |
@@ -1021,9 +1021,9 @@ static void uli526x_timer(unsigned long data) | |||
1021 | /* TX polling kick monitor */ | 1021 | /* TX polling kick monitor */ |
1022 | if ( db->tx_packet_cnt && | 1022 | if ( db->tx_packet_cnt && |
1023 | time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) { | 1023 | time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) { |
1024 | outl(0x1, dev->base_addr + DCR1); // Tx polling again | 1024 | outl(0x1, dev->base_addr + DCR1); // Tx polling again |
1025 | 1025 | ||
1026 | // TX Timeout | 1026 | // TX Timeout |
1027 | if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) { | 1027 | if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) { |
1028 | db->reset_TXtimeout++; | 1028 | db->reset_TXtimeout++; |
1029 | db->wait_reset = 1; | 1029 | db->wait_reset = 1; |
@@ -1073,7 +1073,7 @@ static void uli526x_timer(unsigned long data) | |||
1073 | uli526x_sense_speed(db) ) | 1073 | uli526x_sense_speed(db) ) |
1074 | db->link_failed = 1; | 1074 | db->link_failed = 1; |
1075 | uli526x_process_mode(db); | 1075 | uli526x_process_mode(db); |
1076 | 1076 | ||
1077 | if(db->link_failed==0) | 1077 | if(db->link_failed==0) |
1078 | { | 1078 | { |
1079 | if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) | 1079 | if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) |
@@ -1404,7 +1404,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db) | |||
1404 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); | 1404 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); |
1405 | 1405 | ||
1406 | if ( (phy_mode & 0x24) == 0x24 ) { | 1406 | if ( (phy_mode & 0x24) == 0x24 ) { |
1407 | 1407 | ||
1408 | phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7); | 1408 | phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7); |
1409 | if(phy_mode&0x8000) | 1409 | if(phy_mode&0x8000) |
1410 | phy_mode = 0x8000; | 1410 | phy_mode = 0x8000; |
@@ -1414,7 +1414,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db) | |||
1414 | phy_mode = 0x2000; | 1414 | phy_mode = 0x2000; |
1415 | else | 1415 | else |
1416 | phy_mode = 0x1000; | 1416 | phy_mode = 0x1000; |
1417 | 1417 | ||
1418 | /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ | 1418 | /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ |
1419 | switch (phy_mode) { | 1419 | switch (phy_mode) { |
1420 | case 0x1000: db->op_mode = ULI526X_10MHF; break; | 1420 | case 0x1000: db->op_mode = ULI526X_10MHF; break; |
@@ -1442,7 +1442,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db) | |||
1442 | static void uli526x_set_phyxcer(struct uli526x_board_info *db) | 1442 | static void uli526x_set_phyxcer(struct uli526x_board_info *db) |
1443 | { | 1443 | { |
1444 | u16 phy_reg; | 1444 | u16 phy_reg; |
1445 | 1445 | ||
1446 | /* Phyxcer capability setting */ | 1446 | /* Phyxcer capability setting */ |
1447 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; | 1447 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; |
1448 | 1448 | ||
@@ -1457,7 +1457,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db) | |||
1457 | case ULI526X_100MHF: phy_reg |= 0x80; break; | 1457 | case ULI526X_100MHF: phy_reg |= 0x80; break; |
1458 | case ULI526X_100MFD: phy_reg |= 0x100; break; | 1458 | case ULI526X_100MFD: phy_reg |= 0x100; break; |
1459 | } | 1459 | } |
1460 | 1460 | ||
1461 | } | 1461 | } |
1462 | 1462 | ||
1463 | /* Write new capability to Phyxcer Reg4 */ | 1463 | /* Write new capability to Phyxcer Reg4 */ |
@@ -1556,7 +1556,7 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data | |||
1556 | /* Write a word data to PHY controller */ | 1556 | /* Write a word data to PHY controller */ |
1557 | for ( i = 0x8000; i > 0; i >>= 1) | 1557 | for ( i = 0x8000; i > 0; i >>= 1) |
1558 | phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | 1558 | phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); |
1559 | 1559 | ||
1560 | } | 1560 | } |
1561 | 1561 | ||
1562 | 1562 | ||
@@ -1574,7 +1574,7 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) | |||
1574 | return phy_readby_cr10(iobase, phy_addr, offset); | 1574 | return phy_readby_cr10(iobase, phy_addr, offset); |
1575 | /* M5261/M5263 Chip */ | 1575 | /* M5261/M5263 Chip */ |
1576 | ioaddr = iobase + DCR9; | 1576 | ioaddr = iobase + DCR9; |
1577 | 1577 | ||
1578 | /* Send 33 synchronization clock to Phy controller */ | 1578 | /* Send 33 synchronization clock to Phy controller */ |
1579 | for (i = 0; i < 35; i++) | 1579 | for (i = 0; i < 35; i++) |
1580 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | 1580 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); |
@@ -1610,7 +1610,7 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) | |||
1610 | static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset) | 1610 | static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset) |
1611 | { | 1611 | { |
1612 | unsigned long ioaddr,cr10_value; | 1612 | unsigned long ioaddr,cr10_value; |
1613 | 1613 | ||
1614 | ioaddr = iobase + DCR10; | 1614 | ioaddr = iobase + DCR10; |
1615 | cr10_value = phy_addr; | 1615 | cr10_value = phy_addr; |
1616 | cr10_value = (cr10_value<<5) + offset; | 1616 | cr10_value = (cr10_value<<5) + offset; |
@@ -1629,7 +1629,7 @@ static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset) | |||
1629 | static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) | 1629 | static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) |
1630 | { | 1630 | { |
1631 | unsigned long ioaddr,cr10_value; | 1631 | unsigned long ioaddr,cr10_value; |
1632 | 1632 | ||
1633 | ioaddr = iobase + DCR10; | 1633 | ioaddr = iobase + DCR10; |
1634 | cr10_value = phy_addr; | 1634 | cr10_value = phy_addr; |
1635 | cr10_value = (cr10_value<<5) + offset; | 1635 | cr10_value = (cr10_value<<5) + offset; |
@@ -1659,7 +1659,7 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id) | |||
1659 | static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id) | 1659 | static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id) |
1660 | { | 1660 | { |
1661 | u16 phy_data; | 1661 | u16 phy_data; |
1662 | 1662 | ||
1663 | outl(0x50000 , ioaddr); | 1663 | outl(0x50000 , ioaddr); |
1664 | udelay(1); | 1664 | udelay(1); |
1665 | phy_data = ( inl(ioaddr) >> 19 ) & 0x1; | 1665 | phy_data = ( inl(ioaddr) >> 19 ) & 0x1; |
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 136a70c4d5e4..64ecf929d2ac 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c | |||
@@ -38,12 +38,12 @@ | |||
38 | Copyright (C) 2001 Manfred Spraul | 38 | Copyright (C) 2001 Manfred Spraul |
39 | * ethtool support (jgarzik) | 39 | * ethtool support (jgarzik) |
40 | * Replace some MII-related magic numbers with constants (jgarzik) | 40 | * Replace some MII-related magic numbers with constants (jgarzik) |
41 | 41 | ||
42 | TODO: | 42 | TODO: |
43 | * enable pci_power_off | 43 | * enable pci_power_off |
44 | * Wake-On-LAN | 44 | * Wake-On-LAN |
45 | */ | 45 | */ |
46 | 46 | ||
47 | #define DRV_NAME "winbond-840" | 47 | #define DRV_NAME "winbond-840" |
48 | #define DRV_VERSION "1.01-d" | 48 | #define DRV_VERSION "1.01-d" |
49 | #define DRV_RELDATE "Nov-17-2001" | 49 | #define DRV_RELDATE "Nov-17-2001" |
@@ -57,7 +57,7 @@ c-help-name: Winbond W89c840 PCI Ethernet support | |||
57 | c-help-symbol: CONFIG_WINBOND_840 | 57 | c-help-symbol: CONFIG_WINBOND_840 |
58 | c-help: This driver is for the Winbond W89c840 chip. It also works with | 58 | c-help: This driver is for the Winbond W89c840 chip. It also works with |
59 | c-help: the TX9882 chip on the Compex RL100-ATX board. | 59 | c-help: the TX9882 chip on the Compex RL100-ATX board. |
60 | c-help: More specific information and updates are available from | 60 | c-help: More specific information and updates are available from |
61 | c-help: http://www.scyld.com/network/drivers.html | 61 | c-help: http://www.scyld.com/network/drivers.html |
62 | */ | 62 | */ |
63 | 63 | ||
@@ -207,7 +207,7 @@ Test with 'ping -s 10000' on a fast computer. | |||
207 | 207 | ||
208 | */ | 208 | */ |
209 | 209 | ||
210 | 210 | ||
211 | 211 | ||
212 | /* | 212 | /* |
213 | PCI probe table. | 213 | PCI probe table. |
@@ -374,7 +374,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | |||
374 | static struct ethtool_ops netdev_ethtool_ops; | 374 | static struct ethtool_ops netdev_ethtool_ops; |
375 | static int netdev_close(struct net_device *dev); | 375 | static int netdev_close(struct net_device *dev); |
376 | 376 | ||
377 | 377 | ||
378 | 378 | ||
379 | static int __devinit w840_probe1 (struct pci_dev *pdev, | 379 | static int __devinit w840_probe1 (struct pci_dev *pdev, |
380 | const struct pci_device_id *ent) | 380 | const struct pci_device_id *ent) |
@@ -434,7 +434,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, | |||
434 | np->mii_if.mdio_read = mdio_read; | 434 | np->mii_if.mdio_read = mdio_read; |
435 | np->mii_if.mdio_write = mdio_write; | 435 | np->mii_if.mdio_write = mdio_write; |
436 | np->base_addr = ioaddr; | 436 | np->base_addr = ioaddr; |
437 | 437 | ||
438 | pci_set_drvdata(pdev, dev); | 438 | pci_set_drvdata(pdev, dev); |
439 | 439 | ||
440 | if (dev->mem_start) | 440 | if (dev->mem_start) |
@@ -510,7 +510,7 @@ err_out_netdev: | |||
510 | return -ENODEV; | 510 | return -ENODEV; |
511 | } | 511 | } |
512 | 512 | ||
513 | 513 | ||
514 | /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are | 514 | /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are |
515 | often serial bit streams generated by the host processor. | 515 | often serial bit streams generated by the host processor. |
516 | The example below is for the common 93c46 EEPROM, 64 16 bit words. */ | 516 | The example below is for the common 93c46 EEPROM, 64 16 bit words. */ |
@@ -660,7 +660,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val | |||
660 | return; | 660 | return; |
661 | } | 661 | } |
662 | 662 | ||
663 | 663 | ||
664 | static int netdev_open(struct net_device *dev) | 664 | static int netdev_open(struct net_device *dev) |
665 | { | 665 | { |
666 | struct netdev_private *np = netdev_priv(dev); | 666 | struct netdev_private *np = netdev_priv(dev); |
@@ -731,7 +731,7 @@ static int update_link(struct net_device *dev) | |||
731 | dev->name, np->phys[0]); | 731 | dev->name, np->phys[0]); |
732 | netif_carrier_on(dev); | 732 | netif_carrier_on(dev); |
733 | } | 733 | } |
734 | 734 | ||
735 | if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) { | 735 | if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) { |
736 | /* If the link partner doesn't support autonegotiation | 736 | /* If the link partner doesn't support autonegotiation |
737 | * the MII detects its abilities with the "parallel detection". | 737 | * the MII detects its abilities with the "parallel detection". |
@@ -761,7 +761,7 @@ static int update_link(struct net_device *dev) | |||
761 | result |= 0x20000000; | 761 | result |= 0x20000000; |
762 | if (result != np->csr6 && debug) | 762 | if (result != np->csr6 && debug) |
763 | printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n", | 763 | printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n", |
764 | dev->name, fasteth ? 100 : 10, | 764 | dev->name, fasteth ? 100 : 10, |
765 | duplex ? "full" : "half", np->phys[0]); | 765 | duplex ? "full" : "half", np->phys[0]); |
766 | return result; | 766 | return result; |
767 | } | 767 | } |
@@ -947,7 +947,7 @@ static void init_registers(struct net_device *dev) | |||
947 | iowrite32(i, ioaddr + PCIBusCfg); | 947 | iowrite32(i, ioaddr + PCIBusCfg); |
948 | 948 | ||
949 | np->csr6 = 0; | 949 | np->csr6 = 0; |
950 | /* 128 byte Tx threshold; | 950 | /* 128 byte Tx threshold; |
951 | Transmit on; Receive on; */ | 951 | Transmit on; Receive on; */ |
952 | update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev)); | 952 | update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev)); |
953 | 953 | ||
@@ -1584,7 +1584,7 @@ static int netdev_close(struct net_device *dev) | |||
1584 | static void __devexit w840_remove1 (struct pci_dev *pdev) | 1584 | static void __devexit w840_remove1 (struct pci_dev *pdev) |
1585 | { | 1585 | { |
1586 | struct net_device *dev = pci_get_drvdata(pdev); | 1586 | struct net_device *dev = pci_get_drvdata(pdev); |
1587 | 1587 | ||
1588 | if (dev) { | 1588 | if (dev) { |
1589 | struct netdev_private *np = netdev_priv(dev); | 1589 | struct netdev_private *np = netdev_priv(dev); |
1590 | unregister_netdev(dev); | 1590 | unregister_netdev(dev); |
@@ -1640,7 +1640,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state) | |||
1640 | 1640 | ||
1641 | spin_unlock_wait(&dev->xmit_lock); | 1641 | spin_unlock_wait(&dev->xmit_lock); |
1642 | synchronize_irq(dev->irq); | 1642 | synchronize_irq(dev->irq); |
1643 | 1643 | ||
1644 | np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; | 1644 | np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; |
1645 | 1645 | ||
1646 | /* no more hardware accesses behind this line. */ | 1646 | /* no more hardware accesses behind this line. */ |
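The w840_suspend() context above shows the quiesce order used before cutting power: stop feeding the chip, wait for any interrupt handler still running on another CPU, harvest the final hardware counters, and only then stop touching the device. The kernel-style fragment below sketches that ordering under stated assumptions: it needs a kernel build environment, and my_priv, my_chip_disable_irqs(), RX_MISSED and the register offsets are invented stand-ins, not winbond-840 names.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct my_priv {
	void __iomem *base;
	struct net_device_stats stats;
};

#define RX_MISSED 0x40			/* invented register offset */

static void my_chip_disable_irqs(struct my_priv *np)
{
	iowrite32(0, np->base + 0x38);	/* invented: mask all interrupt sources */
}

static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct my_priv *np = netdev_priv(dev);

	netif_device_detach(dev);	/* no new transmits reach the hardware */
	my_chip_disable_irqs(np);	/* chip stops raising interrupts */
	synchronize_irq(dev->irq);	/* let a handler already running finish */

	/* harvest counters the chip clears on read, then leave it alone */
	np->stats.rx_missed_errors += ioread32(np->base + RX_MISSED) & 0xffff;

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}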
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index 56344103ac23..63c2175ed138 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c | |||
@@ -1,11 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards | 2 | * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards |
3 | * | 3 | * |
4 | * This software is (C) by the respective authors, and licensed under the GPL | 4 | * This software is (C) by the respective authors, and licensed under the GPL |
5 | * License. | 5 | * License. |
6 | * | 6 | * |
7 | * Written by Arjan van de Ven for Red Hat, Inc. | 7 | * Written by Arjan van de Ven for Red Hat, Inc. |
8 | * Based on work by Jeff Garzik, Doug Ledford and Donald Becker | 8 | * Based on work by Jeff Garzik, Doug Ledford and Donald Becker |
9 | * | 9 | * |
10 | * This software may be used and distributed according to the terms | 10 | * This software may be used and distributed according to the terms |
11 | * of the GNU General Public License, incorporated herein by reference. | 11 | * of the GNU General Public License, incorporated herein by reference. |
@@ -93,7 +93,7 @@ struct xircom_private { | |||
93 | 93 | ||
94 | unsigned long io_port; | 94 | unsigned long io_port; |
95 | int open; | 95 | int open; |
96 | 96 | ||
97 | /* transmit_used is the rotating counter that indicates which transmit | 97 | /* transmit_used is the rotating counter that indicates which transmit |
98 | descriptor has to be used next */ | 98 | descriptor has to be used next */ |
99 | int transmit_used; | 99 | int transmit_used; |
@@ -153,10 +153,10 @@ static struct pci_device_id xircom_pci_table[] = { | |||
153 | MODULE_DEVICE_TABLE(pci, xircom_pci_table); | 153 | MODULE_DEVICE_TABLE(pci, xircom_pci_table); |
154 | 154 | ||
155 | static struct pci_driver xircom_ops = { | 155 | static struct pci_driver xircom_ops = { |
156 | .name = "xircom_cb", | 156 | .name = "xircom_cb", |
157 | .id_table = xircom_pci_table, | 157 | .id_table = xircom_pci_table, |
158 | .probe = xircom_probe, | 158 | .probe = xircom_probe, |
159 | .remove = xircom_remove, | 159 | .remove = xircom_remove, |
160 | .suspend =NULL, | 160 | .suspend =NULL, |
161 | .resume =NULL | 161 | .resume =NULL |
162 | }; | 162 | }; |
@@ -174,7 +174,7 @@ static void print_binary(unsigned int number) | |||
174 | buffer[i2++]='1'; | 174 | buffer[i2++]='1'; |
175 | else | 175 | else |
176 | buffer[i2++]='0'; | 176 | buffer[i2++]='0'; |
177 | if ((i&3)==0) | 177 | if ((i&3)==0) |
178 | buffer[i2++]=' '; | 178 | buffer[i2++]=' '; |
179 | } | 179 | } |
180 | printk("%s\n",buffer); | 180 | printk("%s\n",buffer); |
@@ -196,10 +196,10 @@ static struct ethtool_ops netdev_ethtool_ops = { | |||
196 | 196 | ||
197 | /* xircom_probe is the code that gets called on device insertion. | 197 | /* xircom_probe is the code that gets called on device insertion. |
198 | It sets up the hardware and registers the device with the network layer. | 198 | It sets up the hardware and registers the device with the network layer. |
199 | 199 | ||
200 | TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the | 200 | TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the |
201 | first two packets that get sent, and pump hates that. | 201 | first two packets that get sent, and pump hates that. |
202 | 202 | ||
203 | */ | 203 | */ |
204 | static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 204 | static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
205 | { | 205 | { |
@@ -209,7 +209,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ | |||
209 | unsigned long flags; | 209 | unsigned long flags; |
210 | unsigned short tmp16; | 210 | unsigned short tmp16; |
211 | enter("xircom_probe"); | 211 | enter("xircom_probe"); |
212 | 212 | ||
213 | /* First do the PCI initialisation */ | 213 | /* First do the PCI initialisation */ |
214 | 214 | ||
215 | if (pci_enable_device(pdev)) | 215 | if (pci_enable_device(pdev)) |
@@ -217,24 +217,24 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ | |||
217 | 217 | ||
218 | /* disable all powermanagement */ | 218 | /* disable all powermanagement */ |
219 | pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); | 219 | pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); |
220 | 220 | ||
221 | pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/ | 221 | pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/ |
222 | 222 | ||
223 | /* clear PCI status, if any */ | 223 | /* clear PCI status, if any */ |
224 | pci_read_config_word (pdev,PCI_STATUS, &tmp16); | 224 | pci_read_config_word (pdev,PCI_STATUS, &tmp16); |
225 | pci_write_config_word (pdev, PCI_STATUS,tmp16); | 225 | pci_write_config_word (pdev, PCI_STATUS,tmp16); |
226 | 226 | ||
227 | pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev); | 227 | pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev); |
228 | 228 | ||
229 | if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { | 229 | if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { |
230 | printk(KERN_ERR "xircom_probe: failed to allocate io-region\n"); | 230 | printk(KERN_ERR "xircom_probe: failed to allocate io-region\n"); |
231 | return -ENODEV; | 231 | return -ENODEV; |
232 | } | 232 | } |
233 | 233 | ||
234 | /* | 234 | /* |
235 | Before changing the hardware, allocate the memory. | 235 | Before changing the hardware, allocate the memory. |
236 | This way, we can fail gracefully if not enough memory | 236 | This way, we can fail gracefully if not enough memory |
237 | is available. | 237 | is available. |
238 | */ | 238 | */ |
239 | dev = alloc_etherdev(sizeof(struct xircom_private)); | 239 | dev = alloc_etherdev(sizeof(struct xircom_private)); |
240 | if (!dev) { | 240 | if (!dev) { |
@@ -242,13 +242,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ | |||
242 | goto device_fail; | 242 | goto device_fail; |
243 | } | 243 | } |
244 | private = netdev_priv(dev); | 244 | private = netdev_priv(dev); |
245 | 245 | ||
246 | /* Allocate the send/receive buffers */ | 246 | /* Allocate the send/receive buffers */ |
247 | private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); | 247 | private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); |
248 | if (private->rx_buffer == NULL) { | 248 | if (private->rx_buffer == NULL) { |
249 | printk(KERN_ERR "xircom_probe: no memory for rx buffer \n"); | 249 | printk(KERN_ERR "xircom_probe: no memory for rx buffer \n"); |
250 | goto rx_buf_fail; | 250 | goto rx_buf_fail; |
251 | } | 251 | } |
252 | private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); | 252 | private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); |
253 | if (private->tx_buffer == NULL) { | 253 | if (private->tx_buffer == NULL) { |
254 | printk(KERN_ERR "xircom_probe: no memory for tx buffer \n"); | 254 | printk(KERN_ERR "xircom_probe: no memory for tx buffer \n"); |
@@ -265,11 +265,11 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ | |||
265 | spin_lock_init(&private->lock); | 265 | spin_lock_init(&private->lock); |
266 | dev->irq = pdev->irq; | 266 | dev->irq = pdev->irq; |
267 | dev->base_addr = private->io_port; | 267 | dev->base_addr = private->io_port; |
268 | 268 | ||
269 | initialize_card(private); | 269 | initialize_card(private); |
270 | read_mac_address(private); | 270 | read_mac_address(private); |
271 | setup_descriptors(private); | 271 | setup_descriptors(private); |
272 | 272 | ||
273 | dev->open = &xircom_open; | 273 | dev->open = &xircom_open; |
274 | dev->hard_start_xmit = &xircom_start_xmit; | 274 | dev->hard_start_xmit = &xircom_start_xmit; |
275 | dev->stop = &xircom_close; | 275 | dev->stop = &xircom_close; |
@@ -285,19 +285,19 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ | |||
285 | printk(KERN_ERR "xircom_probe: netdevice registration failed.\n"); | 285 | printk(KERN_ERR "xircom_probe: netdevice registration failed.\n"); |
286 | goto reg_fail; | 286 | goto reg_fail; |
287 | } | 287 | } |
288 | 288 | ||
289 | printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq); | 289 | printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq); |
290 | /* start the transmitter to get a heartbeat */ | 290 | /* start the transmitter to get a heartbeat */ |
291 | /* TODO: send 2 dummy packets here */ | 291 | /* TODO: send 2 dummy packets here */ |
292 | transceiver_voodoo(private); | 292 | transceiver_voodoo(private); |
293 | 293 | ||
294 | spin_lock_irqsave(&private->lock,flags); | 294 | spin_lock_irqsave(&private->lock,flags); |
295 | activate_transmitter(private); | 295 | activate_transmitter(private); |
296 | activate_receiver(private); | 296 | activate_receiver(private); |
297 | spin_unlock_irqrestore(&private->lock,flags); | 297 | spin_unlock_irqrestore(&private->lock,flags); |
298 | 298 | ||
299 | trigger_receive(private); | 299 | trigger_receive(private); |
300 | 300 | ||
301 | leave("xircom_probe"); | 301 | leave("xircom_probe"); |
302 | return 0; | 302 | return 0; |
303 | 303 | ||
@@ -332,7 +332,7 @@ static void __devexit xircom_remove(struct pci_dev *pdev) | |||
332 | free_netdev(dev); | 332 | free_netdev(dev); |
333 | pci_set_drvdata(pdev, NULL); | 333 | pci_set_drvdata(pdev, NULL); |
334 | leave("xircom_remove"); | 334 | leave("xircom_remove"); |
335 | } | 335 | } |
336 | 336 | ||
337 | static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs) | 337 | static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs) |
338 | { | 338 | { |
@@ -346,11 +346,11 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs | |||
346 | spin_lock(&card->lock); | 346 | spin_lock(&card->lock); |
347 | status = inl(card->io_port+CSR5); | 347 | status = inl(card->io_port+CSR5); |
348 | 348 | ||
349 | #ifdef DEBUG | 349 | #ifdef DEBUG |
350 | print_binary(status); | 350 | print_binary(status); |
351 | printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]); | 351 | printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]); |
352 | printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]); | 352 | printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]); |
353 | #endif | 353 | #endif |
354 | /* Handle shared irq and hotplug */ | 354 | /* Handle shared irq and hotplug */ |
355 | if (status == 0 || status == 0xffffffff) { | 355 | if (status == 0 || status == 0xffffffff) { |
356 | spin_unlock(&card->lock); | 356 | spin_unlock(&card->lock); |
@@ -366,21 +366,21 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs | |||
366 | netif_carrier_on(dev); | 366 | netif_carrier_on(dev); |
367 | else | 367 | else |
368 | netif_carrier_off(dev); | 368 | netif_carrier_off(dev); |
369 | 369 | ||
370 | } | 370 | } |
371 | 371 | ||
372 | /* Clear all remaining interrupts */ | 372 | /* Clear all remaining interrupts */ |
373 | status |= 0xffffffff; /* FIXME: make this clear only the | 373 | status |= 0xffffffff; /* FIXME: make this clear only the |
374 | real existing bits */ | 374 | real existing bits */ |
375 | outl(status,card->io_port+CSR5); | 375 | outl(status,card->io_port+CSR5); |
376 | |||
377 | 376 | ||
378 | for (i=0;i<NUMDESCRIPTORS;i++) | 377 | |
378 | for (i=0;i<NUMDESCRIPTORS;i++) | ||
379 | investigate_write_descriptor(dev,card,i,bufferoffsets[i]); | 379 | investigate_write_descriptor(dev,card,i,bufferoffsets[i]); |
380 | for (i=0;i<NUMDESCRIPTORS;i++) | 380 | for (i=0;i<NUMDESCRIPTORS;i++) |
381 | investigate_read_descriptor(dev,card,i,bufferoffsets[i]); | 381 | investigate_read_descriptor(dev,card,i,bufferoffsets[i]); |
382 | 382 | ||
383 | 383 | ||
384 | spin_unlock(&card->lock); | 384 | spin_unlock(&card->lock); |
385 | leave("xircom_interrupt"); | 385 | leave("xircom_interrupt"); |
386 | return IRQ_HANDLED; | 386 | return IRQ_HANDLED; |
@@ -393,38 +393,38 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
393 | int nextdescriptor; | 393 | int nextdescriptor; |
394 | int desc; | 394 | int desc; |
395 | enter("xircom_start_xmit"); | 395 | enter("xircom_start_xmit"); |
396 | 396 | ||
397 | card = netdev_priv(dev); | 397 | card = netdev_priv(dev); |
398 | spin_lock_irqsave(&card->lock,flags); | 398 | spin_lock_irqsave(&card->lock,flags); |
399 | 399 | ||
400 | /* First see if we can free some descriptors */ | 400 | /* First see if we can free some descriptors */ |
401 | for (desc=0;desc<NUMDESCRIPTORS;desc++) | 401 | for (desc=0;desc<NUMDESCRIPTORS;desc++) |
402 | investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]); | 402 | investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]); |
403 | 403 | ||
404 | 404 | ||
405 | nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS); | 405 | nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS); |
406 | desc = card->transmit_used; | 406 | desc = card->transmit_used; |
407 | 407 | ||
408 | /* only send the packet if the descriptor is free */ | 408 | /* only send the packet if the descriptor is free */ |
409 | if (card->tx_buffer[4*desc]==0) { | 409 | if (card->tx_buffer[4*desc]==0) { |
410 | /* Copy the packet data; zero the memory first as the card | 410 | /* Copy the packet data; zero the memory first as the card |
411 | sometimes sends more than you ask it to. */ | 411 | sometimes sends more than you ask it to. */ |
412 | 412 | ||
413 | memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); | 413 | memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); |
414 | memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len); | 414 | memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len); |
415 | 415 | ||
416 | 416 | ||
417 | /* FIXME: The specification tells us that the length we send HAS to be a multiple of | 417 | /* FIXME: The specification tells us that the length we send HAS to be a multiple of |
418 | 4 bytes. */ | 418 | 4 bytes. */ |
419 | 419 | ||
420 | card->tx_buffer[4*desc+1] = skb->len; | 420 | card->tx_buffer[4*desc+1] = skb->len; |
421 | if (desc == NUMDESCRIPTORS-1) | 421 | if (desc == NUMDESCRIPTORS-1) |
422 | card->tx_buffer[4*desc+1] |= (1<<25); /* bit 25: last descriptor of the ring */ | 422 | card->tx_buffer[4*desc+1] |= (1<<25); /* bit 25: last descriptor of the ring */ |
423 | 423 | ||
424 | card->tx_buffer[4*desc+1] |= 0xF0000000; | 424 | card->tx_buffer[4*desc+1] |= 0xF0000000; |
425 | /* 0xF0... means want interrupts*/ | 425 | /* 0xF0... means want interrupts*/ |
426 | card->tx_skb[desc] = skb; | 426 | card->tx_skb[desc] = skb; |
427 | 427 | ||
428 | wmb(); | 428 | wmb(); |
429 | /* This gives the descriptor to the card */ | 429 | /* This gives the descriptor to the card */ |
430 | card->tx_buffer[4*desc] = 0x80000000; | 430 | card->tx_buffer[4*desc] = 0x80000000; |
@@ -433,18 +433,18 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
433 | netif_stop_queue(dev); | 433 | netif_stop_queue(dev); |
434 | } | 434 | } |
435 | card->transmit_used = nextdescriptor; | 435 | card->transmit_used = nextdescriptor; |
436 | leave("xircom-start_xmit - sent"); | 436 | leave("xircom-start_xmit - sent"); |
437 | spin_unlock_irqrestore(&card->lock,flags); | 437 | spin_unlock_irqrestore(&card->lock,flags); |
438 | return 0; | 438 | return 0; |
439 | } | 439 | } |
440 | 440 | ||
441 | 441 | ||
442 | 442 | ||
443 | /* Uh oh... no free descriptor... drop the packet */ | 443 | /* Uh oh... no free descriptor... drop the packet */ |
444 | netif_stop_queue(dev); | 444 | netif_stop_queue(dev); |
445 | spin_unlock_irqrestore(&card->lock,flags); | 445 | spin_unlock_irqrestore(&card->lock,flags); |
446 | trigger_transmit(card); | 446 | trigger_transmit(card); |
447 | 447 | ||
448 | return -EIO; | 448 | return -EIO; |
449 | } | 449 | } |
450 | 450 | ||
@@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev) | |||
462 | leave("xircom_open - No IRQ"); | 462 | leave("xircom_open - No IRQ"); |
463 | return retval; | 463 | return retval; |
464 | } | 464 | } |
465 | 465 | ||
466 | xircom_up(xp); | 466 | xircom_up(xp); |
467 | xp->open = 1; | 467 | xp->open = 1; |
468 | leave("xircom_open"); | 468 | leave("xircom_open"); |
@@ -473,31 +473,31 @@ static int xircom_close(struct net_device *dev) | |||
473 | { | 473 | { |
474 | struct xircom_private *card; | 474 | struct xircom_private *card; |
475 | unsigned long flags; | 475 | unsigned long flags; |
476 | 476 | ||
477 | enter("xircom_close"); | 477 | enter("xircom_close"); |
478 | card = netdev_priv(dev); | 478 | card = netdev_priv(dev); |
479 | netif_stop_queue(dev); /* we don't want new packets */ | 479 | netif_stop_queue(dev); /* we don't want new packets */ |
480 | 480 | ||
481 | 481 | ||
482 | spin_lock_irqsave(&card->lock,flags); | 482 | spin_lock_irqsave(&card->lock,flags); |
483 | 483 | ||
484 | disable_all_interrupts(card); | 484 | disable_all_interrupts(card); |
485 | #if 0 | 485 | #if 0 |
486 | /* We can enable this again once we send dummy packets on ifconfig ethX up */ | 486 | /* We can enable this again once we send dummy packets on ifconfig ethX up */ |
487 | deactivate_receiver(card); | 487 | deactivate_receiver(card); |
488 | deactivate_transmitter(card); | 488 | deactivate_transmitter(card); |
489 | #endif | 489 | #endif |
490 | remove_descriptors(card); | 490 | remove_descriptors(card); |
491 | 491 | ||
492 | spin_unlock_irqrestore(&card->lock,flags); | 492 | spin_unlock_irqrestore(&card->lock,flags); |
493 | 493 | ||
494 | card->open = 0; | 494 | card->open = 0; |
495 | free_irq(dev->irq,dev); | 495 | free_irq(dev->irq,dev); |
496 | 496 | ||
497 | leave("xircom_close"); | 497 | leave("xircom_close"); |
498 | 498 | ||
499 | return 0; | 499 | return 0; |
500 | 500 | ||
501 | } | 501 | } |
502 | 502 | ||
503 | 503 | ||
@@ -506,8 +506,8 @@ static struct net_device_stats *xircom_get_stats(struct net_device *dev) | |||
506 | { | 506 | { |
507 | struct xircom_private *card = netdev_priv(dev); | 507 | struct xircom_private *card = netdev_priv(dev); |
508 | return &card->stats; | 508 | return &card->stats; |
509 | } | 509 | } |
510 | 510 | ||
511 | 511 | ||
512 | #ifdef CONFIG_NET_POLL_CONTROLLER | 512 | #ifdef CONFIG_NET_POLL_CONTROLLER |
513 | static void xircom_poll_controller(struct net_device *dev) | 513 | static void xircom_poll_controller(struct net_device *dev) |
@@ -540,7 +540,7 @@ static void initialize_card(struct xircom_private *card) | |||
540 | outl(val, card->io_port + CSR0); | 540 | outl(val, card->io_port + CSR0); |
541 | 541 | ||
542 | 542 | ||
543 | val = 0; /* Value 0x00 is a safe and conservative value | 543 | val = 0; /* Value 0x00 is a safe and conservative value |
544 | for the PCI configuration settings */ | 544 | for the PCI configuration settings */ |
545 | outl(val, card->io_port + CSR0); | 545 | outl(val, card->io_port + CSR0); |
546 | 546 | ||
@@ -617,23 +617,23 @@ static void setup_descriptors(struct xircom_private *card) | |||
617 | 617 | ||
618 | /* Rx Descr2: address of the buffer | 618 | /* Rx Descr2: address of the buffer |
619 | we store the buffer at the 2nd half of the page */ | 619 | we store the buffer at the 2nd half of the page */ |
620 | 620 | ||
621 | address = (unsigned long) card->rx_dma_handle; | 621 | address = (unsigned long) card->rx_dma_handle; |
622 | card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); | 622 | card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); |
623 | /* Rx Desc3: address of 2nd buffer -> 0 */ | 623 | /* Rx Desc3: address of 2nd buffer -> 0 */ |
624 | card->rx_buffer[i*4 + 3] = 0; | 624 | card->rx_buffer[i*4 + 3] = 0; |
625 | } | 625 | } |
626 | 626 | ||
627 | wmb(); | 627 | wmb(); |
628 | /* Write the receive descriptor ring address to the card */ | 628 | /* Write the receive descriptor ring address to the card */ |
629 | address = (unsigned long) card->rx_dma_handle; | 629 | address = (unsigned long) card->rx_dma_handle; |
630 | val = cpu_to_le32(address); | 630 | val = cpu_to_le32(address); |
631 | outl(val, card->io_port + CSR3); /* Receive descr list address */ | 631 | outl(val, card->io_port + CSR3); /* Receive descr list address */ |
632 | 632 | ||
633 | 633 | ||
634 | /* transmit descriptors */ | 634 | /* transmit descriptors */ |
635 | memset(card->tx_buffer, 0, 128); /* clear the descriptors */ | 635 | memset(card->tx_buffer, 0, 128); /* clear the descriptors */ |
636 | 636 | ||
637 | for (i=0;i<NUMDESCRIPTORS;i++ ) { | 637 | for (i=0;i<NUMDESCRIPTORS;i++ ) { |
638 | /* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */ | 638 | /* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */ |
639 | card->tx_buffer[i*4 + 0] = 0x00000000; | 639 | card->tx_buffer[i*4 + 0] = 0x00000000; |
@@ -641,7 +641,7 @@ static void setup_descriptors(struct xircom_private *card) | |||
641 | card->tx_buffer[i*4 + 1] = 1536; | 641 | card->tx_buffer[i*4 + 1] = 1536; |
642 | if (i==NUMDESCRIPTORS-1) | 642 | if (i==NUMDESCRIPTORS-1) |
643 | card->tx_buffer[i*4 + 1] |= (1 << 25); /* bit 25 is "last descriptor" */ | 643 | card->tx_buffer[i*4 + 1] |= (1 << 25); /* bit 25 is "last descriptor" */ |
644 | 644 | ||
645 | /* Tx Descr2: address of the buffer | 645 | /* Tx Descr2: address of the buffer |
646 | we store the buffer at the 2nd half of the page */ | 646 | we store the buffer at the 2nd half of the page */ |
647 | address = (unsigned long) card->tx_dma_handle; | 647 | address = (unsigned long) card->tx_dma_handle; |
@@ -748,7 +748,7 @@ static int receive_active(struct xircom_private *card) | |||
748 | activate_receiver enables the receiver on the card. | 748 | activate_receiver enables the receiver on the card. |
749 | Before being allowed to activate the receiver, the receiver | 749 | Before being allowed to activate the receiver, the receiver |
750 | must be completely de-activated. To achieve this, | 750 | must be completely de-activated. To achieve this, |
751 | this code actually disables the receiver first; then it waits for the | 751 | this code actually disables the receiver first; then it waits for the |
752 | receiver to become inactive, then it activates the receiver and then | 752 | receiver to become inactive, then it activates the receiver and then |
753 | it waits for the receiver to be active. | 753 | it waits for the receiver to be active. |
754 | 754 | ||
@@ -762,13 +762,13 @@ static void activate_receiver(struct xircom_private *card) | |||
762 | 762 | ||
763 | 763 | ||
764 | val = inl(card->io_port + CSR6); /* Operation mode */ | 764 | val = inl(card->io_port + CSR6); /* Operation mode */ |
765 | 765 | ||
766 | /* If the "active" bit is set and the receiver is already | 766 | /* If the "active" bit is set and the receiver is already |
767 | active, no need to do the expensive thing */ | 767 | active, no need to do the expensive thing */ |
768 | if ((val&2) && (receive_active(card))) | 768 | if ((val&2) && (receive_active(card))) |
769 | return; | 769 | return; |
770 | 770 | ||
771 | 771 | ||
772 | val = val & ~2; /* disable the receiver */ | 772 | val = val & ~2; /* disable the receiver */ |
773 | outl(val, card->io_port + CSR6); | 773 | outl(val, card->io_port + CSR6); |
774 | 774 | ||
@@ -805,7 +805,7 @@ static void activate_receiver(struct xircom_private *card) | |||
805 | 805 | ||
806 | /* | 806 | /* |
807 | deactivate_receiver disables the receiver on the card. | 807 | deactivate_receiver disables the receiver on the card. |
808 | To achieve this, this code disables the receiver first; | 808 | To achieve this, this code disables the receiver first; |
809 | then it waits for the receiver to become inactive. | 809 | then it waits for the receiver to become inactive. |
810 | 810 | ||
811 | must be called with the lock held and interrupts disabled. | 811 | must be called with the lock held and interrupts disabled. |
@@ -840,7 +840,7 @@ static void deactivate_receiver(struct xircom_private *card) | |||
840 | activate_transmitter enables the transmitter on the card. | 840 | activate_transmitter enables the transmitter on the card. |
841 | Before being allowed to activate the transmitter, the transmitter | 841 | Before being allowed to activate the transmitter, the transmitter |
842 | must be completely de-activated. To achieve this, | 842 | must be completely de-activated. To achieve this, |
843 | this code actually disables the transmitter first; then it waits for the | 843 | this code actually disables the transmitter first; then it waits for the |
844 | transmitter to become inactive, then it activates the transmitter and then | 844 | transmitter to become inactive, then it activates the transmitter and then |
845 | it waits for the transmitter to be active again. | 845 | it waits for the transmitter to be active again. |
846 | 846 | ||
@@ -856,7 +856,7 @@ static void activate_transmitter(struct xircom_private *card) | |||
856 | val = inl(card->io_port + CSR6); /* Operation mode */ | 856 | val = inl(card->io_port + CSR6); /* Operation mode */ |
857 | 857 | ||
858 | /* If the "active" bit is set and the transmitter is already | 858 | /* If the "active" bit is set and the transmitter is already |
859 | active, no need to do the expensive thing */ | 859 | active, no need to do the expensive thing */ |
860 | if ((val&(1<<13)) && (transmit_active(card))) | 860 | if ((val&(1<<13)) && (transmit_active(card))) |
861 | return; | 861 | return; |
862 | 862 | ||
@@ -896,7 +896,7 @@ static void activate_transmitter(struct xircom_private *card) | |||
896 | 896 | ||
897 | /* | 897 | /* |
898 | deactivate_transmitter disables the transmitter on the card. | 898 | deactivate_transmitter disables the transmitter on the card. |
899 | To achieve this, this code disables the transmitter first; | 899 | To achieve this, this code disables the transmitter first; |
900 | then it waits for the transmitter to become inactive. | 900 | then it waits for the transmitter to become inactive. |
901 | 901 | ||
902 | must be called with the lock held and interrupts disabled. | 902 | must be called with the lock held and interrupts disabled. |
@@ -990,7 +990,7 @@ static void disable_all_interrupts(struct xircom_private *card) | |||
990 | { | 990 | { |
991 | unsigned int val; | 991 | unsigned int val; |
992 | enter("disable_all_interrupts"); | 992 | enter("disable_all_interrupts"); |
993 | 993 | ||
994 | val = 0; /* disable all interrupts */ | 994 | val = 0; /* disable all interrupts */ |
995 | outl(val, card->io_port + CSR7); | 995 | outl(val, card->io_port + CSR7); |
996 | 996 | ||
@@ -1031,8 +1031,8 @@ static int enable_promisc(struct xircom_private *card) | |||
1031 | unsigned int val; | 1031 | unsigned int val; |
1032 | enter("enable_promisc"); | 1032 | enter("enable_promisc"); |
1033 | 1033 | ||
1034 | val = inl(card->io_port + CSR6); | 1034 | val = inl(card->io_port + CSR6); |
1035 | val = val | (1 << 6); | 1035 | val = val | (1 << 6); |
1036 | outl(val, card->io_port + CSR6); | 1036 | outl(val, card->io_port + CSR6); |
1037 | 1037 | ||
1038 | leave("enable_promisc"); | 1038 | leave("enable_promisc"); |
@@ -1042,7 +1042,7 @@ static int enable_promisc(struct xircom_private *card) | |||
1042 | 1042 | ||
1043 | 1043 | ||
1044 | 1044 | ||
1045 | /* | 1045 | /* |
1046 | link_status() checks the link status and will return 0 for no link, 10 for a 10mbit link and 100 for... guess what. | 1046 | link_status() checks the link status and will return 0 for no link, 10 for a 10mbit link and 100 for... guess what. |
1047 | 1047 | ||
1048 | Must be called in locked state with interrupts disabled | 1048 | Must be called in locked state with interrupts disabled |
@@ -1051,15 +1051,15 @@ static int link_status(struct xircom_private *card) | |||
1051 | { | 1051 | { |
1052 | unsigned int val; | 1052 | unsigned int val; |
1053 | enter("link_status"); | 1053 | enter("link_status"); |
1054 | 1054 | ||
1055 | val = inb(card->io_port + CSR12); | 1055 | val = inb(card->io_port + CSR12); |
1056 | 1056 | ||
1057 | if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */ | 1057 | if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */ |
1058 | return 10; | 1058 | return 10; |
1059 | if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */ | 1059 | if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */ |
1060 | return 100; | 1060 | return 100; |
1061 | 1061 | ||
1062 | /* If we get here -> no link at all */ | 1062 | /* If we get here -> no link at all */ |
1063 | 1063 | ||
1064 | leave("link_status"); | 1064 | leave("link_status"); |
1065 | return 0; | 1065 | return 0; |
@@ -1071,7 +1071,7 @@ static int link_status(struct xircom_private *card) | |||
1071 | 1071 | ||
1072 | /* | 1072 | /* |
1073 | read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure. | 1073 | read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure. |
1074 | 1074 | ||
1075 | This function will take the spinlock itself and can, as a result, not be called with the lock held. | 1075 | This function will take the spinlock itself and can, as a result, not be called with the lock held. |
1076 | */ | 1076 | */ |
1077 | static void read_mac_address(struct xircom_private *card) | 1077 | static void read_mac_address(struct xircom_private *card) |
@@ -1081,7 +1081,7 @@ static void read_mac_address(struct xircom_private *card) | |||
1081 | int i; | 1081 | int i; |
1082 | 1082 | ||
1083 | enter("read_mac_address"); | 1083 | enter("read_mac_address"); |
1084 | 1084 | ||
1085 | spin_lock_irqsave(&card->lock, flags); | 1085 | spin_lock_irqsave(&card->lock, flags); |
1086 | 1086 | ||
1087 | outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ | 1087 | outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ |
@@ -1095,7 +1095,7 @@ static void read_mac_address(struct xircom_private *card) | |||
1095 | outl(i + 3, card->io_port + CSR10); | 1095 | outl(i + 3, card->io_port + CSR10); |
1096 | data_count = inl(card->io_port + CSR9) & 0xff; | 1096 | data_count = inl(card->io_port + CSR9) & 0xff; |
1097 | if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { | 1097 | if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { |
1098 | /* | 1098 | /* |
1099 | * This is it. We have the data we want. | 1099 | * This is it. We have the data we want. |
1100 | */ | 1100 | */ |
1101 | for (j = 0; j < 6; j++) { | 1101 | for (j = 0; j < 6; j++) { |
@@ -1136,12 +1136,12 @@ static void transceiver_voodoo(struct xircom_private *card) | |||
1136 | spin_lock_irqsave(&card->lock, flags); | 1136 | spin_lock_irqsave(&card->lock, flags); |
1137 | 1137 | ||
1138 | outl(0x0008, card->io_port + CSR15); | 1138 | outl(0x0008, card->io_port + CSR15); |
1139 | udelay(25); | 1139 | udelay(25); |
1140 | outl(0xa8050000, card->io_port + CSR15); | 1140 | outl(0xa8050000, card->io_port + CSR15); |
1141 | udelay(25); | 1141 | udelay(25); |
1142 | outl(0xa00f0000, card->io_port + CSR15); | 1142 | outl(0xa00f0000, card->io_port + CSR15); |
1143 | udelay(25); | 1143 | udelay(25); |
1144 | 1144 | ||
1145 | spin_unlock_irqrestore(&card->lock, flags); | 1145 | spin_unlock_irqrestore(&card->lock, flags); |
1146 | 1146 | ||
1147 | netif_start_queue(card->dev); | 1147 | netif_start_queue(card->dev); |
@@ -1163,15 +1163,15 @@ static void xircom_up(struct xircom_private *card) | |||
1163 | 1163 | ||
1164 | spin_lock_irqsave(&card->lock, flags); | 1164 | spin_lock_irqsave(&card->lock, flags); |
1165 | 1165 | ||
1166 | 1166 | ||
1167 | enable_link_interrupt(card); | 1167 | enable_link_interrupt(card); |
1168 | enable_transmit_interrupt(card); | 1168 | enable_transmit_interrupt(card); |
1169 | enable_receive_interrupt(card); | 1169 | enable_receive_interrupt(card); |
1170 | enable_common_interrupts(card); | 1170 | enable_common_interrupts(card); |
1171 | enable_promisc(card); | 1171 | enable_promisc(card); |
1172 | 1172 | ||
1173 | /* The card can have received packets already, read them away now */ | 1173 | /* The card can have received packets already, read them away now */ |
1174 | for (i=0;i<NUMDESCRIPTORS;i++) | 1174 | for (i=0;i<NUMDESCRIPTORS;i++) |
1175 | investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]); | 1175 | investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]); |
1176 | 1176 | ||
1177 | 1177 | ||
@@ -1185,15 +1185,15 @@ static void xircom_up(struct xircom_private *card) | |||
1185 | /* Bufferoffset is in BYTES */ | 1185 | /* Bufferoffset is in BYTES */ |
1186 | static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset) | 1186 | static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset) |
1187 | { | 1187 | { |
1188 | int status; | 1188 | int status; |
1189 | 1189 | ||
1190 | enter("investigate_read_descriptor"); | 1190 | enter("investigate_read_descriptor"); |
1191 | status = card->rx_buffer[4*descnr]; | 1191 | status = card->rx_buffer[4*descnr]; |
1192 | 1192 | ||
1193 | if ((status > 0)) { /* packet received */ | 1193 | if ((status > 0)) { /* packet received */ |
1194 | 1194 | ||
1195 | /* TODO: discard error packets */ | 1195 | /* TODO: discard error packets */ |
1196 | 1196 | ||
1197 | short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */ | 1197 | short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */ |
1198 | struct sk_buff *skb; | 1198 | struct sk_buff *skb; |
1199 | 1199 | ||
@@ -1216,7 +1216,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri | |||
1216 | dev->last_rx = jiffies; | 1216 | dev->last_rx = jiffies; |
1217 | card->stats.rx_packets++; | 1217 | card->stats.rx_packets++; |
1218 | card->stats.rx_bytes += pkt_len; | 1218 | card->stats.rx_bytes += pkt_len; |
1219 | 1219 | ||
1220 | out: | 1220 | out: |
1221 | /* give the buffer back to the card */ | 1221 | /* give the buffer back to the card */ |
1222 | card->rx_buffer[4*descnr] = 0x80000000; | 1222 | card->rx_buffer[4*descnr] = 0x80000000; |
@@ -1234,9 +1234,9 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p | |||
1234 | int status; | 1234 | int status; |
1235 | 1235 | ||
1236 | enter("investigate_write_descriptor"); | 1236 | enter("investigate_write_descriptor"); |
1237 | 1237 | ||
1238 | status = card->tx_buffer[4*descnr]; | 1238 | status = card->tx_buffer[4*descnr]; |
1239 | #if 0 | 1239 | #if 0 |
1240 | if (status & 0x8000) { /* Major error */ | 1240 | if (status & 0x8000) { /* Major error */ |
1241 | printk(KERN_ERR "Major transmit error status %x \n", status); | 1241 | printk(KERN_ERR "Major transmit error status %x \n", status); |
1242 | card->tx_buffer[4*descnr] = 0; | 1242 | card->tx_buffer[4*descnr] = 0; |
@@ -1258,7 +1258,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p | |||
1258 | } | 1258 | } |
1259 | 1259 | ||
1260 | leave("investigate_write_descriptor"); | 1260 | leave("investigate_write_descriptor"); |
1261 | 1261 | ||
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | 1264 | ||
@@ -1271,8 +1271,8 @@ static int __init xircom_init(void) | |||
1271 | static void __exit xircom_exit(void) | 1271 | static void __exit xircom_exit(void) |
1272 | { | 1272 | { |
1273 | pci_unregister_driver(&xircom_ops); | 1273 | pci_unregister_driver(&xircom_ops); |
1274 | } | 1274 | } |
1275 | 1275 | ||
1276 | module_init(xircom_init) | 1276 | module_init(xircom_init) |
1277 | module_exit(xircom_exit) | 1277 | module_exit(xircom_exit) |
1278 | 1278 | ||
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index d9a774b91ddc..f1b2640ebdc6 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h | |||
@@ -307,7 +307,7 @@ enum velocity_owner { | |||
307 | #define TX_QUEUE_NO 4 | 307 | #define TX_QUEUE_NO 4 |
308 | 308 | ||
309 | #define MAX_HW_MIB_COUNTER 32 | 309 | #define MAX_HW_MIB_COUNTER 32 |
310 | #define VELOCITY_MIN_MTU (1514-14) | 310 | #define VELOCITY_MIN_MTU (64) |
311 | #define VELOCITY_MAX_MTU (9000) | 311 | #define VELOCITY_MAX_MTU (9000) |
312 | 312 | ||
313 | /* | 313 | /* |
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index eba8e5cfacc2..f485a97844cc 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c | |||
@@ -50,10 +50,6 @@ static const char* devname = "PCI200SYN"; | |||
50 | static int pci_clock_freq = 33000000; | 50 | static int pci_clock_freq = 33000000; |
51 | #define CLOCK_BASE pci_clock_freq | 51 | #define CLOCK_BASE pci_clock_freq |
52 | 52 | ||
53 | #define PCI_VENDOR_ID_GORAMO 0x10B5 /* uses PLX:9050 ID - this card */ | ||
54 | #define PCI_DEVICE_ID_PCI200SYN 0x9050 /* doesn't have its own ID */ | ||
55 | |||
56 | |||
57 | /* | 53 | /* |
58 | * PLX PCI9052 local configuration and shared runtime registers. | 54 | * PLX PCI9052 local configuration and shared runtime registers. |
59 | * This structure can be used to access 9052 registers (memory mapped). | 55 | * This structure can be used to access 9052 registers (memory mapped). |
@@ -262,7 +258,7 @@ static void pci200_pci_remove_one(struct pci_dev *pdev) | |||
262 | int i; | 258 | int i; |
263 | card_t *card = pci_get_drvdata(pdev); | 259 | card_t *card = pci_get_drvdata(pdev); |
264 | 260 | ||
265 | for(i = 0; i < 2; i++) | 261 | for (i = 0; i < 2; i++) |
266 | if (card->ports[i].card) { | 262 | if (card->ports[i].card) { |
267 | struct net_device *dev = port_to_dev(&card->ports[i]); | 263 | struct net_device *dev = port_to_dev(&card->ports[i]); |
268 | unregister_hdlc_device(dev); | 264 | unregister_hdlc_device(dev); |
@@ -385,6 +381,15 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, | |||
385 | " %u RX packets rings\n", ramsize / 1024, ramphys, | 381 | " %u RX packets rings\n", ramsize / 1024, ramphys, |
386 | pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers); | 382 | pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers); |
387 | 383 | ||
384 | if (pdev->subsystem_device == PCI_DEVICE_ID_PLX_9050) { | ||
385 | printk(KERN_ERR "Detected PCI200SYN card with old " | ||
386 | "configuration data.\n"); | ||
387 | printk(KERN_ERR "See <http://www.kernel.org/pub/" | ||
388 | "linux/utils/net/hdlc/pci200syn/> for update.\n"); | ||
389 | printk(KERN_ERR "The card will stop working with" | ||
390 | " future versions of Linux if not updated.\n"); | ||
391 | } | ||
392 | |||
388 | if (card->tx_ring_buffers < 1) { | 393 | if (card->tx_ring_buffers < 1) { |
389 | printk(KERN_ERR "pci200syn: RAM test failed\n"); | 394 | printk(KERN_ERR "pci200syn: RAM test failed\n"); |
390 | pci200_pci_remove_one(pdev); | 395 | pci200_pci_remove_one(pdev); |
@@ -396,7 +401,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, | |||
396 | writew(readw(p) | 0x0040, p); | 401 | writew(readw(p) | 0x0040, p); |
397 | 402 | ||
398 | /* Allocate IRQ */ | 403 | /* Allocate IRQ */ |
399 | if(request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) { | 404 | if (request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) { |
400 | printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n", | 405 | printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n", |
401 | pdev->irq); | 406 | pdev->irq); |
402 | pci200_pci_remove_one(pdev); | 407 | pci200_pci_remove_one(pdev); |
@@ -406,7 +411,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, | |||
406 | 411 | ||
407 | sca_init(card, 0); | 412 | sca_init(card, 0); |
408 | 413 | ||
409 | for(i = 0; i < 2; i++) { | 414 | for (i = 0; i < 2; i++) { |
410 | port_t *port = &card->ports[i]; | 415 | port_t *port = &card->ports[i]; |
411 | struct net_device *dev = port_to_dev(port); | 416 | struct net_device *dev = port_to_dev(port); |
412 | hdlc_device *hdlc = dev_to_hdlc(dev); | 417 | hdlc_device *hdlc = dev_to_hdlc(dev); |
@@ -425,7 +430,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, | |||
425 | hdlc->xmit = sca_xmit; | 430 | hdlc->xmit = sca_xmit; |
426 | port->settings.clock_type = CLOCK_EXT; | 431 | port->settings.clock_type = CLOCK_EXT; |
427 | port->card = card; | 432 | port->card = card; |
428 | if(register_hdlc_device(dev)) { | 433 | if (register_hdlc_device(dev)) { |
429 | printk(KERN_ERR "pci200syn: unable to register hdlc " | 434 | printk(KERN_ERR "pci200syn: unable to register hdlc " |
430 | "device\n"); | 435 | "device\n"); |
431 | port->card = NULL; | 436 | port->card = NULL; |
@@ -445,8 +450,10 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, | |||
445 | 450 | ||
446 | 451 | ||
447 | static struct pci_device_id pci200_pci_tbl[] __devinitdata = { | 452 | static struct pci_device_id pci200_pci_tbl[] __devinitdata = { |
448 | { PCI_VENDOR_ID_GORAMO, PCI_DEVICE_ID_PCI200SYN, PCI_ANY_ID, | 453 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, |
449 | PCI_ANY_ID, 0, 0, 0 }, | 454 | PCI_DEVICE_ID_PLX_9050, 0, 0, 0 }, |
455 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, | ||
456 | PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 }, | ||
450 | { 0, } | 457 | { 0, } |
451 | }; | 458 | }; |
452 | 459 | ||
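The new ID table, together with the subsystem-device check added earlier in this file, is what tells the two card flavours apart: both report the generic PLX 9050 vendor/device ID, and only the EEPROM-programmed subsystem ID differs. A condensed, illustrative restatement using the identifiers from this hunk (the real table is pci200_pci_tbl above):

	/* Sketch only -- not a replacement for pci200_pci_tbl. */
	static struct pci_device_id example_tbl[] = {
		/* EEPROM still carries the generic PLX data: probe prints the
		 * "old configuration data" warning added in this patch */
		{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
		  PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, 0, 0, 0 },
		/* EEPROM updated with the card's own subsystem ID: no warning */
		{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
		  PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
		{ 0, }
	};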
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index e0874cbfefea..d7691c482835 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig | |||
@@ -235,7 +235,35 @@ config IPW2200_MONITOR | |||
235 | promiscuous mode via the Wireless Tool's Monitor mode. While in this | 235 | promiscuous mode via the Wireless Tool's Monitor mode. While in this |
236 | mode, no packets can be sent. | 236 | mode, no packets can be sent. |
237 | 237 | ||
238 | config IPW_QOS | 238 | config IPW2200_RADIOTAP |
239 | bool "Enable radiotap format 802.11 raw packet support" | ||
240 | depends on IPW2200_MONITOR | ||
241 | |||
242 | config IPW2200_PROMISCUOUS | ||
243 | bool "Enable creation of an RF radiotap promiscuous interface" | ||
244 | depends on IPW2200_MONITOR | ||
245 | select IPW2200_RADIOTAP | ||
246 | ---help--- | ||
247 | Enables the creation of a second interface prefixed 'rtap'. | ||
248 | This second interface will provide every received frame in radiotap | ||
249 | format. | ||
250 | |||
251 | This is useful for performing wireless network analysis while | ||
252 | maintaining an active association. | ||
253 | |||
254 | Example usage: | ||
255 | |||
256 | % modprobe ipw2200 rtap_iface=1 | ||
257 | % ifconfig rtap0 up | ||
258 | % tethereal -i rtap0 | ||
259 | |||
260 | If you do not specify 'rtap_iface=1' as a module parameter then | ||
261 | the rtap interface will not be created and you will need to turn | ||
262 | it on via sysfs: | ||
263 | |||
264 | % echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface | ||
265 | |||
266 | config IPW2200_QOS | ||
239 | bool "Enable QoS support" | 267 | bool "Enable QoS support" |
240 | depends on IPW2200 && EXPERIMENTAL | 268 | depends on IPW2200 && EXPERIMENTAL |
241 | 269 | ||
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 00764ddd74d8..4069b79d8259 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/ioport.h> | 47 | #include <linux/ioport.h> |
48 | #include <linux/pci.h> | 48 | #include <linux/pci.h> |
49 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
50 | #include <net/ieee80211.h> | ||
50 | 51 | ||
51 | #include "airo.h" | 52 | #include "airo.h" |
52 | 53 | ||
@@ -467,6 +468,8 @@ static int do8bitIO = 0; | |||
467 | #define RID_ECHOTEST_RESULTS 0xFF71 | 468 | #define RID_ECHOTEST_RESULTS 0xFF71 |
468 | #define RID_BSSLISTFIRST 0xFF72 | 469 | #define RID_BSSLISTFIRST 0xFF72 |
469 | #define RID_BSSLISTNEXT 0xFF73 | 470 | #define RID_BSSLISTNEXT 0xFF73 |
471 | #define RID_WPA_BSSLISTFIRST 0xFF74 | ||
472 | #define RID_WPA_BSSLISTNEXT 0xFF75 | ||
470 | 473 | ||
471 | typedef struct { | 474 | typedef struct { |
472 | u16 cmd; | 475 | u16 cmd; |
@@ -739,6 +742,14 @@ typedef struct { | |||
739 | u16 extSoftCap; | 742 | u16 extSoftCap; |
740 | } CapabilityRid; | 743 | } CapabilityRid; |
741 | 744 | ||
745 | |||
746 | /* Only present on firmware >= 5.30.17 */ | ||
747 | typedef struct { | ||
748 | u16 unknown[4]; | ||
749 | u8 fixed[12]; /* WLAN management frame */ | ||
750 | u8 iep[624]; | ||
751 | } BSSListRidExtra; | ||
752 | |||
742 | typedef struct { | 753 | typedef struct { |
743 | u16 len; | 754 | u16 len; |
744 | u16 index; /* First is 0 and 0xffff means end of list */ | 755 | u16 index; /* First is 0 and 0xffff means end of list */ |
@@ -767,6 +778,9 @@ typedef struct { | |||
767 | } fh; | 778 | } fh; |
768 | u16 dsChannel; | 779 | u16 dsChannel; |
769 | u16 atimWindow; | 780 | u16 atimWindow; |
781 | |||
782 | /* Only present on firmware >= 5.30.17 */ | ||
783 | BSSListRidExtra extra; | ||
770 | } BSSListRid; | 784 | } BSSListRid; |
771 | 785 | ||
772 | typedef struct { | 786 | typedef struct { |
@@ -1140,8 +1154,6 @@ struct airo_info { | |||
1140 | char defindex; // Used with auto wep | 1154 | char defindex; // Used with auto wep |
1141 | struct proc_dir_entry *proc_entry; | 1155 | struct proc_dir_entry *proc_entry; |
1142 | spinlock_t aux_lock; | 1156 | spinlock_t aux_lock; |
1143 | unsigned long flags; | ||
1144 | #define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */ | ||
1145 | #define FLAG_RADIO_OFF 0 /* User disabling of MAC */ | 1157 | #define FLAG_RADIO_OFF 0 /* User disabling of MAC */ |
1146 | #define FLAG_RADIO_DOWN 1 /* ifup/ifdown disabling of MAC */ | 1158 | #define FLAG_RADIO_DOWN 1 /* ifup/ifdown disabling of MAC */ |
1147 | #define FLAG_RADIO_MASK 0x03 | 1159 | #define FLAG_RADIO_MASK 0x03 |
@@ -1151,6 +1163,7 @@ struct airo_info { | |||
1151 | #define FLAG_UPDATE_MULTI 5 | 1163 | #define FLAG_UPDATE_MULTI 5 |
1152 | #define FLAG_UPDATE_UNI 6 | 1164 | #define FLAG_UPDATE_UNI 6 |
1153 | #define FLAG_802_11 7 | 1165 | #define FLAG_802_11 7 |
1166 | #define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */ | ||
1154 | #define FLAG_PENDING_XMIT 9 | 1167 | #define FLAG_PENDING_XMIT 9 |
1155 | #define FLAG_PENDING_XMIT11 10 | 1168 | #define FLAG_PENDING_XMIT11 10 |
1156 | #define FLAG_MPI 11 | 1169 | #define FLAG_MPI 11 |
@@ -1158,17 +1171,19 @@ struct airo_info { | |||
1158 | #define FLAG_COMMIT 13 | 1171 | #define FLAG_COMMIT 13 |
1159 | #define FLAG_RESET 14 | 1172 | #define FLAG_RESET 14 |
1160 | #define FLAG_FLASHING 15 | 1173 | #define FLAG_FLASHING 15 |
1161 | #define JOB_MASK 0x2ff0000 | 1174 | #define FLAG_WPA_CAPABLE 16 |
1162 | #define JOB_DIE 16 | 1175 | unsigned long flags; |
1163 | #define JOB_XMIT 17 | 1176 | #define JOB_DIE 0 |
1164 | #define JOB_XMIT11 18 | 1177 | #define JOB_XMIT 1 |
1165 | #define JOB_STATS 19 | 1178 | #define JOB_XMIT11 2 |
1166 | #define JOB_PROMISC 20 | 1179 | #define JOB_STATS 3 |
1167 | #define JOB_MIC 21 | 1180 | #define JOB_PROMISC 4 |
1168 | #define JOB_EVENT 22 | 1181 | #define JOB_MIC 5 |
1169 | #define JOB_AUTOWEP 23 | 1182 | #define JOB_EVENT 6 |
1170 | #define JOB_WSTATS 24 | 1183 | #define JOB_AUTOWEP 7 |
1171 | #define JOB_SCAN_RESULTS 25 | 1184 | #define JOB_WSTATS 8 |
1185 | #define JOB_SCAN_RESULTS 9 | ||
1186 | unsigned long jobs; | ||
1172 | int (*bap_read)(struct airo_info*, u16 *pu16Dst, int bytelen, | 1187 | int (*bap_read)(struct airo_info*, u16 *pu16Dst, int bytelen, |
1173 | int whichbap); | 1188 | int whichbap); |
1174 | unsigned short *flash; | 1189 | unsigned short *flash; |
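The hunk above splits what used to be a single bitfield: device state keeps living in ai->flags (FLAG_*), while deferred-work requests move to the new ai->jobs word (JOB_*), which is why the JOB_* values restart at 0 and JOB_MASK disappears. A minimal sketch of the request/consume pattern the rest of the patch converts to; the helper names are hypothetical and the real dispatch sits in airo_thread(), which is not part of this hunk:

	/* Hypothetical helpers, for illustration only.  They use just the
	 * fields visible in this patch: ai->jobs and ai->thr_wait. */
	static void airo_queue_job(struct airo_info *ai, int job)
	{
		set_bit(job, &ai->jobs);		/* mark work as pending   */
		wake_up_interruptible(&ai->thr_wait);	/* poke the worker thread */
	}

	static int airo_take_job(struct airo_info *ai, int job)
	{
		/* non-zero exactly once per queued request; ai->flags untouched */
		return test_and_clear_bit(job, &ai->jobs);
	}

Usage mirrors the conversions in the hunks that follow, e.g. set_bit(JOB_STATS, &local->jobs) in airo_get_stats() and clear_bit(JOB_STATS, &ai->jobs) in airo_read_stats().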
@@ -1208,6 +1223,11 @@ struct airo_info { | |||
1208 | #define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE | 1223 | #define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE |
1209 | char proc_name[IFNAMSIZ]; | 1224 | char proc_name[IFNAMSIZ]; |
1210 | 1225 | ||
1226 | /* WPA-related stuff */ | ||
1227 | unsigned int bssListFirst; | ||
1228 | unsigned int bssListNext; | ||
1229 | unsigned int bssListRidLen; | ||
1230 | |||
1211 | struct list_head network_list; | 1231 | struct list_head network_list; |
1212 | struct list_head network_free_list; | 1232 | struct list_head network_free_list; |
1213 | BSSListElement *networks; | 1233 | BSSListElement *networks; |
@@ -1264,7 +1284,7 @@ static void micinit(struct airo_info *ai) | |||
1264 | { | 1284 | { |
1265 | MICRid mic_rid; | 1285 | MICRid mic_rid; |
1266 | 1286 | ||
1267 | clear_bit(JOB_MIC, &ai->flags); | 1287 | clear_bit(JOB_MIC, &ai->jobs); |
1268 | PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0); | 1288 | PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0); |
1269 | up(&ai->sem); | 1289 | up(&ai->sem); |
1270 | 1290 | ||
@@ -1705,24 +1725,24 @@ static void emmh32_final(emmh32_context *context, u8 digest[4]) | |||
1705 | static int readBSSListRid(struct airo_info *ai, int first, | 1725 | static int readBSSListRid(struct airo_info *ai, int first, |
1706 | BSSListRid *list) { | 1726 | BSSListRid *list) { |
1707 | int rc; | 1727 | int rc; |
1708 | Cmd cmd; | 1728 | Cmd cmd; |
1709 | Resp rsp; | 1729 | Resp rsp; |
1710 | 1730 | ||
1711 | if (first == 1) { | 1731 | if (first == 1) { |
1712 | if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN; | 1732 | if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN; |
1713 | memset(&cmd, 0, sizeof(cmd)); | 1733 | memset(&cmd, 0, sizeof(cmd)); |
1714 | cmd.cmd=CMD_LISTBSS; | 1734 | cmd.cmd=CMD_LISTBSS; |
1715 | if (down_interruptible(&ai->sem)) | 1735 | if (down_interruptible(&ai->sem)) |
1716 | return -ERESTARTSYS; | 1736 | return -ERESTARTSYS; |
1717 | issuecommand(ai, &cmd, &rsp); | 1737 | issuecommand(ai, &cmd, &rsp); |
1718 | up(&ai->sem); | 1738 | up(&ai->sem); |
1719 | /* Let the command take effect */ | 1739 | /* Let the command take effect */ |
1720 | ai->task = current; | 1740 | ai->task = current; |
1721 | ssleep(3); | 1741 | ssleep(3); |
1722 | ai->task = NULL; | 1742 | ai->task = NULL; |
1723 | } | 1743 | } |
1724 | rc = PC4500_readrid(ai, first ? RID_BSSLISTFIRST : RID_BSSLISTNEXT, | 1744 | rc = PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext, |
1725 | list, sizeof(*list), 1); | 1745 | list, ai->bssListRidLen, 1); |
1726 | 1746 | ||
1727 | list->len = le16_to_cpu(list->len); | 1747 | list->len = le16_to_cpu(list->len); |
1728 | list->index = le16_to_cpu(list->index); | 1748 | list->index = le16_to_cpu(list->index); |
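readBSSListRid() now reads from whichever RID pair and record length were selected at init time (ai->bssListFirst, ai->bssListNext, ai->bssListRidLen; the WPA-capability-based selection appears further down in this patch), but iteration over the scan results is unchanged. A hedged sketch of that first/next walk, relying on the "index == 0xffff means end of list" convention from the RID definition above; the function name is hypothetical and this is not a verbatim copy of airo_process_scan_results():

	static void example_walk_bss_list(struct airo_info *ai)
	{
		BSSListRid bss;
		int rc;

		/* first==1 issues CMD_LISTBSS and reads ai->bssListFirst */
		rc = readBSSListRid(ai, 1, &bss);
		while (rc == SUCCESS && bss.index != 0xffff) {
			/* consume one BSS entry (SSID, channel, capabilities, ...) */
			rc = readBSSListRid(ai, 0, &bss);	/* ai->bssListNext */
		}
	}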
@@ -2112,7 +2132,7 @@ static void airo_end_xmit(struct net_device *dev) { | |||
2112 | int fid = priv->xmit.fid; | 2132 | int fid = priv->xmit.fid; |
2113 | u32 *fids = priv->fids; | 2133 | u32 *fids = priv->fids; |
2114 | 2134 | ||
2115 | clear_bit(JOB_XMIT, &priv->flags); | 2135 | clear_bit(JOB_XMIT, &priv->jobs); |
2116 | clear_bit(FLAG_PENDING_XMIT, &priv->flags); | 2136 | clear_bit(FLAG_PENDING_XMIT, &priv->flags); |
2117 | status = transmit_802_3_packet (priv, fids[fid], skb->data); | 2137 | status = transmit_802_3_packet (priv, fids[fid], skb->data); |
2118 | up(&priv->sem); | 2138 | up(&priv->sem); |
@@ -2162,7 +2182,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) { | |||
2162 | if (down_trylock(&priv->sem) != 0) { | 2182 | if (down_trylock(&priv->sem) != 0) { |
2163 | set_bit(FLAG_PENDING_XMIT, &priv->flags); | 2183 | set_bit(FLAG_PENDING_XMIT, &priv->flags); |
2164 | netif_stop_queue(dev); | 2184 | netif_stop_queue(dev); |
2165 | set_bit(JOB_XMIT, &priv->flags); | 2185 | set_bit(JOB_XMIT, &priv->jobs); |
2166 | wake_up_interruptible(&priv->thr_wait); | 2186 | wake_up_interruptible(&priv->thr_wait); |
2167 | } else | 2187 | } else |
2168 | airo_end_xmit(dev); | 2188 | airo_end_xmit(dev); |
@@ -2177,7 +2197,7 @@ static void airo_end_xmit11(struct net_device *dev) { | |||
2177 | int fid = priv->xmit11.fid; | 2197 | int fid = priv->xmit11.fid; |
2178 | u32 *fids = priv->fids; | 2198 | u32 *fids = priv->fids; |
2179 | 2199 | ||
2180 | clear_bit(JOB_XMIT11, &priv->flags); | 2200 | clear_bit(JOB_XMIT11, &priv->jobs); |
2181 | clear_bit(FLAG_PENDING_XMIT11, &priv->flags); | 2201 | clear_bit(FLAG_PENDING_XMIT11, &priv->flags); |
2182 | status = transmit_802_11_packet (priv, fids[fid], skb->data); | 2202 | status = transmit_802_11_packet (priv, fids[fid], skb->data); |
2183 | up(&priv->sem); | 2203 | up(&priv->sem); |
@@ -2233,7 +2253,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) { | |||
2233 | if (down_trylock(&priv->sem) != 0) { | 2253 | if (down_trylock(&priv->sem) != 0) { |
2234 | set_bit(FLAG_PENDING_XMIT11, &priv->flags); | 2254 | set_bit(FLAG_PENDING_XMIT11, &priv->flags); |
2235 | netif_stop_queue(dev); | 2255 | netif_stop_queue(dev); |
2236 | set_bit(JOB_XMIT11, &priv->flags); | 2256 | set_bit(JOB_XMIT11, &priv->jobs); |
2237 | wake_up_interruptible(&priv->thr_wait); | 2257 | wake_up_interruptible(&priv->thr_wait); |
2238 | } else | 2258 | } else |
2239 | airo_end_xmit11(dev); | 2259 | airo_end_xmit11(dev); |
@@ -2244,7 +2264,7 @@ static void airo_read_stats(struct airo_info *ai) { | |||
2244 | StatsRid stats_rid; | 2264 | StatsRid stats_rid; |
2245 | u32 *vals = stats_rid.vals; | 2265 | u32 *vals = stats_rid.vals; |
2246 | 2266 | ||
2247 | clear_bit(JOB_STATS, &ai->flags); | 2267 | clear_bit(JOB_STATS, &ai->jobs); |
2248 | if (ai->power.event) { | 2268 | if (ai->power.event) { |
2249 | up(&ai->sem); | 2269 | up(&ai->sem); |
2250 | return; | 2270 | return; |
@@ -2272,10 +2292,10 @@ static struct net_device_stats *airo_get_stats(struct net_device *dev) | |||
2272 | { | 2292 | { |
2273 | struct airo_info *local = dev->priv; | 2293 | struct airo_info *local = dev->priv; |
2274 | 2294 | ||
2275 | if (!test_bit(JOB_STATS, &local->flags)) { | 2295 | if (!test_bit(JOB_STATS, &local->jobs)) { |
2276 | /* Get stats out of the card if available */ | 2296 | /* Get stats out of the card if available */ |
2277 | if (down_trylock(&local->sem) != 0) { | 2297 | if (down_trylock(&local->sem) != 0) { |
2278 | set_bit(JOB_STATS, &local->flags); | 2298 | set_bit(JOB_STATS, &local->jobs); |
2279 | wake_up_interruptible(&local->thr_wait); | 2299 | wake_up_interruptible(&local->thr_wait); |
2280 | } else | 2300 | } else |
2281 | airo_read_stats(local); | 2301 | airo_read_stats(local); |
@@ -2290,7 +2310,7 @@ static void airo_set_promisc(struct airo_info *ai) { | |||
2290 | 2310 | ||
2291 | memset(&cmd, 0, sizeof(cmd)); | 2311 | memset(&cmd, 0, sizeof(cmd)); |
2292 | cmd.cmd=CMD_SETMODE; | 2312 | cmd.cmd=CMD_SETMODE; |
2293 | clear_bit(JOB_PROMISC, &ai->flags); | 2313 | clear_bit(JOB_PROMISC, &ai->jobs); |
2294 | cmd.parm0=(ai->flags&IFF_PROMISC) ? PROMISC : NOPROMISC; | 2314 | cmd.parm0=(ai->flags&IFF_PROMISC) ? PROMISC : NOPROMISC; |
2295 | issuecommand(ai, &cmd, &rsp); | 2315 | issuecommand(ai, &cmd, &rsp); |
2296 | up(&ai->sem); | 2316 | up(&ai->sem); |
@@ -2302,7 +2322,7 @@ static void airo_set_multicast_list(struct net_device *dev) { | |||
2302 | if ((dev->flags ^ ai->flags) & IFF_PROMISC) { | 2322 | if ((dev->flags ^ ai->flags) & IFF_PROMISC) { |
2303 | change_bit(FLAG_PROMISC, &ai->flags); | 2323 | change_bit(FLAG_PROMISC, &ai->flags); |
2304 | if (down_trylock(&ai->sem) != 0) { | 2324 | if (down_trylock(&ai->sem) != 0) { |
2305 | set_bit(JOB_PROMISC, &ai->flags); | 2325 | set_bit(JOB_PROMISC, &ai->jobs); |
2306 | wake_up_interruptible(&ai->thr_wait); | 2326 | wake_up_interruptible(&ai->thr_wait); |
2307 | } else | 2327 | } else |
2308 | airo_set_promisc(ai); | 2328 | airo_set_promisc(ai); |
@@ -2380,7 +2400,7 @@ void stop_airo_card( struct net_device *dev, int freeres ) | |||
2380 | } | 2400 | } |
2381 | clear_bit(FLAG_REGISTERED, &ai->flags); | 2401 | clear_bit(FLAG_REGISTERED, &ai->flags); |
2382 | } | 2402 | } |
2383 | set_bit(JOB_DIE, &ai->flags); | 2403 | set_bit(JOB_DIE, &ai->jobs); |
2384 | kill_proc(ai->thr_pid, SIGTERM, 1); | 2404 | kill_proc(ai->thr_pid, SIGTERM, 1); |
2385 | wait_for_completion(&ai->thr_exited); | 2405 | wait_for_completion(&ai->thr_exited); |
2386 | 2406 | ||
@@ -2701,14 +2721,14 @@ static int reset_card( struct net_device *dev , int lock) { | |||
2701 | return 0; | 2721 | return 0; |
2702 | } | 2722 | } |
2703 | 2723 | ||
2704 | #define MAX_NETWORK_COUNT 64 | 2724 | #define AIRO_MAX_NETWORK_COUNT 64 |
2705 | static int airo_networks_allocate(struct airo_info *ai) | 2725 | static int airo_networks_allocate(struct airo_info *ai) |
2706 | { | 2726 | { |
2707 | if (ai->networks) | 2727 | if (ai->networks) |
2708 | return 0; | 2728 | return 0; |
2709 | 2729 | ||
2710 | ai->networks = | 2730 | ai->networks = |
2711 | kzalloc(MAX_NETWORK_COUNT * sizeof(BSSListElement), | 2731 | kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement), |
2712 | GFP_KERNEL); | 2732 | GFP_KERNEL); |
2713 | if (!ai->networks) { | 2733 | if (!ai->networks) { |
2714 | airo_print_warn(ai->dev->name, "Out of memory allocating beacons"); | 2734 | airo_print_warn(ai->dev->name, "Out of memory allocating beacons"); |
@@ -2732,11 +2752,33 @@ static void airo_networks_initialize(struct airo_info *ai) | |||
2732 | 2752 | ||
2733 | INIT_LIST_HEAD(&ai->network_free_list); | 2753 | INIT_LIST_HEAD(&ai->network_free_list); |
2734 | INIT_LIST_HEAD(&ai->network_list); | 2754 | INIT_LIST_HEAD(&ai->network_list); |
2735 | for (i = 0; i < MAX_NETWORK_COUNT; i++) | 2755 | for (i = 0; i < AIRO_MAX_NETWORK_COUNT; i++) |
2736 | list_add_tail(&ai->networks[i].list, | 2756 | list_add_tail(&ai->networks[i].list, |
2737 | &ai->network_free_list); | 2757 | &ai->network_free_list); |
2738 | } | 2758 | } |
2739 | 2759 | ||
2760 | static int airo_test_wpa_capable(struct airo_info *ai) | ||
2761 | { | ||
2762 | int status; | ||
2763 | CapabilityRid cap_rid; | ||
2764 | const char *name = ai->dev->name; | ||
2765 | |||
2766 | status = readCapabilityRid(ai, &cap_rid, 1); | ||
2767 | if (status != SUCCESS) return 0; | ||
2768 | |||
2769 | /* Only firmware versions 5.30.17 or better can do WPA */ | ||
2770 | if ((cap_rid.softVer > 0x530) | ||
2771 | || ((cap_rid.softVer == 0x530) && (cap_rid.softSubVer >= 17))) { | ||
2772 | airo_print_info(name, "WPA is supported."); | ||
2773 | return 1; | ||
2774 | } | ||
2775 | |||
2776 | /* No WPA support */ | ||
2777 | airo_print_info(name, "WPA unsupported (only firmware versions 5.30.17" | ||
2778 | " and greater support WPA. Detected %s)", cap_rid.prodVer); | ||
2779 | return 0; | ||
2780 | } | ||
2781 | |||
2740 | static struct net_device *_init_airo_card( unsigned short irq, int port, | 2782 | static struct net_device *_init_airo_card( unsigned short irq, int port, |
2741 | int is_pcmcia, struct pci_dev *pci, | 2783 | int is_pcmcia, struct pci_dev *pci, |
2742 | struct device *dmdev ) | 2784 | struct device *dmdev ) |
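The airo_test_wpa_capable() helper added in the hunk above gates WPA support on firmware 5.30.17 or newer: the packed major/minor word (softVer, 0x530 for 5.30) is compared first, and the sub-version only breaks a tie. A minimal user-space sketch of the same two-level comparison; the struct and values below are illustrative stand-ins, not the driver's CapabilityRid:

    #include <stdio.h>

    /* Illustrative stand-in for the two capability-rid fields used above. */
    struct fw_ver {
        unsigned short softVer;    /* e.g. 0x530 for firmware 5.30 */
        unsigned short softSubVer; /* e.g. 17 */
    };

    /* 1 when the firmware is 5.30.17 or newer, mirroring the helper's test. */
    static int wpa_capable(const struct fw_ver *v)
    {
        return (v->softVer > 0x530) ||
               (v->softVer == 0x530 && v->softSubVer >= 17);
    }

    int main(void)
    {
        struct fw_ver too_old = { 0x530, 16 };  /* 5.30.16 -> 0 */
        struct fw_ver minimum = { 0x530, 17 };  /* 5.30.17 -> 1 */
        struct fw_ver newer   = { 0x531, 0 };   /* 5.31.0  -> 1 */

        printf("%d %d %d\n", wpa_capable(&too_old),
               wpa_capable(&minimum), wpa_capable(&newer));
        return 0;
    }

The comparison treats softVer as a single packed major/minor word, matching the 0x530 literal used in the hunk.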
@@ -2759,6 +2801,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, | |||
2759 | ai = dev->priv; | 2801 | ai = dev->priv; |
2760 | ai->wifidev = NULL; | 2802 | ai->wifidev = NULL; |
2761 | ai->flags = 0; | 2803 | ai->flags = 0; |
2804 | ai->jobs = 0; | ||
2762 | ai->dev = dev; | 2805 | ai->dev = dev; |
2763 | if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) { | 2806 | if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) { |
2764 | airo_print_dbg(dev->name, "Found an MPI350 card"); | 2807 | airo_print_dbg(dev->name, "Found an MPI350 card"); |
@@ -2838,6 +2881,18 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, | |||
2838 | set_bit(FLAG_FLASHING, &ai->flags); | 2881 | set_bit(FLAG_FLASHING, &ai->flags); |
2839 | } | 2882 | } |
2840 | 2883 | ||
2884 | /* Test for WPA support */ | ||
2885 | if (airo_test_wpa_capable(ai)) { | ||
2886 | set_bit(FLAG_WPA_CAPABLE, &ai->flags); | ||
2887 | ai->bssListFirst = RID_WPA_BSSLISTFIRST; | ||
2888 | ai->bssListNext = RID_WPA_BSSLISTNEXT; | ||
2889 | ai->bssListRidLen = sizeof(BSSListRid); | ||
2890 | } else { | ||
2891 | ai->bssListFirst = RID_BSSLISTFIRST; | ||
2892 | ai->bssListNext = RID_BSSLISTNEXT; | ||
2893 | ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra); | ||
2894 | } | ||
2895 | |||
2841 | rc = register_netdev(dev); | 2896 | rc = register_netdev(dev); |
2842 | if (rc) { | 2897 | if (rc) { |
2843 | airo_print_err(dev->name, "Couldn't register_netdev"); | 2898 | airo_print_err(dev->name, "Couldn't register_netdev"); |
@@ -2875,7 +2930,7 @@ err_out_irq: | |||
2875 | err_out_unlink: | 2930 | err_out_unlink: |
2876 | del_airo_dev(dev); | 2931 | del_airo_dev(dev); |
2877 | err_out_thr: | 2932 | err_out_thr: |
2878 | set_bit(JOB_DIE, &ai->flags); | 2933 | set_bit(JOB_DIE, &ai->jobs); |
2879 | kill_proc(ai->thr_pid, SIGTERM, 1); | 2934 | kill_proc(ai->thr_pid, SIGTERM, 1); |
2880 | wait_for_completion(&ai->thr_exited); | 2935 | wait_for_completion(&ai->thr_exited); |
2881 | err_out_free: | 2936 | err_out_free: |
@@ -2933,7 +2988,7 @@ static void airo_send_event(struct net_device *dev) { | |||
2933 | union iwreq_data wrqu; | 2988 | union iwreq_data wrqu; |
2934 | StatusRid status_rid; | 2989 | StatusRid status_rid; |
2935 | 2990 | ||
2936 | clear_bit(JOB_EVENT, &ai->flags); | 2991 | clear_bit(JOB_EVENT, &ai->jobs); |
2937 | PC4500_readrid(ai, RID_STATUS, &status_rid, sizeof(status_rid), 0); | 2992 | PC4500_readrid(ai, RID_STATUS, &status_rid, sizeof(status_rid), 0); |
2938 | up(&ai->sem); | 2993 | up(&ai->sem); |
2939 | wrqu.data.length = 0; | 2994 | wrqu.data.length = 0; |
@@ -2947,7 +3002,7 @@ static void airo_send_event(struct net_device *dev) { | |||
2947 | 3002 | ||
2948 | static void airo_process_scan_results (struct airo_info *ai) { | 3003 | static void airo_process_scan_results (struct airo_info *ai) { |
2949 | union iwreq_data wrqu; | 3004 | union iwreq_data wrqu; |
2950 | BSSListRid BSSList; | 3005 | BSSListRid bss; |
2951 | int rc; | 3006 | int rc; |
2952 | BSSListElement * loop_net; | 3007 | BSSListElement * loop_net; |
2953 | BSSListElement * tmp_net; | 3008 | BSSListElement * tmp_net; |
@@ -2960,15 +3015,15 @@ static void airo_process_scan_results (struct airo_info *ai) { | |||
2960 | } | 3015 | } |
2961 | 3016 | ||
2962 | /* Try to read the first entry of the scan result */ | 3017 | /* Try to read the first entry of the scan result */ |
2963 | rc = PC4500_readrid(ai, RID_BSSLISTFIRST, &BSSList, sizeof(BSSList), 0); | 3018 | rc = PC4500_readrid(ai, ai->bssListFirst, &bss, ai->bssListRidLen, 0); |
2964 | if((rc) || (BSSList.index == 0xffff)) { | 3019 | if((rc) || (bss.index == 0xffff)) { |
2965 | /* No scan results */ | 3020 | /* No scan results */ |
2966 | goto out; | 3021 | goto out; |
2967 | } | 3022 | } |
2968 | 3023 | ||
2969 | /* Read and parse all entries */ | 3024 | /* Read and parse all entries */ |
2970 | tmp_net = NULL; | 3025 | tmp_net = NULL; |
2971 | while((!rc) && (BSSList.index != 0xffff)) { | 3026 | while((!rc) && (bss.index != 0xffff)) { |
2972 | /* Grab a network off the free list */ | 3027 | /* Grab a network off the free list */ |
2973 | if (!list_empty(&ai->network_free_list)) { | 3028 | if (!list_empty(&ai->network_free_list)) { |
2974 | tmp_net = list_entry(ai->network_free_list.next, | 3029 | tmp_net = list_entry(ai->network_free_list.next, |
@@ -2977,19 +3032,19 @@ static void airo_process_scan_results (struct airo_info *ai) { | |||
2977 | } | 3032 | } |
2978 | 3033 | ||
2979 | if (tmp_net != NULL) { | 3034 | if (tmp_net != NULL) { |
2980 | memcpy(tmp_net, &BSSList, sizeof(tmp_net->bss)); | 3035 | memcpy(tmp_net, &bss, sizeof(tmp_net->bss)); |
2981 | list_add_tail(&tmp_net->list, &ai->network_list); | 3036 | list_add_tail(&tmp_net->list, &ai->network_list); |
2982 | tmp_net = NULL; | 3037 | tmp_net = NULL; |
2983 | } | 3038 | } |
2984 | 3039 | ||
2985 | /* Read next entry */ | 3040 | /* Read next entry */ |
2986 | rc = PC4500_readrid(ai, RID_BSSLISTNEXT, | 3041 | rc = PC4500_readrid(ai, ai->bssListNext, |
2987 | &BSSList, sizeof(BSSList), 0); | 3042 | &bss, ai->bssListRidLen, 0); |
2988 | } | 3043 | } |
2989 | 3044 | ||
2990 | out: | 3045 | out: |
2991 | ai->scan_timeout = 0; | 3046 | ai->scan_timeout = 0; |
2992 | clear_bit(JOB_SCAN_RESULTS, &ai->flags); | 3047 | clear_bit(JOB_SCAN_RESULTS, &ai->jobs); |
2993 | up(&ai->sem); | 3048 | up(&ai->sem); |
2994 | 3049 | ||
2995 | /* Send an empty event to user space. | 3050 | /* Send an empty event to user space. |
@@ -3019,10 +3074,10 @@ static int airo_thread(void *data) { | |||
3019 | /* make swsusp happy with our thread */ | 3074 | /* make swsusp happy with our thread */ |
3020 | try_to_freeze(); | 3075 | try_to_freeze(); |
3021 | 3076 | ||
3022 | if (test_bit(JOB_DIE, &ai->flags)) | 3077 | if (test_bit(JOB_DIE, &ai->jobs)) |
3023 | break; | 3078 | break; |
3024 | 3079 | ||
3025 | if (ai->flags & JOB_MASK) { | 3080 | if (ai->jobs) { |
3026 | locked = down_interruptible(&ai->sem); | 3081 | locked = down_interruptible(&ai->sem); |
3027 | } else { | 3082 | } else { |
3028 | wait_queue_t wait; | 3083 | wait_queue_t wait; |
@@ -3031,16 +3086,16 @@ static int airo_thread(void *data) { | |||
3031 | add_wait_queue(&ai->thr_wait, &wait); | 3086 | add_wait_queue(&ai->thr_wait, &wait); |
3032 | for (;;) { | 3087 | for (;;) { |
3033 | set_current_state(TASK_INTERRUPTIBLE); | 3088 | set_current_state(TASK_INTERRUPTIBLE); |
3034 | if (ai->flags & JOB_MASK) | 3089 | if (ai->jobs) |
3035 | break; | 3090 | break; |
3036 | if (ai->expires || ai->scan_timeout) { | 3091 | if (ai->expires || ai->scan_timeout) { |
3037 | if (ai->scan_timeout && | 3092 | if (ai->scan_timeout && |
3038 | time_after_eq(jiffies,ai->scan_timeout)){ | 3093 | time_after_eq(jiffies,ai->scan_timeout)){ |
3039 | set_bit(JOB_SCAN_RESULTS,&ai->flags); | 3094 | set_bit(JOB_SCAN_RESULTS, &ai->jobs); |
3040 | break; | 3095 | break; |
3041 | } else if (ai->expires && | 3096 | } else if (ai->expires && |
3042 | time_after_eq(jiffies,ai->expires)){ | 3097 | time_after_eq(jiffies,ai->expires)){ |
3043 | set_bit(JOB_AUTOWEP,&ai->flags); | 3098 | set_bit(JOB_AUTOWEP, &ai->jobs); |
3044 | break; | 3099 | break; |
3045 | } | 3100 | } |
3046 | if (!signal_pending(current)) { | 3101 | if (!signal_pending(current)) { |
@@ -3069,7 +3124,7 @@ static int airo_thread(void *data) { | |||
3069 | if (locked) | 3124 | if (locked) |
3070 | continue; | 3125 | continue; |
3071 | 3126 | ||
3072 | if (test_bit(JOB_DIE, &ai->flags)) { | 3127 | if (test_bit(JOB_DIE, &ai->jobs)) { |
3073 | up(&ai->sem); | 3128 | up(&ai->sem); |
3074 | break; | 3129 | break; |
3075 | } | 3130 | } |
@@ -3079,23 +3134,23 @@ static int airo_thread(void *data) { | |||
3079 | continue; | 3134 | continue; |
3080 | } | 3135 | } |
3081 | 3136 | ||
3082 | if (test_bit(JOB_XMIT, &ai->flags)) | 3137 | if (test_bit(JOB_XMIT, &ai->jobs)) |
3083 | airo_end_xmit(dev); | 3138 | airo_end_xmit(dev); |
3084 | else if (test_bit(JOB_XMIT11, &ai->flags)) | 3139 | else if (test_bit(JOB_XMIT11, &ai->jobs)) |
3085 | airo_end_xmit11(dev); | 3140 | airo_end_xmit11(dev); |
3086 | else if (test_bit(JOB_STATS, &ai->flags)) | 3141 | else if (test_bit(JOB_STATS, &ai->jobs)) |
3087 | airo_read_stats(ai); | 3142 | airo_read_stats(ai); |
3088 | else if (test_bit(JOB_WSTATS, &ai->flags)) | 3143 | else if (test_bit(JOB_WSTATS, &ai->jobs)) |
3089 | airo_read_wireless_stats(ai); | 3144 | airo_read_wireless_stats(ai); |
3090 | else if (test_bit(JOB_PROMISC, &ai->flags)) | 3145 | else if (test_bit(JOB_PROMISC, &ai->jobs)) |
3091 | airo_set_promisc(ai); | 3146 | airo_set_promisc(ai); |
3092 | else if (test_bit(JOB_MIC, &ai->flags)) | 3147 | else if (test_bit(JOB_MIC, &ai->jobs)) |
3093 | micinit(ai); | 3148 | micinit(ai); |
3094 | else if (test_bit(JOB_EVENT, &ai->flags)) | 3149 | else if (test_bit(JOB_EVENT, &ai->jobs)) |
3095 | airo_send_event(dev); | 3150 | airo_send_event(dev); |
3096 | else if (test_bit(JOB_AUTOWEP, &ai->flags)) | 3151 | else if (test_bit(JOB_AUTOWEP, &ai->jobs)) |
3097 | timer_func(dev); | 3152 | timer_func(dev); |
3098 | else if (test_bit(JOB_SCAN_RESULTS, &ai->flags)) | 3153 | else if (test_bit(JOB_SCAN_RESULTS, &ai->jobs)) |
3099 | airo_process_scan_results(ai); | 3154 | airo_process_scan_results(ai); |
3100 | else /* Shouldn't get here, but we make sure to unlock */ | 3155 | else /* Shouldn't get here, but we make sure to unlock */ |
3101 | up(&ai->sem); | 3156 | up(&ai->sem); |
@@ -3133,7 +3188,7 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) | |||
3133 | if ( status & EV_MIC ) { | 3188 | if ( status & EV_MIC ) { |
3134 | OUT4500( apriv, EVACK, EV_MIC ); | 3189 | OUT4500( apriv, EVACK, EV_MIC ); |
3135 | if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) { | 3190 | if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) { |
3136 | set_bit(JOB_MIC, &apriv->flags); | 3191 | set_bit(JOB_MIC, &apriv->jobs); |
3137 | wake_up_interruptible(&apriv->thr_wait); | 3192 | wake_up_interruptible(&apriv->thr_wait); |
3138 | } | 3193 | } |
3139 | } | 3194 | } |
@@ -3187,7 +3242,7 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) | |||
3187 | set_bit(FLAG_UPDATE_MULTI, &apriv->flags); | 3242 | set_bit(FLAG_UPDATE_MULTI, &apriv->flags); |
3188 | 3243 | ||
3189 | if (down_trylock(&apriv->sem) != 0) { | 3244 | if (down_trylock(&apriv->sem) != 0) { |
3190 | set_bit(JOB_EVENT, &apriv->flags); | 3245 | set_bit(JOB_EVENT, &apriv->jobs); |
3191 | wake_up_interruptible(&apriv->thr_wait); | 3246 | wake_up_interruptible(&apriv->thr_wait); |
3192 | } else | 3247 | } else |
3193 | airo_send_event(dev); | 3248 | airo_send_event(dev); |
@@ -5485,7 +5540,7 @@ static void timer_func( struct net_device *dev ) { | |||
5485 | up(&apriv->sem); | 5540 | up(&apriv->sem); |
5486 | 5541 | ||
5487 | /* Schedule check to see if the change worked */ | 5542 | /* Schedule check to see if the change worked */ |
5488 | clear_bit(JOB_AUTOWEP, &apriv->flags); | 5543 | clear_bit(JOB_AUTOWEP, &apriv->jobs); |
5489 | apriv->expires = RUN_AT(HZ*3); | 5544 | apriv->expires = RUN_AT(HZ*3); |
5490 | } | 5545 | } |
5491 | 5546 | ||
@@ -6876,7 +6931,7 @@ static int airo_get_range(struct net_device *dev, | |||
6876 | } | 6931 | } |
6877 | range->num_txpower = i; | 6932 | range->num_txpower = i; |
6878 | range->txpower_capa = IW_TXPOW_MWATT; | 6933 | range->txpower_capa = IW_TXPOW_MWATT; |
6879 | range->we_version_source = 12; | 6934 | range->we_version_source = 19; |
6880 | range->we_version_compiled = WIRELESS_EXT; | 6935 | range->we_version_compiled = WIRELESS_EXT; |
6881 | range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME; | 6936 | range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME; |
6882 | range->retry_flags = IW_RETRY_LIMIT; | 6937 | range->retry_flags = IW_RETRY_LIMIT; |
@@ -7152,6 +7207,7 @@ static inline char *airo_translate_scan(struct net_device *dev, | |||
7152 | u16 capabilities; | 7207 | u16 capabilities; |
7153 | char * current_val; /* For rates */ | 7208 | char * current_val; /* For rates */ |
7154 | int i; | 7209 | int i; |
7210 | char * buf; | ||
7155 | 7211 | ||
7156 | /* First entry *MUST* be the AP MAC address */ | 7212 | /* First entry *MUST* be the AP MAC address */ |
7157 | iwe.cmd = SIOCGIWAP; | 7213 | iwe.cmd = SIOCGIWAP; |
@@ -7238,8 +7294,69 @@ static inline char *airo_translate_scan(struct net_device *dev, | |||
7238 | if((current_val - current_ev) > IW_EV_LCP_LEN) | 7294 | if((current_val - current_ev) > IW_EV_LCP_LEN) |
7239 | current_ev = current_val; | 7295 | current_ev = current_val; |
7240 | 7296 | ||
7241 | /* The other data in the scan result are not really | 7297 | /* Beacon interval */ |
7242 | * interesting, so for now drop it - Jean II */ | 7298 | buf = kmalloc(30, GFP_KERNEL); |
7299 | if (buf) { | ||
7300 | iwe.cmd = IWEVCUSTOM; | ||
7301 | sprintf(buf, "bcn_int=%d", bss->beaconInterval); | ||
7302 | iwe.u.data.length = strlen(buf); | ||
7303 | current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, buf); | ||
7304 | kfree(buf); | ||
7305 | } | ||
7306 | |||
7307 | /* Put WPA/RSN Information Elements into the event stream */ | ||
7308 | if (test_bit(FLAG_WPA_CAPABLE, &ai->flags)) { | ||
7309 | unsigned int num_null_ies = 0; | ||
7310 | u16 length = sizeof (bss->extra.iep); | ||
7311 | struct ieee80211_info_element *info_element = | ||
7312 | (struct ieee80211_info_element *) &bss->extra.iep; | ||
7313 | |||
7314 | while ((length >= sizeof(*info_element)) && (num_null_ies < 2)) { | ||
7315 | if (sizeof(*info_element) + info_element->len > length) { | ||
7316 | /* Invalid element, don't continue parsing IE */ | ||
7317 | break; | ||
7318 | } | ||
7319 | |||
7320 | switch (info_element->id) { | ||
7321 | case MFIE_TYPE_SSID: | ||
7322 | /* Two zero-length SSID elements | ||
7323 | * mean we're done parsing elements */ | ||
7324 | if (!info_element->len) | ||
7325 | num_null_ies++; | ||
7326 | break; | ||
7327 | |||
7328 | case MFIE_TYPE_GENERIC: | ||
7329 | if (info_element->len >= 4 && | ||
7330 | info_element->data[0] == 0x00 && | ||
7331 | info_element->data[1] == 0x50 && | ||
7332 | info_element->data[2] == 0xf2 && | ||
7333 | info_element->data[3] == 0x01) { | ||
7334 | iwe.cmd = IWEVGENIE; | ||
7335 | iwe.u.data.length = min(info_element->len + 2, | ||
7336 | MAX_WPA_IE_LEN); | ||
7337 | current_ev = iwe_stream_add_point(current_ev, end_buf, | ||
7338 | &iwe, (char *) info_element); | ||
7339 | } | ||
7340 | break; | ||
7341 | |||
7342 | case MFIE_TYPE_RSN: | ||
7343 | iwe.cmd = IWEVGENIE; | ||
7344 | iwe.u.data.length = min(info_element->len + 2, | ||
7345 | MAX_WPA_IE_LEN); | ||
7346 | current_ev = iwe_stream_add_point(current_ev, end_buf, | ||
7347 | &iwe, (char *) info_element); | ||
7348 | break; | ||
7349 | |||
7350 | default: | ||
7351 | break; | ||
7352 | } | ||
7353 | |||
7354 | length -= sizeof(*info_element) + info_element->len; | ||
7355 | info_element = | ||
7356 | (struct ieee80211_info_element *)&info_element-> | ||
7357 | data[info_element->len]; | ||
7358 | } | ||
7359 | } | ||
7243 | return current_ev; | 7360 | return current_ev; |
7244 | } | 7361 | } |
7245 | 7362 | ||
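The scan-translation hunk above walks the buffered information elements as a one-byte-id, one-byte-length TLV stream: it stops on a truncated element, treats two zero-length SSID elements as the end marker, and forwards WPA (vendor OUI 00:50:f2, type 1) and RSN elements to user space as IWEVGENIE. A self-contained sketch of the same walk over a raw byte buffer; walk_ies() and the sample data are hypothetical, not the driver's structures:

    #include <stdio.h>
    #include <string.h>

    /* Each 802.11 information element is: id (1 byte), len (1 byte), data[len]. */
    static void walk_ies(const unsigned char *buf, size_t length)
    {
        size_t off = 0;
        unsigned null_ssids = 0;

        while (off + 2 <= length && null_ssids < 2) {
            unsigned char id  = buf[off];
            unsigned char len = buf[off + 1];

            if (off + 2 + len > length)
                break;                          /* truncated element: stop */

            if (id == 0x00 && len == 0)         /* empty SSID element */
                null_ssids++;
            else if (id == 0xdd && len >= 4 &&  /* vendor-specific: WPA OUI+type */
                     !memcmp(&buf[off + 2], "\x00\x50\xf2\x01", 4))
                printf("WPA IE, %d bytes\n", len + 2);
            else if (id == 0x30)                /* RSN */
                printf("RSN IE, %d bytes\n", len + 2);

            off += 2 + len;
        }
    }

    int main(void)
    {
        /* One RSN element followed by two empty SSIDs (the end marker). */
        unsigned char ies[] = { 0x30, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
        walk_ies(ies, sizeof(ies));
        return 0;
    }

The driver additionally clamps what it forwards to MAX_WPA_IE_LEN; the sketch only classifies the elements.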
@@ -7521,7 +7638,7 @@ static void airo_read_wireless_stats(struct airo_info *local) | |||
7521 | u32 *vals = stats_rid.vals; | 7638 | u32 *vals = stats_rid.vals; |
7522 | 7639 | ||
7523 | /* Get stats out of the card */ | 7640 | /* Get stats out of the card */ |
7524 | clear_bit(JOB_WSTATS, &local->flags); | 7641 | clear_bit(JOB_WSTATS, &local->jobs); |
7525 | if (local->power.event) { | 7642 | if (local->power.event) { |
7526 | up(&local->sem); | 7643 | up(&local->sem); |
7527 | return; | 7644 | return; |
@@ -7565,10 +7682,10 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev) | |||
7565 | { | 7682 | { |
7566 | struct airo_info *local = dev->priv; | 7683 | struct airo_info *local = dev->priv; |
7567 | 7684 | ||
7568 | if (!test_bit(JOB_WSTATS, &local->flags)) { | 7685 | if (!test_bit(JOB_WSTATS, &local->jobs)) { |
7569 | /* Get stats out of the card if available */ | 7686 | /* Get stats out of the card if available */ |
7570 | if (down_trylock(&local->sem) != 0) { | 7687 | if (down_trylock(&local->sem) != 0) { |
7571 | set_bit(JOB_WSTATS, &local->flags); | 7688 | set_bit(JOB_WSTATS, &local->jobs); |
7572 | wake_up_interruptible(&local->thr_wait); | 7689 | wake_up_interruptible(&local->thr_wait); |
7573 | } else | 7690 | } else |
7574 | airo_read_wireless_stats(local); | 7691 | airo_read_wireless_stats(local); |
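Taken together, the airo.c changes above split the JOB_* bits out of ai->flags into a dedicated ai->jobs word, so the feature flags and the work bits no longer share one word and the background thread's wake-up condition collapses from (ai->flags & JOB_MASK) to a plain "any job pending" test. The pattern is: a producer sets a bit and wakes the thread; the thread takes the driver semaphore, dispatches one job, and the handler clears its bit before releasing. A minimal user-space analogue of that hand-off, using a pthread condition variable in place of the kernel wait queue and semaphore (all names below are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define JOB_STATS 0
    #define JOB_EVENT 1
    #define JOB_DIE   2

    static unsigned long jobs;          /* pending-work bitmask */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;

    static void queue_job(int bit)      /* set_bit() + wake_up() analogue */
    {
        pthread_mutex_lock(&lock);
        jobs |= 1UL << bit;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            while (!jobs)               /* sleep until some bit is set */
                pthread_cond_wait(&wake, &lock);

            if (jobs & (1UL << JOB_STATS)) {
                jobs &= ~(1UL << JOB_STATS);    /* handler clears its own bit */
                printf("refresh statistics\n");
            } else if (jobs & (1UL << JOB_EVENT)) {
                jobs &= ~(1UL << JOB_EVENT);
                printf("send wireless event\n");
            } else if (jobs & (1UL << JOB_DIE)) {
                break;                  /* drained, now honour the stop request */
            }
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t thr;

        pthread_create(&thr, NULL, worker, NULL);
        queue_job(JOB_STATS);
        queue_job(JOB_EVENT);
        queue_job(JOB_DIE);
        pthread_join(thr, NULL);        /* build with: cc demo.c -lpthread */
        return 0;
    }

One deliberate difference: the sketch drains pending work before honouring JOB_DIE, whereas the driver thread checks JOB_DIE first so a dying card never services stale jobs.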
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h index 2e83083935e1..e66fdb1f3cfd 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx.h | |||
@@ -645,7 +645,6 @@ struct bcm43xx_private { | |||
645 | unsigned int irq; | 645 | unsigned int irq; |
646 | 646 | ||
647 | void __iomem *mmio_addr; | 647 | void __iomem *mmio_addr; |
648 | unsigned int mmio_len; | ||
649 | 648 | ||
650 | /* Do not use the lock directly. Use the bcm43xx_lock* helper | 649 | /* Do not use the lock directly. Use the bcm43xx_lock* helper |
651 | * functions, to be MMIO-safe. */ | 650 | * functions, to be MMIO-safe. */ |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c index 35a4fcb6d923..7497fb16076e 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c | |||
@@ -92,7 +92,7 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf, | |||
92 | fappend("subsystem_vendor: 0x%04x subsystem_device: 0x%04x\n", | 92 | fappend("subsystem_vendor: 0x%04x subsystem_device: 0x%04x\n", |
93 | pci_dev->subsystem_vendor, pci_dev->subsystem_device); | 93 | pci_dev->subsystem_vendor, pci_dev->subsystem_device); |
94 | fappend("IRQ: %d\n", bcm->irq); | 94 | fappend("IRQ: %d\n", bcm->irq); |
95 | fappend("mmio_addr: 0x%p mmio_len: %u\n", bcm->mmio_addr, bcm->mmio_len); | 95 | fappend("mmio_addr: 0x%p\n", bcm->mmio_addr); |
96 | fappend("chip_id: 0x%04x chip_rev: 0x%02x\n", bcm->chip_id, bcm->chip_rev); | 96 | fappend("chip_id: 0x%04x chip_rev: 0x%02x\n", bcm->chip_id, bcm->chip_rev); |
97 | if ((bcm->core_80211[0].rev >= 3) && (bcm43xx_read32(bcm, 0x0158) & (1 << 16))) | 97 | if ((bcm->core_80211[0].rev >= 3) && (bcm43xx_read32(bcm, 0x0158) & (1 << 16))) |
98 | fappend("Radio disabled by hardware!\n"); | 98 | fappend("Radio disabled by hardware!\n"); |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index 7ed18cad29f7..c0502905a956 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c | |||
@@ -128,13 +128,15 @@ MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for debugging."); | |||
128 | static struct pci_device_id bcm43xx_pci_tbl[] = { | 128 | static struct pci_device_id bcm43xx_pci_tbl[] = { |
129 | /* Broadcom 4303 802.11b */ | 129 | /* Broadcom 4303 802.11b */ |
130 | { PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 130 | { PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
131 | /* Broadcom 4307 802.11b */ | 131 | /* Broadcom 4307 802.11b */ |
132 | { PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 132 | { PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
133 | /* Broadcom 4318 802.11b/g */ | 133 | /* Broadcom 4318 802.11b/g */ |
134 | { PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 134 | { PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
135 | /* Broadcom 4319 802.11a/b/g */ | ||
136 | { PCI_VENDOR_ID_BROADCOM, 0x4319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | ||
135 | /* Broadcom 4306 802.11b/g */ | 137 | /* Broadcom 4306 802.11b/g */ |
136 | { PCI_VENDOR_ID_BROADCOM, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 138 | { PCI_VENDOR_ID_BROADCOM, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
137 | /* Broadcom 4306 802.11a */ | 139 | /* Broadcom 4306 802.11a */ |
138 | // { PCI_VENDOR_ID_BROADCOM, 0x4321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 140 | // { PCI_VENDOR_ID_BROADCOM, 0x4321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
139 | /* Broadcom 4309 802.11a/b/g */ | 141 | /* Broadcom 4309 802.11a/b/g */ |
140 | { PCI_VENDOR_ID_BROADCOM, 0x4324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 142 | { PCI_VENDOR_ID_BROADCOM, 0x4324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
@@ -3299,8 +3301,7 @@ static void bcm43xx_detach_board(struct bcm43xx_private *bcm) | |||
3299 | 3301 | ||
3300 | bcm43xx_chipset_detach(bcm); | 3302 | bcm43xx_chipset_detach(bcm); |
3301 | /* Do _not_ access the chip, after it is detached. */ | 3303 | /* Do _not_ access the chip, after it is detached. */ |
3302 | iounmap(bcm->mmio_addr); | 3304 | pci_iounmap(pci_dev, bcm->mmio_addr); |
3303 | |||
3304 | pci_release_regions(pci_dev); | 3305 | pci_release_regions(pci_dev); |
3305 | pci_disable_device(pci_dev); | 3306 | pci_disable_device(pci_dev); |
3306 | 3307 | ||
@@ -3390,40 +3391,26 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm) | |||
3390 | struct net_device *net_dev = bcm->net_dev; | 3391 | struct net_device *net_dev = bcm->net_dev; |
3391 | int err; | 3392 | int err; |
3392 | int i; | 3393 | int i; |
3393 | unsigned long mmio_start, mmio_flags, mmio_len; | ||
3394 | u32 coremask; | 3394 | u32 coremask; |
3395 | 3395 | ||
3396 | err = pci_enable_device(pci_dev); | 3396 | err = pci_enable_device(pci_dev); |
3397 | if (err) { | 3397 | if (err) { |
3398 | printk(KERN_ERR PFX "unable to wake up pci device (%i)\n", err); | 3398 | printk(KERN_ERR PFX "pci_enable_device() failed\n"); |
3399 | goto out; | 3399 | goto out; |
3400 | } | 3400 | } |
3401 | mmio_start = pci_resource_start(pci_dev, 0); | ||
3402 | mmio_flags = pci_resource_flags(pci_dev, 0); | ||
3403 | mmio_len = pci_resource_len(pci_dev, 0); | ||
3404 | if (!(mmio_flags & IORESOURCE_MEM)) { | ||
3405 | printk(KERN_ERR PFX | ||
3406 | "%s, region #0 not an MMIO resource, aborting\n", | ||
3407 | pci_name(pci_dev)); | ||
3408 | err = -ENODEV; | ||
3409 | goto err_pci_disable; | ||
3410 | } | ||
3411 | err = pci_request_regions(pci_dev, KBUILD_MODNAME); | 3401 | err = pci_request_regions(pci_dev, KBUILD_MODNAME); |
3412 | if (err) { | 3402 | if (err) { |
3413 | printk(KERN_ERR PFX | 3403 | printk(KERN_ERR PFX "pci_request_regions() failed\n"); |
3414 | "could not access PCI resources (%i)\n", err); | ||
3415 | goto err_pci_disable; | 3404 | goto err_pci_disable; |
3416 | } | 3405 | } |
3417 | /* enable PCI bus-mastering */ | 3406 | /* enable PCI bus-mastering */ |
3418 | pci_set_master(pci_dev); | 3407 | pci_set_master(pci_dev); |
3419 | bcm->mmio_addr = ioremap(mmio_start, mmio_len); | 3408 | bcm->mmio_addr = pci_iomap(pci_dev, 0, ~0UL); |
3420 | if (!bcm->mmio_addr) { | 3409 | if (!bcm->mmio_addr) { |
3421 | printk(KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", | 3410 | printk(KERN_ERR PFX "pci_iomap() failed\n"); |
3422 | pci_name(pci_dev)); | ||
3423 | err = -EIO; | 3411 | err = -EIO; |
3424 | goto err_pci_release; | 3412 | goto err_pci_release; |
3425 | } | 3413 | } |
3426 | bcm->mmio_len = mmio_len; | ||
3427 | net_dev->base_addr = (unsigned long)bcm->mmio_addr; | 3414 | net_dev->base_addr = (unsigned long)bcm->mmio_addr; |
3428 | 3415 | ||
3429 | bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_VENDOR_ID, | 3416 | bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_VENDOR_ID, |
@@ -3517,7 +3504,7 @@ err_80211_unwind: | |||
3517 | err_chipset_detach: | 3504 | err_chipset_detach: |
3518 | bcm43xx_chipset_detach(bcm); | 3505 | bcm43xx_chipset_detach(bcm); |
3519 | err_iounmap: | 3506 | err_iounmap: |
3520 | iounmap(bcm->mmio_addr); | 3507 | pci_iounmap(pci_dev, bcm->mmio_addr); |
3521 | err_pci_release: | 3508 | err_pci_release: |
3522 | pci_release_regions(pci_dev); | 3509 | pci_release_regions(pci_dev); |
3523 | err_pci_disable: | 3510 | err_pci_disable: |
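The bcm43xx conversion above replaces the hand-rolled pci_resource_start()/pci_resource_flags()/ioremap() sequence with pci_iomap(), which validates the BAR itself and makes the cached mmio_len redundant, and pairs it with pci_iounmap() on both the error path and teardown. A reduced skeleton of the resulting probe/remove flow, limited to the PCI calls touched by this patch; example_probe() and example_remove() are hypothetical names and all driver-specific setup is omitted:

    static int example_probe(struct pci_dev *pdev)
    {
        void __iomem *mmio;
        int err;

        err = pci_enable_device(pdev);
        if (err)
            return err;
        err = pci_request_regions(pdev, "example");
        if (err)
            goto err_disable;
        pci_set_master(pdev);

        /* BAR 0; ~0UL means "map the whole region".  pci_iomap() returns
         * NULL if the BAR is absent or unusable, which covers the old
         * IORESOURCE_MEM check as well. */
        mmio = pci_iomap(pdev, 0, ~0UL);
        if (!mmio) {
            err = -EIO;
            goto err_release;
        }

        /* ... chip setup via ioread/iowrite on mmio ... */
        return 0;

    err_release:
        pci_release_regions(pdev);
    err_disable:
        pci_disable_device(pdev);
        return err;
    }

    static void example_remove(struct pci_dev *pdev, void __iomem *mmio)
    {
        pci_iounmap(pdev, mmio);        /* replaces the bare iounmap() */
        pci_release_regions(pdev);
        pci_disable_device(pdev);
    }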
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c index 346c6febb033..2aa2f389c0d5 100644 --- a/drivers/net/wireless/hermes.c +++ b/drivers/net/wireless/hermes.c | |||
@@ -121,12 +121,6 @@ void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing) | |||
121 | hw->iobase = address; | 121 | hw->iobase = address; |
122 | hw->reg_spacing = reg_spacing; | 122 | hw->reg_spacing = reg_spacing; |
123 | hw->inten = 0x0; | 123 | hw->inten = 0x0; |
124 | |||
125 | #ifdef HERMES_DEBUG_BUFFER | ||
126 | hw->dbufp = 0; | ||
127 | memset(&hw->dbuf, 0xff, sizeof(hw->dbuf)); | ||
128 | memset(&hw->profile, 0, sizeof(hw->profile)); | ||
129 | #endif | ||
130 | } | 124 | } |
131 | 125 | ||
132 | int hermes_init(hermes_t *hw) | 126 | int hermes_init(hermes_t *hw) |
@@ -347,19 +341,6 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset) | |||
347 | reg = hermes_read_reg(hw, oreg); | 341 | reg = hermes_read_reg(hw, oreg); |
348 | } | 342 | } |
349 | 343 | ||
350 | #ifdef HERMES_DEBUG_BUFFER | ||
351 | hw->profile[HERMES_BAP_BUSY_TIMEOUT - k]++; | ||
352 | |||
353 | if (k < HERMES_BAP_BUSY_TIMEOUT) { | ||
354 | struct hermes_debug_entry *e = | ||
355 | &hw->dbuf[(hw->dbufp++) % HERMES_DEBUG_BUFSIZE]; | ||
356 | e->bap = bap; | ||
357 | e->id = id; | ||
358 | e->offset = offset; | ||
359 | e->cycles = HERMES_BAP_BUSY_TIMEOUT - k; | ||
360 | } | ||
361 | #endif | ||
362 | |||
363 | if (reg & HERMES_OFFSET_BUSY) | 344 | if (reg & HERMES_OFFSET_BUSY) |
364 | return -ETIMEDOUT; | 345 | return -ETIMEDOUT; |
365 | 346 | ||
@@ -419,8 +400,7 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len, | |||
419 | } | 400 | } |
420 | 401 | ||
421 | /* Write a block of data to the chip's buffer, via the | 402 | /* Write a block of data to the chip's buffer, via the |
422 | * BAP. Synchronization/serialization is the caller's problem. len | 403 | * BAP. Synchronization/serialization is the caller's problem. |
423 | * must be even. | ||
424 | * | 404 | * |
425 | * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware | 405 | * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware |
426 | */ | 406 | */ |
@@ -430,7 +410,7 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len, | |||
430 | int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; | 410 | int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; |
431 | int err = 0; | 411 | int err = 0; |
432 | 412 | ||
433 | if ( (len < 0) || (len % 2) ) | 413 | if (len < 0) |
434 | return -EINVAL; | 414 | return -EINVAL; |
435 | 415 | ||
436 | err = hermes_bap_seek(hw, bap, id, offset); | 416 | err = hermes_bap_seek(hw, bap, id, offset); |
@@ -438,49 +418,12 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len, | |||
438 | goto out; | 418 | goto out; |
439 | 419 | ||
440 | /* Actually do the transfer */ | 420 | /* Actually do the transfer */ |
441 | hermes_write_words(hw, dreg, buf, len/2); | 421 | hermes_write_bytes(hw, dreg, buf, len); |
442 | 422 | ||
443 | out: | 423 | out: |
444 | return err; | 424 | return err; |
445 | } | 425 | } |
446 | 426 | ||
447 | /* Write a block of data to the chip's buffer with padding if | ||
448 | * neccessary, via the BAP. Synchronization/serialization is the | ||
449 | * caller's problem. len must be even. | ||
450 | * | ||
451 | * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware | ||
452 | */ | ||
453 | int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf, unsigned data_len, int len, | ||
454 | u16 id, u16 offset) | ||
455 | { | ||
456 | int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; | ||
457 | int err = 0; | ||
458 | |||
459 | if (len < 0 || len % 2 || data_len > len) | ||
460 | return -EINVAL; | ||
461 | |||
462 | err = hermes_bap_seek(hw, bap, id, offset); | ||
463 | if (err) | ||
464 | goto out; | ||
465 | |||
466 | /* Transfer all the complete words of data */ | ||
467 | hermes_write_words(hw, dreg, buf, data_len/2); | ||
468 | /* If there is an odd byte left over pad and transfer it */ | ||
469 | if (data_len & 1) { | ||
470 | u8 end[2]; | ||
471 | end[1] = 0; | ||
472 | end[0] = ((unsigned char *)buf)[data_len - 1]; | ||
473 | hermes_write_words(hw, dreg, end, 1); | ||
474 | data_len ++; | ||
475 | } | ||
476 | /* Now send zeros for the padding */ | ||
477 | if (data_len < len) | ||
478 | hermes_clear_words(hw, dreg, (len - data_len) / 2); | ||
479 | /* Complete */ | ||
480 | out: | ||
481 | return err; | ||
482 | } | ||
483 | |||
484 | /* Read a Length-Type-Value record from the card. | 427 | /* Read a Length-Type-Value record from the card. |
485 | * | 428 | * |
486 | * If length is NULL, we ignore the length read from the card, and | 429 | * If length is NULL, we ignore the length read from the card, and |
@@ -553,7 +496,7 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, | |||
553 | 496 | ||
554 | count = length - 1; | 497 | count = length - 1; |
555 | 498 | ||
556 | hermes_write_words(hw, dreg, value, count); | 499 | hermes_write_bytes(hw, dreg, value, count << 1); |
557 | 500 | ||
558 | err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE, | 501 | err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE, |
559 | rid, NULL); | 502 | rid, NULL); |
@@ -568,7 +511,6 @@ EXPORT_SYMBOL(hermes_allocate); | |||
568 | 511 | ||
569 | EXPORT_SYMBOL(hermes_bap_pread); | 512 | EXPORT_SYMBOL(hermes_bap_pread); |
570 | EXPORT_SYMBOL(hermes_bap_pwrite); | 513 | EXPORT_SYMBOL(hermes_bap_pwrite); |
571 | EXPORT_SYMBOL(hermes_bap_pwrite_pad); | ||
572 | EXPORT_SYMBOL(hermes_read_ltv); | 514 | EXPORT_SYMBOL(hermes_read_ltv); |
573 | EXPORT_SYMBOL(hermes_write_ltv); | 515 | EXPORT_SYMBOL(hermes_write_ltv); |
574 | 516 | ||
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h index 7644f72a9f4e..8e3f0e3edb58 100644 --- a/drivers/net/wireless/hermes.h +++ b/drivers/net/wireless/hermes.h | |||
@@ -328,16 +328,6 @@ struct hermes_multicast { | |||
328 | u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN]; | 328 | u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN]; |
329 | } __attribute__ ((packed)); | 329 | } __attribute__ ((packed)); |
330 | 330 | ||
331 | // #define HERMES_DEBUG_BUFFER 1 | ||
332 | #define HERMES_DEBUG_BUFSIZE 4096 | ||
333 | struct hermes_debug_entry { | ||
334 | int bap; | ||
335 | u16 id, offset; | ||
336 | int cycles; | ||
337 | }; | ||
338 | |||
339 | #ifdef __KERNEL__ | ||
340 | |||
341 | /* Timeouts */ | 331 | /* Timeouts */ |
342 | #define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */ | 332 | #define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */ |
343 | 333 | ||
@@ -347,14 +337,7 @@ typedef struct hermes { | |||
347 | int reg_spacing; | 337 | int reg_spacing; |
348 | #define HERMES_16BIT_REGSPACING 0 | 338 | #define HERMES_16BIT_REGSPACING 0 |
349 | #define HERMES_32BIT_REGSPACING 1 | 339 | #define HERMES_32BIT_REGSPACING 1 |
350 | |||
351 | u16 inten; /* Which interrupts should be enabled? */ | 340 | u16 inten; /* Which interrupts should be enabled? */ |
352 | |||
353 | #ifdef HERMES_DEBUG_BUFFER | ||
354 | struct hermes_debug_entry dbuf[HERMES_DEBUG_BUFSIZE]; | ||
355 | unsigned long dbufp; | ||
356 | unsigned long profile[HERMES_BAP_BUSY_TIMEOUT+1]; | ||
357 | #endif | ||
358 | } hermes_t; | 341 | } hermes_t; |
359 | 342 | ||
360 | /* Register access convenience macros */ | 343 | /* Register access convenience macros */ |
@@ -376,8 +359,6 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len, | |||
376 | u16 id, u16 offset); | 359 | u16 id, u16 offset); |
377 | int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len, | 360 | int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len, |
378 | u16 id, u16 offset); | 361 | u16 id, u16 offset); |
379 | int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf, | ||
380 | unsigned data_len, int len, u16 id, u16 offset); | ||
381 | int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen, | 362 | int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen, |
382 | u16 *length, void *buf); | 363 | u16 *length, void *buf); |
383 | int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, | 364 | int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, |
@@ -425,10 +406,13 @@ static inline void hermes_read_words(struct hermes *hw, int off, void *buf, unsi | |||
425 | ioread16_rep(hw->iobase + off, buf, count); | 406 | ioread16_rep(hw->iobase + off, buf, count); |
426 | } | 407 | } |
427 | 408 | ||
428 | static inline void hermes_write_words(struct hermes *hw, int off, const void *buf, unsigned count) | 409 | static inline void hermes_write_bytes(struct hermes *hw, int off, |
410 | const char *buf, unsigned count) | ||
429 | { | 411 | { |
430 | off = off << hw->reg_spacing; | 412 | off = off << hw->reg_spacing; |
431 | iowrite16_rep(hw->iobase + off, buf, count); | 413 | iowrite16_rep(hw->iobase + off, buf, count >> 1); |
414 | if (unlikely(count & 1)) | ||
415 | iowrite8(buf[count - 1], hw->iobase + off); | ||
432 | } | 416 | } |
433 | 417 | ||
434 | static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count) | 418 | static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count) |
@@ -462,21 +446,4 @@ static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word) | |||
462 | return HERMES_WRITE_RECORD(hw, bap, rid, &rec); | 446 | return HERMES_WRITE_RECORD(hw, bap, rid, &rec); |
463 | } | 447 | } |
464 | 448 | ||
465 | #else /* ! __KERNEL__ */ | ||
466 | |||
467 | /* These are provided for the benefit of userspace drivers and testing programs | ||
468 | which use ioperm() or iopl() */ | ||
469 | |||
470 | #define hermes_read_reg(base, off) (inw((base) + (off))) | ||
471 | #define hermes_write_reg(base, off, val) (outw((val), (base) + (off))) | ||
472 | |||
473 | #define hermes_read_regn(base, name) (hermes_read_reg((base), HERMES_##name)) | ||
474 | #define hermes_write_regn(base, name, val) (hermes_write_reg((base), HERMES_##name, (val))) | ||
475 | |||
476 | /* Note that for the next two, the count is in 16-bit words, not bytes */ | ||
477 | #define hermes_read_data(base, off, buf, count) (insw((base) + (off), (buf), (count))) | ||
478 | #define hermes_write_data(base, off, buf, count) (outsw((base) + (off), (buf), (count))) | ||
479 | |||
480 | #endif /* ! __KERNEL__ */ | ||
481 | |||
482 | #endif /* _HERMES_H */ | 449 | #endif /* _HERMES_H */ |
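The new hermes_write_bytes() above is what lets hermes_bap_pwrite() drop its "len must be even" rule and lets hermes_bap_pwrite_pad() go away entirely: the even prefix is pushed as 16-bit transfers (count >> 1 words) and a single trailing byte, if any, goes out with iowrite8(). A small host-side model of the same split, writing into an ordinary buffer instead of MMIO; write_bytes() and the sample frame are illustrative only:

    #include <stdio.h>
    #include <stdint.h>

    static void write_bytes(uint8_t *dest, const uint8_t *buf, unsigned count)
    {
        unsigned words = count >> 1;    /* models iowrite16_rep(..., count >> 1) */
        unsigned i;

        for (i = 0; i < words; i++) {   /* one 16-bit transfer per byte pair */
            dest[2 * i]     = buf[2 * i];
            dest[2 * i + 1] = buf[2 * i + 1];
        }
        if (count & 1)                  /* models the trailing iowrite8() */
            dest[count - 1] = buf[count - 1];
    }

    int main(void)
    {
        uint8_t out[8] = { 0 };
        const uint8_t frame[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned i;

        write_bytes(out, frame, sizeof(frame));   /* odd length: 2 words + 1 byte */
        for (i = 0; i < sizeof(out); i++)
            printf("%02x ", out[i]);
        printf("\n");
        return 0;
    }

Note that hermes_write_ltv() converts its word count back to bytes (count << 1) before calling the new helper, so record writes keep their original semantics.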
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 06a5214145e3..4a5be70c0419 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c | |||
@@ -534,5 +534,4 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
534 | } | 534 | } |
535 | 535 | ||
536 | 536 | ||
537 | EXPORT_SYMBOL(hostap_dump_tx_80211); | ||
538 | EXPORT_SYMBOL(hostap_master_start_xmit); | 537 | EXPORT_SYMBOL(hostap_master_start_xmit); |
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index 06c3fa32b310..ba13125024cb 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c | |||
@@ -3276,17 +3276,6 @@ EXPORT_SYMBOL(hostap_init_data); | |||
3276 | EXPORT_SYMBOL(hostap_init_ap_proc); | 3276 | EXPORT_SYMBOL(hostap_init_ap_proc); |
3277 | EXPORT_SYMBOL(hostap_free_data); | 3277 | EXPORT_SYMBOL(hostap_free_data); |
3278 | EXPORT_SYMBOL(hostap_check_sta_fw_version); | 3278 | EXPORT_SYMBOL(hostap_check_sta_fw_version); |
3279 | EXPORT_SYMBOL(hostap_handle_sta_tx); | ||
3280 | EXPORT_SYMBOL(hostap_handle_sta_release); | ||
3281 | EXPORT_SYMBOL(hostap_handle_sta_tx_exc); | 3279 | EXPORT_SYMBOL(hostap_handle_sta_tx_exc); |
3282 | EXPORT_SYMBOL(hostap_update_sta_ps); | ||
3283 | EXPORT_SYMBOL(hostap_handle_sta_rx); | ||
3284 | EXPORT_SYMBOL(hostap_is_sta_assoc); | ||
3285 | EXPORT_SYMBOL(hostap_is_sta_authorized); | ||
3286 | EXPORT_SYMBOL(hostap_add_sta); | ||
3287 | EXPORT_SYMBOL(hostap_update_rates); | ||
3288 | EXPORT_SYMBOL(hostap_add_wds_links); | ||
3289 | EXPORT_SYMBOL(hostap_wds_link_oper); | ||
3290 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT | 3280 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT |
3291 | EXPORT_SYMBOL(hostap_deauth_all_stas); | ||
3292 | #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ | 3281 | #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ |
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index 55bed923fbe9..db03dc2646df 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c | |||
@@ -881,6 +881,12 @@ static struct pcmcia_device_id hostap_cs_ids[] = { | |||
881 | PCMCIA_DEVICE_PROD_ID12( | 881 | PCMCIA_DEVICE_PROD_ID12( |
882 | "ZoomAir 11Mbps High", "Rate wireless Networking", | 882 | "ZoomAir 11Mbps High", "Rate wireless Networking", |
883 | 0x273fe3db, 0x32a1eaee), | 883 | 0x273fe3db, 0x32a1eaee), |
884 | PCMCIA_DEVICE_PROD_ID123( | ||
885 | "Pretec", "CompactWLAN Card 802.11b", "2.5", | ||
886 | 0x1cadd3e5, 0xe697636c, 0x7a5bfcf1), | ||
887 | PCMCIA_DEVICE_PROD_ID123( | ||
888 | "U.S. Robotics", "IEEE 802.11b PC-CARD", "Version 01.02", | ||
889 | 0xc7b8df9d, 0x1700d087, 0x4b74baa0), | ||
884 | PCMCIA_DEVICE_NULL | 890 | PCMCIA_DEVICE_NULL |
885 | }; | 891 | }; |
886 | MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids); | 892 | MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids); |
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 8dd4c4446a64..93786f4218f0 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c | |||
@@ -1125,11 +1125,9 @@ EXPORT_SYMBOL(hostap_set_auth_algs); | |||
1125 | EXPORT_SYMBOL(hostap_dump_rx_header); | 1125 | EXPORT_SYMBOL(hostap_dump_rx_header); |
1126 | EXPORT_SYMBOL(hostap_dump_tx_header); | 1126 | EXPORT_SYMBOL(hostap_dump_tx_header); |
1127 | EXPORT_SYMBOL(hostap_80211_header_parse); | 1127 | EXPORT_SYMBOL(hostap_80211_header_parse); |
1128 | EXPORT_SYMBOL(hostap_80211_prism_header_parse); | ||
1129 | EXPORT_SYMBOL(hostap_80211_get_hdrlen); | 1128 | EXPORT_SYMBOL(hostap_80211_get_hdrlen); |
1130 | EXPORT_SYMBOL(hostap_get_stats); | 1129 | EXPORT_SYMBOL(hostap_get_stats); |
1131 | EXPORT_SYMBOL(hostap_setup_dev); | 1130 | EXPORT_SYMBOL(hostap_setup_dev); |
1132 | EXPORT_SYMBOL(hostap_proc); | ||
1133 | EXPORT_SYMBOL(hostap_set_multicast_list_queue); | 1131 | EXPORT_SYMBOL(hostap_set_multicast_list_queue); |
1134 | EXPORT_SYMBOL(hostap_set_hostapd); | 1132 | EXPORT_SYMBOL(hostap_set_hostapd); |
1135 | EXPORT_SYMBOL(hostap_set_hostapd_sta); | 1133 | EXPORT_SYMBOL(hostap_set_hostapd_sta); |
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index bca89cff85a6..39f82f219749 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -33,7 +33,44 @@ | |||
33 | #include "ipw2200.h" | 33 | #include "ipw2200.h" |
34 | #include <linux/version.h> | 34 | #include <linux/version.h> |
35 | 35 | ||
36 | #define IPW2200_VERSION "git-1.1.1" | 36 | |
37 | #ifndef KBUILD_EXTMOD | ||
38 | #define VK "k" | ||
39 | #else | ||
40 | #define VK | ||
41 | #endif | ||
42 | |||
43 | #ifdef CONFIG_IPW2200_DEBUG | ||
44 | #define VD "d" | ||
45 | #else | ||
46 | #define VD | ||
47 | #endif | ||
48 | |||
49 | #ifdef CONFIG_IPW2200_MONITOR | ||
50 | #define VM "m" | ||
51 | #else | ||
52 | #define VM | ||
53 | #endif | ||
54 | |||
55 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
56 | #define VP "p" | ||
57 | #else | ||
58 | #define VP | ||
59 | #endif | ||
60 | |||
61 | #ifdef CONFIG_IPW2200_RADIOTAP | ||
62 | #define VR "r" | ||
63 | #else | ||
64 | #define VR | ||
65 | #endif | ||
66 | |||
67 | #ifdef CONFIG_IPW2200_QOS | ||
68 | #define VQ "q" | ||
69 | #else | ||
70 | #define VQ | ||
71 | #endif | ||
72 | |||
73 | #define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ | ||
37 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" | 74 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" |
38 | #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" | 75 | #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" |
39 | #define DRV_VERSION IPW2200_VERSION | 76 | #define DRV_VERSION IPW2200_VERSION |
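The version macro above is assembled from one single-letter suffix per build option ("k" for an in-tree build, "d" debug, "m" monitor, "p" promiscuous, "r" radiotap, "q" QoS), relying on C's adjacent string-literal concatenation and on empty macros simply vanishing. A compile-time illustration with a made-up configuration (here only debug and QoS enabled), just to show how the suffixes combine:

    #include <stdio.h>

    #define CONFIG_IPW2200_DEBUG 1      /* pretend these two options are set */
    #define CONFIG_IPW2200_QOS   1

    #define VK "k"                      /* in-tree build */
    #ifdef CONFIG_IPW2200_DEBUG
    #define VD "d"
    #else
    #define VD
    #endif
    #define VM                          /* monitor: off in this example */
    #define VP                          /* promiscuous: off */
    #define VR                          /* radiotap: off */
    #ifdef CONFIG_IPW2200_QOS
    #define VQ "q"
    #else
    #define VQ
    #endif

    #define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ

    int main(void)
    {
        printf("%s\n", IPW2200_VERSION);    /* prints: 1.1.2kdq */
        return 0;
    }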
@@ -46,7 +83,9 @@ MODULE_AUTHOR(DRV_COPYRIGHT); | |||
46 | MODULE_LICENSE("GPL"); | 83 | MODULE_LICENSE("GPL"); |
47 | 84 | ||
48 | static int cmdlog = 0; | 85 | static int cmdlog = 0; |
86 | #ifdef CONFIG_IPW2200_DEBUG | ||
49 | static int debug = 0; | 87 | static int debug = 0; |
88 | #endif | ||
50 | static int channel = 0; | 89 | static int channel = 0; |
51 | static int mode = 0; | 90 | static int mode = 0; |
52 | 91 | ||
@@ -61,8 +100,14 @@ static int roaming = 1; | |||
61 | static const char ipw_modes[] = { | 100 | static const char ipw_modes[] = { |
62 | 'a', 'b', 'g', '?' | 101 | 'a', 'b', 'g', '?' |
63 | }; | 102 | }; |
103 | static int antenna = CFG_SYS_ANTENNA_BOTH; | ||
64 | 104 | ||
65 | #ifdef CONFIG_IPW_QOS | 105 | #ifdef CONFIG_IPW2200_PROMISCUOUS |
106 | static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */ | ||
107 | #endif | ||
108 | |||
109 | |||
110 | #ifdef CONFIG_IPW2200_QOS | ||
66 | static int qos_enable = 0; | 111 | static int qos_enable = 0; |
67 | static int qos_burst_enable = 0; | 112 | static int qos_burst_enable = 0; |
68 | static int qos_no_ack_mask = 0; | 113 | static int qos_no_ack_mask = 0; |
@@ -126,7 +171,7 @@ static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_q | |||
126 | *qos_param); | 171 | *qos_param); |
127 | static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element | 172 | static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element |
128 | *qos_param); | 173 | *qos_param); |
129 | #endif /* CONFIG_IPW_QOS */ | 174 | #endif /* CONFIG_IPW2200_QOS */ |
130 | 175 | ||
131 | static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev); | 176 | static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev); |
132 | static void ipw_remove_current_network(struct ipw_priv *priv); | 177 | static void ipw_remove_current_network(struct ipw_priv *priv); |
@@ -1269,6 +1314,105 @@ static ssize_t show_cmd_log(struct device *d, | |||
1269 | 1314 | ||
1270 | static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL); | 1315 | static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL); |
1271 | 1316 | ||
1317 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
1318 | static void ipw_prom_free(struct ipw_priv *priv); | ||
1319 | static int ipw_prom_alloc(struct ipw_priv *priv); | ||
1320 | static ssize_t store_rtap_iface(struct device *d, | ||
1321 | struct device_attribute *attr, | ||
1322 | const char *buf, size_t count) | ||
1323 | { | ||
1324 | struct ipw_priv *priv = dev_get_drvdata(d); | ||
1325 | int rc = 0; | ||
1326 | |||
1327 | if (count < 1) | ||
1328 | return -EINVAL; | ||
1329 | |||
1330 | switch (buf[0]) { | ||
1331 | case '0': | ||
1332 | if (!rtap_iface) | ||
1333 | return count; | ||
1334 | |||
1335 | if (netif_running(priv->prom_net_dev)) { | ||
1336 | IPW_WARNING("Interface is up. Cannot unregister.\n"); | ||
1337 | return count; | ||
1338 | } | ||
1339 | |||
1340 | ipw_prom_free(priv); | ||
1341 | rtap_iface = 0; | ||
1342 | break; | ||
1343 | |||
1344 | case '1': | ||
1345 | if (rtap_iface) | ||
1346 | return count; | ||
1347 | |||
1348 | rc = ipw_prom_alloc(priv); | ||
1349 | if (!rc) | ||
1350 | rtap_iface = 1; | ||
1351 | break; | ||
1352 | |||
1353 | default: | ||
1354 | return -EINVAL; | ||
1355 | } | ||
1356 | |||
1357 | if (rc) { | ||
1358 | IPW_ERROR("Failed to register promiscuous network " | ||
1359 | "device (error %d).\n", rc); | ||
1360 | } | ||
1361 | |||
1362 | return count; | ||
1363 | } | ||
1364 | |||
1365 | static ssize_t show_rtap_iface(struct device *d, | ||
1366 | struct device_attribute *attr, | ||
1367 | char *buf) | ||
1368 | { | ||
1369 | struct ipw_priv *priv = dev_get_drvdata(d); | ||
1370 | if (rtap_iface) | ||
1371 | return sprintf(buf, "%s", priv->prom_net_dev->name); | ||
1372 | else { | ||
1373 | buf[0] = '-'; | ||
1374 | buf[1] = '1'; | ||
1375 | buf[2] = '\0'; | ||
1376 | return 3; | ||
1377 | } | ||
1378 | } | ||
1379 | |||
1380 | static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface, | ||
1381 | store_rtap_iface); | ||
1382 | |||
1383 | static ssize_t store_rtap_filter(struct device *d, | ||
1384 | struct device_attribute *attr, | ||
1385 | const char *buf, size_t count) | ||
1386 | { | ||
1387 | struct ipw_priv *priv = dev_get_drvdata(d); | ||
1388 | |||
1389 | if (!priv->prom_priv) { | ||
1390 | IPW_ERROR("Attempting to set filter without " | ||
1391 | "rtap_iface enabled.\n"); | ||
1392 | return -EPERM; | ||
1393 | } | ||
1394 | |||
1395 | priv->prom_priv->filter = simple_strtol(buf, NULL, 0); | ||
1396 | |||
1397 | IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n", | ||
1398 | BIT_ARG16(priv->prom_priv->filter)); | ||
1399 | |||
1400 | return count; | ||
1401 | } | ||
1402 | |||
1403 | static ssize_t show_rtap_filter(struct device *d, | ||
1404 | struct device_attribute *attr, | ||
1405 | char *buf) | ||
1406 | { | ||
1407 | struct ipw_priv *priv = dev_get_drvdata(d); | ||
1408 | return sprintf(buf, "0x%04X", | ||
1409 | priv->prom_priv ? priv->prom_priv->filter : 0); | ||
1410 | } | ||
1411 | |||
1412 | static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter, | ||
1413 | store_rtap_filter); | ||
1414 | #endif | ||
1415 | |||
1272 | static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, | 1416 | static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, |
1273 | char *buf) | 1417 | char *buf) |
1274 | { | 1418 | { |
@@ -2025,16 +2169,11 @@ static int ipw_send_host_complete(struct ipw_priv *priv) | |||
2025 | return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE); | 2169 | return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE); |
2026 | } | 2170 | } |
2027 | 2171 | ||
2028 | static int ipw_send_system_config(struct ipw_priv *priv, | 2172 | static int ipw_send_system_config(struct ipw_priv *priv) |
2029 | struct ipw_sys_config *config) | ||
2030 | { | 2173 | { |
2031 | if (!priv || !config) { | 2174 | return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, |
2032 | IPW_ERROR("Invalid args\n"); | 2175 | sizeof(priv->sys_config), |
2033 | return -1; | 2176 | &priv->sys_config); |
2034 | } | ||
2035 | |||
2036 | return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config), | ||
2037 | config); | ||
2038 | } | 2177 | } |
2039 | 2178 | ||
2040 | static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) | 2179 | static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) |
@@ -3104,10 +3243,10 @@ static int ipw_reset_nic(struct ipw_priv *priv) | |||
3104 | 3243 | ||
3105 | 3244 | ||
3106 | struct ipw_fw { | 3245 | struct ipw_fw { |
3107 | u32 ver; | 3246 | __le32 ver; |
3108 | u32 boot_size; | 3247 | __le32 boot_size; |
3109 | u32 ucode_size; | 3248 | __le32 ucode_size; |
3110 | u32 fw_size; | 3249 | __le32 fw_size; |
3111 | u8 data[0]; | 3250 | u8 data[0]; |
3112 | }; | 3251 | }; |
3113 | 3252 | ||
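Switching the ipw_fw header fields to __le32 above makes explicit that the firmware image stores its sizes little-endian, and the accesses shown later go through le32_to_cpu(), so the size check and the boot/ucode/fw offsets also come out right on big-endian hosts. A small host-side sketch of parsing such a header from raw bytes; get_le32() and the sample values are illustrative, not the driver's loader:

    #include <stdio.h>
    #include <stdint.h>

    /* Decode a little-endian u32 regardless of host byte order --
     * the user-space counterpart of le32_to_cpu(). */
    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        /* ver, boot_size, ucode_size, fw_size -- e.g. 2, 16, 32, 64. */
        const uint8_t hdr[16] = {
            0x02, 0x00, 0x00, 0x00,
            0x10, 0x00, 0x00, 0x00,
            0x20, 0x00, 0x00, 0x00,
            0x40, 0x00, 0x00, 0x00,
        };
        uint32_t boot_size  = get_le32(hdr + 4);
        uint32_t ucode_size = get_le32(hdr + 8);
        uint32_t fw_size    = get_le32(hdr + 12);
        size_t need = sizeof(hdr) + boot_size + ucode_size + fw_size;

        /* The image is valid only if it holds the header plus all three
         * blobs; the blobs then start back-to-back after the header. */
        printf("need %zu bytes; ucode at +%u, fw at +%u\n",
               need, (unsigned)boot_size,
               (unsigned)(boot_size + ucode_size));
        return 0;
    }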
@@ -3131,8 +3270,8 @@ static int ipw_get_fw(struct ipw_priv *priv, | |||
3131 | 3270 | ||
3132 | fw = (void *)(*raw)->data; | 3271 | fw = (void *)(*raw)->data; |
3133 | 3272 | ||
3134 | if ((*raw)->size < sizeof(*fw) + | 3273 | if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) + |
3135 | fw->boot_size + fw->ucode_size + fw->fw_size) { | 3274 | le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) { |
3136 | IPW_ERROR("%s is too small or corrupt (%zd)\n", | 3275 | IPW_ERROR("%s is too small or corrupt (%zd)\n", |
3137 | name, (*raw)->size); | 3276 | name, (*raw)->size); |
3138 | return -EINVAL; | 3277 | return -EINVAL; |
@@ -3237,8 +3376,9 @@ static int ipw_load(struct ipw_priv *priv) | |||
3237 | 3376 | ||
3238 | fw = (void *)raw->data; | 3377 | fw = (void *)raw->data; |
3239 | boot_img = &fw->data[0]; | 3378 | boot_img = &fw->data[0]; |
3240 | ucode_img = &fw->data[fw->boot_size]; | 3379 | ucode_img = &fw->data[le32_to_cpu(fw->boot_size)]; |
3241 | fw_img = &fw->data[fw->boot_size + fw->ucode_size]; | 3380 | fw_img = &fw->data[le32_to_cpu(fw->boot_size) + |
3381 | le32_to_cpu(fw->ucode_size)]; | ||
3242 | 3382 | ||
3243 | if (rc < 0) | 3383 | if (rc < 0) |
3244 | goto error; | 3384 | goto error; |
@@ -3272,7 +3412,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3272 | IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); | 3412 | IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); |
3273 | 3413 | ||
3274 | /* DMA the initial boot firmware into the device */ | 3414 | /* DMA the initial boot firmware into the device */ |
3275 | rc = ipw_load_firmware(priv, boot_img, fw->boot_size); | 3415 | rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size)); |
3276 | if (rc < 0) { | 3416 | if (rc < 0) { |
3277 | IPW_ERROR("Unable to load boot firmware: %d\n", rc); | 3417 | IPW_ERROR("Unable to load boot firmware: %d\n", rc); |
3278 | goto error; | 3418 | goto error; |
@@ -3294,7 +3434,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3294 | ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); | 3434 | ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); |
3295 | 3435 | ||
3296 | /* DMA the ucode into the device */ | 3436 | /* DMA the ucode into the device */ |
3297 | rc = ipw_load_ucode(priv, ucode_img, fw->ucode_size); | 3437 | rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size)); |
3298 | if (rc < 0) { | 3438 | if (rc < 0) { |
3299 | IPW_ERROR("Unable to load ucode: %d\n", rc); | 3439 | IPW_ERROR("Unable to load ucode: %d\n", rc); |
3300 | goto error; | 3440 | goto error; |
@@ -3304,7 +3444,7 @@ static int ipw_load(struct ipw_priv *priv) | |||
3304 | ipw_stop_nic(priv); | 3444 | ipw_stop_nic(priv); |
3305 | 3445 | ||
3306 | /* DMA bss firmware into the device */ | 3446 | /* DMA bss firmware into the device */ |
3307 | rc = ipw_load_firmware(priv, fw_img, fw->fw_size); | 3447 | rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size)); |
3308 | if (rc < 0) { | 3448 | if (rc < 0) { |
3309 | IPW_ERROR("Unable to load firmware: %d\n", rc); | 3449 | IPW_ERROR("Unable to load firmware: %d\n", rc); |
3310 | goto error; | 3450 | goto error; |
@@ -3700,7 +3840,17 @@ static void ipw_bg_disassociate(void *data) | |||
3700 | static void ipw_system_config(void *data) | 3840 | static void ipw_system_config(void *data) |
3701 | { | 3841 | { |
3702 | struct ipw_priv *priv = data; | 3842 | struct ipw_priv *priv = data; |
3703 | ipw_send_system_config(priv, &priv->sys_config); | 3843 | |
3844 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
3845 | if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { | ||
3846 | priv->sys_config.accept_all_data_frames = 1; | ||
3847 | priv->sys_config.accept_non_directed_frames = 1; | ||
3848 | priv->sys_config.accept_all_mgmt_bcpr = 1; | ||
3849 | priv->sys_config.accept_all_mgmt_frames = 1; | ||
3850 | } | ||
3851 | #endif | ||
3852 | |||
3853 | ipw_send_system_config(priv); | ||
3704 | } | 3854 | } |
3705 | 3855 | ||
3706 | struct ipw_status_code { | 3856 | struct ipw_status_code { |
@@ -3771,6 +3921,13 @@ static void inline average_init(struct average *avg) | |||
3771 | memset(avg, 0, sizeof(*avg)); | 3921 | memset(avg, 0, sizeof(*avg)); |
3772 | } | 3922 | } |
3773 | 3923 | ||
3924 | #define DEPTH_RSSI 8 | ||
3925 | #define DEPTH_NOISE 16 | ||
3926 | static s16 exponential_average(s16 prev_avg, s16 val, u8 depth) | ||
3927 | { | ||
3928 | return ((depth-1)*prev_avg + val)/depth; | ||
3929 | } | ||
3930 | |||
3774 | static void average_add(struct average *avg, s16 val) | 3931 | static void average_add(struct average *avg, s16 val) |
3775 | { | 3932 | { |
3776 | avg->sum -= avg->entries[avg->pos]; | 3933 | avg->sum -= avg->entries[avg->pos]; |
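The signal bookkeeping above replaces the windowed average with a one-line integer exponential average: each sample pulls the running value toward itself by roughly 1/depth of the difference, with depth 8 for RSSI and 16 for noise, seeded by the reset values (-60 for RSSI, -85 + 0x100 for noise). A short numeric check of how the helper converges, using the same integer arithmetic; the sample stream is made up:

    #include <stdio.h>

    /* Same formula as the driver helper: new = ((depth-1)*prev + val) / depth. */
    static short exponential_average(short prev_avg, short val, unsigned char depth)
    {
        return ((depth - 1) * prev_avg + val) / depth;
    }

    int main(void)
    {
        short avg = -60;                /* the RSSI reset value used above */
        int i;

        for (i = 0; i < 10; i++) {      /* feed ten samples of -40 dBm */
            avg = exponential_average(avg, -40, 8);
            printf("sample %2d: avg = %d\n", i + 1, avg);
        }
        return 0;
    }

After the first sample the average moves from -60 to -57, and within ten samples it is within a few dB of -40; the deeper depth used for noise (16) traces the same curve more slowly.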
@@ -3800,8 +3957,8 @@ static void ipw_reset_stats(struct ipw_priv *priv) | |||
3800 | priv->quality = 0; | 3957 | priv->quality = 0; |
3801 | 3958 | ||
3802 | average_init(&priv->average_missed_beacons); | 3959 | average_init(&priv->average_missed_beacons); |
3803 | average_init(&priv->average_rssi); | 3960 | priv->exp_avg_rssi = -60; |
3804 | average_init(&priv->average_noise); | 3961 | priv->exp_avg_noise = -85 + 0x100; |
3805 | 3962 | ||
3806 | priv->last_rate = 0; | 3963 | priv->last_rate = 0; |
3807 | priv->last_missed_beacons = 0; | 3964 | priv->last_missed_beacons = 0; |
@@ -4008,7 +4165,7 @@ static void ipw_gather_stats(struct ipw_priv *priv) | |||
4008 | IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n", | 4165 | IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n", |
4009 | tx_quality, tx_failures_delta, tx_packets_delta); | 4166 | tx_quality, tx_failures_delta, tx_packets_delta); |
4010 | 4167 | ||
4011 | rssi = average_value(&priv->average_rssi); | 4168 | rssi = priv->exp_avg_rssi; |
4012 | signal_quality = | 4169 | signal_quality = |
4013 | (100 * | 4170 | (100 * |
4014 | (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * | 4171 | (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * |
@@ -4185,7 +4342,7 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4185 | queue_work(priv->workqueue, | 4342 | queue_work(priv->workqueue, |
4186 | &priv->system_config); | 4343 | &priv->system_config); |
4187 | 4344 | ||
4188 | #ifdef CONFIG_IPW_QOS | 4345 | #ifdef CONFIG_IPW2200_QOS |
4189 | #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ | 4346 | #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ |
4190 | le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl)) | 4347 | le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl)) |
4191 | if ((priv->status & STATUS_AUTH) && | 4348 | if ((priv->status & STATUS_AUTH) && |
@@ -4482,6 +4639,24 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4482 | && priv->status & STATUS_ASSOCIATED) | 4639 | && priv->status & STATUS_ASSOCIATED) |
4483 | queue_delayed_work(priv->workqueue, | 4640 | queue_delayed_work(priv->workqueue, |
4484 | &priv->request_scan, HZ); | 4641 | &priv->request_scan, HZ); |
4642 | |||
4643 | /* Send an empty event to user space. | ||
4644 | * We don't send the received data on the event because | ||
4645 | * it would require us to do complex transcoding, and | ||
4646 | * we want to minimise the work done in the irq handler | ||
4647 | * Use a request to extract the data. | ||
4648 | * Also, we generate this even for any scan, regardless | ||
4649 | * on how the scan was initiated. User space can just | ||
4650 | * sync on periodic scan to get fresh data... | ||
4651 | * Jean II */ | ||
4652 | if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) { | ||
4653 | union iwreq_data wrqu; | ||
4654 | |||
4655 | wrqu.data.length = 0; | ||
4656 | wrqu.data.flags = 0; | ||
4657 | wireless_send_event(priv->net_dev, SIOCGIWSCAN, | ||
4658 | &wrqu, NULL); | ||
4659 | } | ||
4485 | break; | 4660 | break; |
4486 | } | 4661 | } |
4487 | 4662 | ||
@@ -4577,11 +4752,10 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4577 | 4752 | ||
4578 | case HOST_NOTIFICATION_NOISE_STATS:{ | 4753 | case HOST_NOTIFICATION_NOISE_STATS:{ |
4579 | if (notif->size == sizeof(u32)) { | 4754 | if (notif->size == sizeof(u32)) { |
4580 | priv->last_noise = | 4755 | priv->exp_avg_noise = |
4581 | (u8) (le32_to_cpu(notif->u.noise.value) & | 4756 | exponential_average(priv->exp_avg_noise, |
4582 | 0xff); | 4757 | (u8) (le32_to_cpu(notif->u.noise.value) & 0xff), |
4583 | average_add(&priv->average_noise, | 4758 | DEPTH_NOISE); |
4584 | priv->last_noise); | ||
4585 | break; | 4759 | break; |
4586 | } | 4760 | } |
4587 | 4761 | ||
@@ -6170,8 +6344,6 @@ static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, | |||
6170 | { | 6344 | { |
6171 | /* make sure WPA is enabled */ | 6345 | /* make sure WPA is enabled */ |
6172 | ipw_wpa_enable(priv, 1); | 6346 | ipw_wpa_enable(priv, 1); |
6173 | |||
6174 | ipw_disassociate(priv); | ||
6175 | } | 6347 | } |
6176 | 6348 | ||
6177 | static int ipw_set_rsn_capa(struct ipw_priv *priv, | 6349 | static int ipw_set_rsn_capa(struct ipw_priv *priv, |
@@ -6365,6 +6537,7 @@ static int ipw_wx_set_auth(struct net_device *dev, | |||
6365 | 6537 | ||
6366 | case IW_AUTH_WPA_ENABLED: | 6538 | case IW_AUTH_WPA_ENABLED: |
6367 | ret = ipw_wpa_enable(priv, param->value); | 6539 | ret = ipw_wpa_enable(priv, param->value); |
6540 | ipw_disassociate(priv); | ||
6368 | break; | 6541 | break; |
6369 | 6542 | ||
6370 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | 6543 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: |
@@ -6506,7 +6679,7 @@ static int ipw_wx_set_mlme(struct net_device *dev, | |||
6506 | return 0; | 6679 | return 0; |
6507 | } | 6680 | } |
6508 | 6681 | ||
6509 | #ifdef CONFIG_IPW_QOS | 6682 | #ifdef CONFIG_IPW2200_QOS |
6510 | 6683 | ||
6511 | /* QoS */ | 6684 | /* QoS */ |
6512 | /* | 6685 | /* |
@@ -6853,61 +7026,55 @@ static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority) | |||
6853 | return from_priority_to_tx_queue[priority] - 1; | 7026 | return from_priority_to_tx_queue[priority] - 1; |
6854 | } | 7027 | } |
6855 | 7028 | ||
6856 | /* | 7029 | static int ipw_is_qos_active(struct net_device *dev, |
6857 | * add QoS parameter to the TX command | 7030 | struct sk_buff *skb) |
6858 | */ | ||
6859 | static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, | ||
6860 | u16 priority, | ||
6861 | struct tfd_data *tfd, u8 unicast) | ||
6862 | { | 7031 | { |
6863 | int ret = 0; | 7032 | struct ipw_priv *priv = ieee80211_priv(dev); |
6864 | int tx_queue_id = 0; | ||
6865 | struct ieee80211_qos_data *qos_data = NULL; | 7033 | struct ieee80211_qos_data *qos_data = NULL; |
6866 | int active, supported; | 7034 | int active, supported; |
6867 | unsigned long flags; | 7035 | u8 *daddr = skb->data + ETH_ALEN; |
7036 | int unicast = !is_multicast_ether_addr(daddr); | ||
6868 | 7037 | ||
6869 | if (!(priv->status & STATUS_ASSOCIATED)) | 7038 | if (!(priv->status & STATUS_ASSOCIATED)) |
6870 | return 0; | 7039 | return 0; |
6871 | 7040 | ||
6872 | qos_data = &priv->assoc_network->qos_data; | 7041 | qos_data = &priv->assoc_network->qos_data; |
6873 | 7042 | ||
6874 | spin_lock_irqsave(&priv->ieee->lock, flags); | ||
6875 | |||
6876 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) { | 7043 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) { |
6877 | if (unicast == 0) | 7044 | if (unicast == 0) |
6878 | qos_data->active = 0; | 7045 | qos_data->active = 0; |
6879 | else | 7046 | else |
6880 | qos_data->active = qos_data->supported; | 7047 | qos_data->active = qos_data->supported; |
6881 | } | 7048 | } |
6882 | |||
6883 | active = qos_data->active; | 7049 | active = qos_data->active; |
6884 | supported = qos_data->supported; | 7050 | supported = qos_data->supported; |
6885 | |||
6886 | spin_unlock_irqrestore(&priv->ieee->lock, flags); | ||
6887 | |||
6888 | IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d " | 7051 | IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d " |
6889 | "unicast %d\n", | 7052 | "unicast %d\n", |
6890 | priv->qos_data.qos_enable, active, supported, unicast); | 7053 | priv->qos_data.qos_enable, active, supported, unicast); |
6891 | if (active && priv->qos_data.qos_enable) { | 7054 | if (active && priv->qos_data.qos_enable) |
6892 | ret = from_priority_to_tx_queue[priority]; | 7055 | return 1; |
6893 | tx_queue_id = ret - 1; | ||
6894 | IPW_DEBUG_QOS("QoS packet priority is %d \n", priority); | ||
6895 | if (priority <= 7) { | ||
6896 | tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED; | ||
6897 | tfd->tfd.tfd_26.mchdr.qos_ctrl = priority; | ||
6898 | tfd->tfd.tfd_26.mchdr.frame_ctl |= | ||
6899 | IEEE80211_STYPE_QOS_DATA; | ||
6900 | |||
6901 | if (priv->qos_data.qos_no_ack_mask & | ||
6902 | (1UL << tx_queue_id)) { | ||
6903 | tfd->tx_flags &= ~DCT_FLAG_ACK_REQD; | ||
6904 | tfd->tfd.tfd_26.mchdr.qos_ctrl |= | ||
6905 | CTRL_QOS_NO_ACK; | ||
6906 | } | ||
6907 | } | ||
6908 | } | ||
6909 | 7056 | ||
6910 | return ret; | 7057 | return 0; |
7058 | |||
7059 | } | ||
7060 | /* | ||
7061 | * add QoS parameter to the TX command | ||
7062 | */ | ||
7063 | static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, | ||
7064 | u16 priority, | ||
7065 | struct tfd_data *tfd) | ||
7066 | { | ||
7067 | int tx_queue_id = 0; | ||
7068 | |||
7069 | |||
7070 | tx_queue_id = from_priority_to_tx_queue[priority] - 1; | ||
7071 | tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED; | ||
7072 | |||
7073 | if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) { | ||
7074 | tfd->tx_flags &= ~DCT_FLAG_ACK_REQD; | ||
7075 | tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK; | ||
7076 | } | ||
7077 | return 0; | ||
6911 | } | 7078 | } |
6912 | 7079 | ||
6913 | /* | 7080 | /* |
@@ -6977,7 +7144,7 @@ static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos | |||
6977 | qos_param); | 7144 | qos_param); |
6978 | } | 7145 | } |
6979 | 7146 | ||
6980 | #endif /* CONFIG_IPW_QOS */ | 7147 | #endif /* CONFIG_IPW2200_QOS */ |
6981 | 7148 | ||
6982 | static int ipw_associate_network(struct ipw_priv *priv, | 7149 | static int ipw_associate_network(struct ipw_priv *priv, |
6983 | struct ieee80211_network *network, | 7150 | struct ieee80211_network *network, |
@@ -7116,7 +7283,7 @@ static int ipw_associate_network(struct ipw_priv *priv, | |||
7116 | else | 7283 | else |
7117 | priv->sys_config.answer_broadcast_ssid_probe = 0; | 7284 | priv->sys_config.answer_broadcast_ssid_probe = 0; |
7118 | 7285 | ||
7119 | err = ipw_send_system_config(priv, &priv->sys_config); | 7286 | err = ipw_send_system_config(priv); |
7120 | if (err) { | 7287 | if (err) { |
7121 | IPW_DEBUG_HC("Attempt to send sys config command failed.\n"); | 7288 | IPW_DEBUG_HC("Attempt to send sys config command failed.\n"); |
7122 | return err; | 7289 | return err; |
@@ -7141,7 +7308,7 @@ static int ipw_associate_network(struct ipw_priv *priv, | |||
7141 | 7308 | ||
7142 | priv->assoc_network = network; | 7309 | priv->assoc_network = network; |
7143 | 7310 | ||
7144 | #ifdef CONFIG_IPW_QOS | 7311 | #ifdef CONFIG_IPW2200_QOS |
7145 | ipw_qos_association(priv, network); | 7312 | ipw_qos_association(priv, network); |
7146 | #endif | 7313 | #endif |
7147 | 7314 | ||
@@ -7415,7 +7582,7 @@ static void ipw_handle_data_packet(struct ipw_priv *priv, | |||
7415 | } | 7582 | } |
7416 | } | 7583 | } |
7417 | 7584 | ||
7418 | #ifdef CONFIG_IEEE80211_RADIOTAP | 7585 | #ifdef CONFIG_IPW2200_RADIOTAP |
7419 | static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, | 7586 | static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, |
7420 | struct ipw_rx_mem_buffer *rxb, | 7587 | struct ipw_rx_mem_buffer *rxb, |
7421 | struct ieee80211_rx_stats *stats) | 7588 | struct ieee80211_rx_stats *stats) |
@@ -7432,15 +7599,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, | |||
7432 | /* Magic struct that slots into the radiotap header -- no reason | 7599 | /* Magic struct that slots into the radiotap header -- no reason |
7433 | * to build this manually element by element, we can write it much | 7600 | * to build this manually element by element, we can write it much |
7434 | * more efficiently than we can parse it. ORDER MATTERS HERE */ | 7601 | * more efficiently than we can parse it. ORDER MATTERS HERE */ |
7435 | struct ipw_rt_hdr { | 7602 | struct ipw_rt_hdr *ipw_rt; |
7436 | struct ieee80211_radiotap_header rt_hdr; | ||
7437 | u8 rt_flags; /* radiotap packet flags */ | ||
7438 | u8 rt_rate; /* rate in 500kb/s */ | ||
7439 | u16 rt_channel; /* channel in mhz */ | ||
7440 | u16 rt_chbitmask; /* channel bitfield */ | ||
7441 | s8 rt_dbmsignal; /* signal in dbM, kluged to signed */ | ||
7442 | u8 rt_antenna; /* antenna number */ | ||
7443 | } *ipw_rt; | ||
7444 | 7603 | ||
7445 | short len = le16_to_cpu(pkt->u.frame.length); | 7604 | short len = le16_to_cpu(pkt->u.frame.length); |
7446 | 7605 | ||
@@ -7494,9 +7653,11 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, | |||
7494 | /* Big bitfield of all the fields we provide in radiotap */ | 7653 | /* Big bitfield of all the fields we provide in radiotap */ |
7495 | ipw_rt->rt_hdr.it_present = | 7654 | ipw_rt->rt_hdr.it_present = |
7496 | ((1 << IEEE80211_RADIOTAP_FLAGS) | | 7655 | ((1 << IEEE80211_RADIOTAP_FLAGS) | |
7656 | (1 << IEEE80211_RADIOTAP_TSFT) | | ||
7497 | (1 << IEEE80211_RADIOTAP_RATE) | | 7657 | (1 << IEEE80211_RADIOTAP_RATE) | |
7498 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | 7658 | (1 << IEEE80211_RADIOTAP_CHANNEL) | |
7499 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | | 7659 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | |
7660 | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | | ||
7500 | (1 << IEEE80211_RADIOTAP_ANTENNA)); | 7661 | (1 << IEEE80211_RADIOTAP_ANTENNA)); |
7501 | 7662 | ||
7502 | /* Zero the flags, we'll add to them as we go */ | 7663 | /* Zero the flags, we'll add to them as we go */ |
@@ -7582,6 +7743,217 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, | |||
7582 | } | 7743 | } |
7583 | #endif | 7744 | #endif |
7584 | 7745 | ||
7746 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
7747 | #define ieee80211_is_probe_response(fc) \ | ||
7748 | ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \ | ||
7749 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP ) | ||
7750 | |||
7751 | #define ieee80211_is_management(fc) \ | ||
7752 | ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) | ||
7753 | |||
7754 | #define ieee80211_is_control(fc) \ | ||
7755 | ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) | ||
7756 | |||
7757 | #define ieee80211_is_data(fc) \ | ||
7758 | ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) | ||
7759 | |||
7760 | #define ieee80211_is_assoc_request(fc) \ | ||
7761 | ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ) | ||
7762 | |||
7763 | #define ieee80211_is_reassoc_request(fc) \ | ||
7764 | ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) | ||
7765 | |||
7766 | static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, | ||
7767 | struct ipw_rx_mem_buffer *rxb, | ||
7768 | struct ieee80211_rx_stats *stats) | ||
7769 | { | ||
7770 | struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; | ||
7771 | struct ipw_rx_frame *frame = &pkt->u.frame; | ||
7772 | struct ipw_rt_hdr *ipw_rt; | ||
7773 | |||
7774 | /* First cache any information we need before we overwrite | ||
7775 | * the information provided in the skb from the hardware */ | ||
7776 | struct ieee80211_hdr *hdr; | ||
7777 | u16 channel = frame->received_channel; | ||
7778 | u8 phy_flags = frame->antennaAndPhy; | ||
7779 | s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM; | ||
7780 | s8 noise = frame->noise; | ||
7781 | u8 rate = frame->rate; | ||
7782 | short len = le16_to_cpu(pkt->u.frame.length); | ||
7783 | u64 tsf = 0; | ||
7784 | struct sk_buff *skb; | ||
7785 | int hdr_only = 0; | ||
7786 | u16 filter = priv->prom_priv->filter; | ||
7787 | |||
7788 | /* If the filter is set to not include Rx frames then return */ | ||
7789 | if (filter & IPW_PROM_NO_RX) | ||
7790 | return; | ||
7791 | |||
7792 | /* We received data from the HW, so stop the watchdog */ | ||
7793 | priv->prom_net_dev->trans_start = jiffies; | ||
7794 | |||
7795 | if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { | ||
7796 | priv->prom_priv->ieee->stats.rx_errors++; | ||
7797 | IPW_DEBUG_DROP("Corruption detected! Oh no!\n"); | ||
7798 | return; | ||
7799 | } | ||
7800 | |||
7801 | /* We only process data packets if the interface is open */ | ||
7802 | if (unlikely(!netif_running(priv->prom_net_dev))) { | ||
7803 | priv->prom_priv->ieee->stats.rx_dropped++; | ||
7804 | IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); | ||
7805 | return; | ||
7806 | } | ||
7807 | |||
7808 | /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use | ||
7809 | * that now */ | ||
7810 | if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { | ||
7811 | /* FIXME: Should alloc bigger skb instead */ | ||
7812 | priv->prom_priv->ieee->stats.rx_dropped++; | ||
7813 | IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); | ||
7814 | return; | ||
7815 | } | ||
7816 | |||
7817 | hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE; | ||
7818 | if (ieee80211_is_management(hdr->frame_ctl)) { | ||
7819 | if (filter & IPW_PROM_NO_MGMT) | ||
7820 | return; | ||
7821 | if (filter & IPW_PROM_MGMT_HEADER_ONLY) | ||
7822 | hdr_only = 1; | ||
7823 | } else if (ieee80211_is_control(hdr->frame_ctl)) { | ||
7824 | if (filter & IPW_PROM_NO_CTL) | ||
7825 | return; | ||
7826 | if (filter & IPW_PROM_CTL_HEADER_ONLY) | ||
7827 | hdr_only = 1; | ||
7828 | } else if (ieee80211_is_data(hdr->frame_ctl)) { | ||
7829 | if (filter & IPW_PROM_NO_DATA) | ||
7830 | return; | ||
7831 | if (filter & IPW_PROM_DATA_HEADER_ONLY) | ||
7832 | hdr_only = 1; | ||
7833 | } | ||
7834 | |||
7835 | /* Copy the SKB since this is for the promiscuous side */ | ||
7836 | skb = skb_copy(rxb->skb, GFP_ATOMIC); | ||
7837 | if (skb == NULL) { | ||
7838 | IPW_ERROR("skb_copy failed for promiscuous copy.\n"); | ||
7839 | return; | ||
7840 | } | ||
7841 | |||
7842 | /* the radiotap header goes at the front; the frame data is copied in after it */ | ||
7843 | ipw_rt = (void *)skb->data; | ||
7844 | |||
7845 | if (hdr_only) | ||
7846 | len = ieee80211_get_hdrlen(hdr->frame_ctl); | ||
7847 | |||
7848 | memcpy(ipw_rt->payload, hdr, len); | ||
7849 | |||
7850 | /* Zero the radiotap static buffer ... We only need to zero the bytes | ||
7851 | * NOT part of our real header, saves a little time. | ||
7852 | * | ||
7853 | * No longer necessary since we fill in all our data. Purge before | ||
7854 | * merging patch officially. | ||
7855 | * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0, | ||
7856 | * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr)); | ||
7857 | */ | ||
7858 | |||
7859 | ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; | ||
7860 | ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ | ||
7861 | ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt); /* total header+data */ | ||
7862 | |||
7863 | /* Set the size of the skb to the size of the frame */ | ||
7864 | skb_put(skb, ipw_rt->rt_hdr.it_len + len); | ||
7865 | |||
7866 | /* Big bitfield of all the fields we provide in radiotap */ | ||
7867 | ipw_rt->rt_hdr.it_present = | ||
7868 | ((1 << IEEE80211_RADIOTAP_FLAGS) | | ||
7869 | (1 << IEEE80211_RADIOTAP_TSFT) | | ||
7870 | (1 << IEEE80211_RADIOTAP_RATE) | | ||
7871 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | ||
7872 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | | ||
7873 | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | | ||
7874 | (1 << IEEE80211_RADIOTAP_ANTENNA)); | ||
7875 | |||
7876 | /* Zero the flags, we'll add to them as we go */ | ||
7877 | ipw_rt->rt_flags = 0; | ||
7878 | |||
7879 | ipw_rt->rt_tsf = tsf; | ||
7880 | |||
7881 | /* Convert to DBM */ | ||
7882 | ipw_rt->rt_dbmsignal = signal; | ||
7883 | ipw_rt->rt_dbmnoise = noise; | ||
7884 | |||
7885 | /* Convert the channel data and set the flags */ | ||
7886 | ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel)); | ||
7887 | if (channel > 14) { /* 802.11a */ | ||
7888 | ipw_rt->rt_chbitmask = | ||
7889 | cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); | ||
7890 | } else if (phy_flags & (1 << 5)) { /* 802.11b */ | ||
7891 | ipw_rt->rt_chbitmask = | ||
7892 | cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); | ||
7893 | } else { /* 802.11g */ | ||
7894 | ipw_rt->rt_chbitmask = | ||
7895 | cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); | ||
7896 | } | ||
7897 | |||
7898 | /* set the rate in multiples of 500 kb/s */ | ||
7899 | switch (rate) { | ||
7900 | case IPW_TX_RATE_1MB: | ||
7901 | ipw_rt->rt_rate = 2; | ||
7902 | break; | ||
7903 | case IPW_TX_RATE_2MB: | ||
7904 | ipw_rt->rt_rate = 4; | ||
7905 | break; | ||
7906 | case IPW_TX_RATE_5MB: | ||
7907 | ipw_rt->rt_rate = 10; | ||
7908 | break; | ||
7909 | case IPW_TX_RATE_6MB: | ||
7910 | ipw_rt->rt_rate = 12; | ||
7911 | break; | ||
7912 | case IPW_TX_RATE_9MB: | ||
7913 | ipw_rt->rt_rate = 18; | ||
7914 | break; | ||
7915 | case IPW_TX_RATE_11MB: | ||
7916 | ipw_rt->rt_rate = 22; | ||
7917 | break; | ||
7918 | case IPW_TX_RATE_12MB: | ||
7919 | ipw_rt->rt_rate = 24; | ||
7920 | break; | ||
7921 | case IPW_TX_RATE_18MB: | ||
7922 | ipw_rt->rt_rate = 36; | ||
7923 | break; | ||
7924 | case IPW_TX_RATE_24MB: | ||
7925 | ipw_rt->rt_rate = 48; | ||
7926 | break; | ||
7927 | case IPW_TX_RATE_36MB: | ||
7928 | ipw_rt->rt_rate = 72; | ||
7929 | break; | ||
7930 | case IPW_TX_RATE_48MB: | ||
7931 | ipw_rt->rt_rate = 96; | ||
7932 | break; | ||
7933 | case IPW_TX_RATE_54MB: | ||
7934 | ipw_rt->rt_rate = 108; | ||
7935 | break; | ||
7936 | default: | ||
7937 | ipw_rt->rt_rate = 0; | ||
7938 | break; | ||
7939 | } | ||
7940 | |||
7941 | /* antenna number */ | ||
7942 | ipw_rt->rt_antenna = (phy_flags & 3); | ||
7943 | |||
7944 | /* set the preamble flag if we have it */ | ||
7945 | if (phy_flags & (1 << 6)) | ||
7946 | ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; | ||
7947 | |||
7948 | IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len); | ||
7949 | |||
7950 | if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) { | ||
7951 | priv->prom_priv->ieee->stats.rx_errors++; | ||
7952 | dev_kfree_skb_any(skb); | ||
7953 | } | ||
7954 | } | ||
7955 | #endif | ||
7956 | |||
7585 | static int is_network_packet(struct ipw_priv *priv, | 7957 | static int is_network_packet(struct ipw_priv *priv, |
7586 | struct ieee80211_hdr_4addr *header) | 7958 | struct ieee80211_hdr_4addr *header) |
7587 | { | 7959 | { |
@@ -7808,15 +8180,21 @@ static void ipw_rx(struct ipw_priv *priv) | |||
7808 | 8180 | ||
7809 | priv->rx_packets++; | 8181 | priv->rx_packets++; |
7810 | 8182 | ||
8183 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
8184 | if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) | ||
8185 | ipw_handle_promiscuous_rx(priv, rxb, &stats); | ||
8186 | #endif | ||
8187 | |||
7811 | #ifdef CONFIG_IPW2200_MONITOR | 8188 | #ifdef CONFIG_IPW2200_MONITOR |
7812 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { | 8189 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { |
7813 | #ifdef CONFIG_IEEE80211_RADIOTAP | 8190 | #ifdef CONFIG_IPW2200_RADIOTAP |
7814 | ipw_handle_data_packet_monitor(priv, | 8191 | |
7815 | rxb, | 8192 | ipw_handle_data_packet_monitor(priv, |
7816 | &stats); | 8193 | rxb, |
8194 | &stats); | ||
7817 | #else | 8195 | #else |
7818 | ipw_handle_data_packet(priv, rxb, | 8196 | ipw_handle_data_packet(priv, rxb, |
7819 | &stats); | 8197 | &stats); |
7820 | #endif | 8198 | #endif |
7821 | break; | 8199 | break; |
7822 | } | 8200 | } |
@@ -7837,9 +8215,9 @@ static void ipw_rx(struct ipw_priv *priv) | |||
7837 | if (network_packet && priv->assoc_network) { | 8215 | if (network_packet && priv->assoc_network) { |
7838 | priv->assoc_network->stats.rssi = | 8216 | priv->assoc_network->stats.rssi = |
7839 | stats.rssi; | 8217 | stats.rssi; |
7840 | average_add(&priv->average_rssi, | 8218 | priv->exp_avg_rssi = |
7841 | stats.rssi); | 8219 | exponential_average(priv->exp_avg_rssi, |
7842 | priv->last_rx_rssi = stats.rssi; | 8220 | stats.rssi, DEPTH_RSSI); |
7843 | } | 8221 | } |
7844 | 8222 | ||
7845 | IPW_DEBUG_RX("Frame: len=%u\n", | 8223 | IPW_DEBUG_RX("Frame: len=%u\n", |
@@ -7982,10 +8360,10 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option) | |||
7982 | IPW_DEBUG_INFO("Bind to static channel %d\n", channel); | 8360 | IPW_DEBUG_INFO("Bind to static channel %d\n", channel); |
7983 | /* TODO: Validate that provided channel is in range */ | 8361 | /* TODO: Validate that provided channel is in range */ |
7984 | } | 8362 | } |
7985 | #ifdef CONFIG_IPW_QOS | 8363 | #ifdef CONFIG_IPW2200_QOS |
7986 | ipw_qos_init(priv, qos_enable, qos_burst_enable, | 8364 | ipw_qos_init(priv, qos_enable, qos_burst_enable, |
7987 | burst_duration_CCK, burst_duration_OFDM); | 8365 | burst_duration_CCK, burst_duration_OFDM); |
7988 | #endif /* CONFIG_IPW_QOS */ | 8366 | #endif /* CONFIG_IPW2200_QOS */ |
7989 | 8367 | ||
7990 | switch (mode) { | 8368 | switch (mode) { |
7991 | case 1: | 8369 | case 1: |
@@ -7996,7 +8374,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option) | |||
7996 | #ifdef CONFIG_IPW2200_MONITOR | 8374 | #ifdef CONFIG_IPW2200_MONITOR |
7997 | case 2: | 8375 | case 2: |
7998 | priv->ieee->iw_mode = IW_MODE_MONITOR; | 8376 | priv->ieee->iw_mode = IW_MODE_MONITOR; |
7999 | #ifdef CONFIG_IEEE80211_RADIOTAP | 8377 | #ifdef CONFIG_IPW2200_RADIOTAP |
8000 | priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; | 8378 | priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; |
8001 | #else | 8379 | #else |
8002 | priv->net_dev->type = ARPHRD_IEEE80211; | 8380 | priv->net_dev->type = ARPHRD_IEEE80211; |
@@ -8251,7 +8629,7 @@ static int ipw_wx_set_mode(struct net_device *dev, | |||
8251 | priv->net_dev->type = ARPHRD_ETHER; | 8629 | priv->net_dev->type = ARPHRD_ETHER; |
8252 | 8630 | ||
8253 | if (wrqu->mode == IW_MODE_MONITOR) | 8631 | if (wrqu->mode == IW_MODE_MONITOR) |
8254 | #ifdef CONFIG_IEEE80211_RADIOTAP | 8632 | #ifdef CONFIG_IPW2200_RADIOTAP |
8255 | priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; | 8633 | priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; |
8256 | #else | 8634 | #else |
8257 | priv->net_dev->type = ARPHRD_IEEE80211; | 8635 | priv->net_dev->type = ARPHRD_IEEE80211; |
@@ -8379,7 +8757,8 @@ static int ipw_wx_get_range(struct net_device *dev, | |||
8379 | /* Event capability (kernel + driver) */ | 8757 | /* Event capability (kernel + driver) */ |
8380 | range->event_capa[0] = (IW_EVENT_CAPA_K_0 | | 8758 | range->event_capa[0] = (IW_EVENT_CAPA_K_0 | |
8381 | IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | | 8759 | IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | |
8382 | IW_EVENT_CAPA_MASK(SIOCGIWAP)); | 8760 | IW_EVENT_CAPA_MASK(SIOCGIWAP) | |
8761 | IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); | ||
8383 | range->event_capa[1] = IW_EVENT_CAPA_K_1; | 8762 | range->event_capa[1] = IW_EVENT_CAPA_K_1; |
8384 | 8763 | ||
8385 | range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | | 8764 | range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | |
@@ -8734,6 +9113,7 @@ static int ipw_wx_get_rate(struct net_device *dev, | |||
8734 | struct ipw_priv *priv = ieee80211_priv(dev); | 9113 | struct ipw_priv *priv = ieee80211_priv(dev); |
8735 | mutex_lock(&priv->mutex); | 9114 | mutex_lock(&priv->mutex); |
8736 | wrqu->bitrate.value = priv->last_rate; | 9115 | wrqu->bitrate.value = priv->last_rate; |
9116 | wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0; | ||
8737 | mutex_unlock(&priv->mutex); | 9117 | mutex_unlock(&priv->mutex); |
8738 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); | 9118 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); |
8739 | return 0; | 9119 | return 0; |
@@ -9351,7 +9731,7 @@ static int ipw_wx_set_monitor(struct net_device *dev, | |||
9351 | IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); | 9731 | IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); |
9352 | if (enable) { | 9732 | if (enable) { |
9353 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { | 9733 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { |
9354 | #ifdef CONFIG_IEEE80211_RADIOTAP | 9734 | #ifdef CONFIG_IPW2200_RADIOTAP |
9355 | priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; | 9735 | priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; |
9356 | #else | 9736 | #else |
9357 | priv->net_dev->type = ARPHRD_IEEE80211; | 9737 | priv->net_dev->type = ARPHRD_IEEE80211; |
@@ -9579,8 +9959,8 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev) | |||
9579 | } | 9959 | } |
9580 | 9960 | ||
9581 | wstats->qual.qual = priv->quality; | 9961 | wstats->qual.qual = priv->quality; |
9582 | wstats->qual.level = average_value(&priv->average_rssi); | 9962 | wstats->qual.level = priv->exp_avg_rssi; |
9583 | wstats->qual.noise = average_value(&priv->average_noise); | 9963 | wstats->qual.noise = priv->exp_avg_noise; |
9584 | wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | | 9964 | wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | |
9585 | IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM; | 9965 | IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM; |
9586 | 9966 | ||
@@ -9608,7 +9988,9 @@ static void init_sys_config(struct ipw_sys_config *sys_config) | |||
9608 | sys_config->disable_unicast_decryption = 1; | 9988 | sys_config->disable_unicast_decryption = 1; |
9609 | sys_config->exclude_multicast_unencrypted = 0; | 9989 | sys_config->exclude_multicast_unencrypted = 0; |
9610 | sys_config->disable_multicast_decryption = 1; | 9990 | sys_config->disable_multicast_decryption = 1; |
9611 | sys_config->antenna_diversity = CFG_SYS_ANTENNA_SLOW_DIV; | 9991 | if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B) |
9992 | antenna = CFG_SYS_ANTENNA_BOTH; | ||
9993 | sys_config->antenna_diversity = antenna; | ||
9612 | sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ | 9994 | sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ |
9613 | sys_config->dot11g_auto_detection = 0; | 9995 | sys_config->dot11g_auto_detection = 0; |
9614 | sys_config->enable_cts_to_self = 0; | 9996 | sys_config->enable_cts_to_self = 0; |
@@ -9647,11 +10029,11 @@ we need to heavily modify the ieee80211_skb_to_txb. | |||
9647 | static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | 10029 | static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, |
9648 | int pri) | 10030 | int pri) |
9649 | { | 10031 | { |
9650 | struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *) | 10032 | struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *) |
9651 | txb->fragments[0]->data; | 10033 | txb->fragments[0]->data; |
9652 | int i = 0; | 10034 | int i = 0; |
9653 | struct tfd_frame *tfd; | 10035 | struct tfd_frame *tfd; |
9654 | #ifdef CONFIG_IPW_QOS | 10036 | #ifdef CONFIG_IPW2200_QOS |
9655 | int tx_id = ipw_get_tx_queue_number(priv, pri); | 10037 | int tx_id = ipw_get_tx_queue_number(priv, pri); |
9656 | struct clx2_tx_queue *txq = &priv->txq[tx_id]; | 10038 | struct clx2_tx_queue *txq = &priv->txq[tx_id]; |
9657 | #else | 10039 | #else |
@@ -9662,9 +10044,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | |||
9662 | u16 remaining_bytes; | 10044 | u16 remaining_bytes; |
9663 | int fc; | 10045 | int fc; |
9664 | 10046 | ||
10047 | hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); | ||
9665 | switch (priv->ieee->iw_mode) { | 10048 | switch (priv->ieee->iw_mode) { |
9666 | case IW_MODE_ADHOC: | 10049 | case IW_MODE_ADHOC: |
9667 | hdr_len = IEEE80211_3ADDR_LEN; | ||
9668 | unicast = !is_multicast_ether_addr(hdr->addr1); | 10050 | unicast = !is_multicast_ether_addr(hdr->addr1); |
9669 | id = ipw_find_station(priv, hdr->addr1); | 10051 | id = ipw_find_station(priv, hdr->addr1); |
9670 | if (id == IPW_INVALID_STATION) { | 10052 | if (id == IPW_INVALID_STATION) { |
@@ -9681,7 +10063,6 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | |||
9681 | case IW_MODE_INFRA: | 10063 | case IW_MODE_INFRA: |
9682 | default: | 10064 | default: |
9683 | unicast = !is_multicast_ether_addr(hdr->addr3); | 10065 | unicast = !is_multicast_ether_addr(hdr->addr3); |
9684 | hdr_len = IEEE80211_3ADDR_LEN; | ||
9685 | id = 0; | 10066 | id = 0; |
9686 | break; | 10067 | break; |
9687 | } | 10068 | } |
@@ -9759,9 +10140,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | |||
9759 | /* No hardware encryption */ | 10140 | /* No hardware encryption */ |
9760 | tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP; | 10141 | tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP; |
9761 | 10142 | ||
9762 | #ifdef CONFIG_IPW_QOS | 10143 | #ifdef CONFIG_IPW2200_QOS |
9763 | ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data), unicast); | 10144 | if (fc & IEEE80211_STYPE_QOS_DATA) |
9764 | #endif /* CONFIG_IPW_QOS */ | 10145 | ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data)); |
10146 | #endif /* CONFIG_IPW2200_QOS */ | ||
9765 | 10147 | ||
9766 | /* payload */ | 10148 | /* payload */ |
9767 | tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2), | 10149 | tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2), |
@@ -9841,12 +10223,12 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, | |||
9841 | static int ipw_net_is_queue_full(struct net_device *dev, int pri) | 10223 | static int ipw_net_is_queue_full(struct net_device *dev, int pri) |
9842 | { | 10224 | { |
9843 | struct ipw_priv *priv = ieee80211_priv(dev); | 10225 | struct ipw_priv *priv = ieee80211_priv(dev); |
9844 | #ifdef CONFIG_IPW_QOS | 10226 | #ifdef CONFIG_IPW2200_QOS |
9845 | int tx_id = ipw_get_tx_queue_number(priv, pri); | 10227 | int tx_id = ipw_get_tx_queue_number(priv, pri); |
9846 | struct clx2_tx_queue *txq = &priv->txq[tx_id]; | 10228 | struct clx2_tx_queue *txq = &priv->txq[tx_id]; |
9847 | #else | 10229 | #else |
9848 | struct clx2_tx_queue *txq = &priv->txq[0]; | 10230 | struct clx2_tx_queue *txq = &priv->txq[0]; |
9849 | #endif /* CONFIG_IPW_QOS */ | 10231 | #endif /* CONFIG_IPW2200_QOS */ |
9850 | 10232 | ||
9851 | if (ipw_queue_space(&txq->q) < txq->q.high_mark) | 10233 | if (ipw_queue_space(&txq->q) < txq->q.high_mark) |
9852 | return 1; | 10234 | return 1; |
@@ -9854,6 +10236,88 @@ static int ipw_net_is_queue_full(struct net_device *dev, int pri) | |||
9854 | return 0; | 10236 | return 0; |
9855 | } | 10237 | } |
9856 | 10238 | ||
10239 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
10240 | static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, | ||
10241 | struct ieee80211_txb *txb) | ||
10242 | { | ||
10243 | struct ieee80211_rx_stats dummystats; | ||
10244 | struct ieee80211_hdr *hdr; | ||
10245 | u8 n; | ||
10246 | u16 filter = priv->prom_priv->filter; | ||
10247 | int hdr_only = 0; | ||
10248 | |||
10249 | if (filter & IPW_PROM_NO_TX) | ||
10250 | return; | ||
10251 | |||
10252 | memset(&dummystats, 0, sizeof(dummystats)); | ||
10253 | |||
10254 | /* Filtering of fragment chains is done against the first fragment */ | ||
10255 | hdr = (void *)txb->fragments[0]->data; | ||
10256 | if (ieee80211_is_management(hdr->frame_ctl)) { | ||
10257 | if (filter & IPW_PROM_NO_MGMT) | ||
10258 | return; | ||
10259 | if (filter & IPW_PROM_MGMT_HEADER_ONLY) | ||
10260 | hdr_only = 1; | ||
10261 | } else if (ieee80211_is_control(hdr->frame_ctl)) { | ||
10262 | if (filter & IPW_PROM_NO_CTL) | ||
10263 | return; | ||
10264 | if (filter & IPW_PROM_CTL_HEADER_ONLY) | ||
10265 | hdr_only = 1; | ||
10266 | } else if (ieee80211_is_data(hdr->frame_ctl)) { | ||
10267 | if (filter & IPW_PROM_NO_DATA) | ||
10268 | return; | ||
10269 | if (filter & IPW_PROM_DATA_HEADER_ONLY) | ||
10270 | hdr_only = 1; | ||
10271 | } | ||
10272 | |||
10273 | for (n = 0; n < txb->nr_frags; ++n) { | ||
10274 | struct sk_buff *src = txb->fragments[n]; | ||
10275 | struct sk_buff *dst; | ||
10276 | struct ieee80211_radiotap_header *rt_hdr; | ||
10277 | int len; | ||
10278 | |||
10279 | if (hdr_only) { | ||
10280 | hdr = (void *)src->data; | ||
10281 | len = ieee80211_get_hdrlen(hdr->frame_ctl); | ||
10282 | } else | ||
10283 | len = src->len; | ||
10284 | |||
10285 | dst = alloc_skb( | ||
10286 | len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC); | ||
10287 | if (!dst) continue; | ||
10288 | |||
10289 | rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr)); | ||
10290 | |||
10291 | rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION; | ||
10292 | rt_hdr->it_pad = 0; | ||
10293 | rt_hdr->it_present = 0; /* after all, it's just an idea */ | ||
10294 | rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL); | ||
10295 | |||
10296 | *(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16( | ||
10297 | ieee80211chan2mhz(priv->channel)); | ||
10298 | if (priv->channel > 14) /* 802.11a */ | ||
10299 | *(u16*)skb_put(dst, sizeof(u16)) = | ||
10300 | cpu_to_le16(IEEE80211_CHAN_OFDM | | ||
10301 | IEEE80211_CHAN_5GHZ); | ||
10302 | else if (priv->ieee->mode == IEEE_B) /* 802.11b */ | ||
10303 | *(u16*)skb_put(dst, sizeof(u16)) = | ||
10304 | cpu_to_le16(IEEE80211_CHAN_CCK | | ||
10305 | IEEE80211_CHAN_2GHZ); | ||
10306 | else /* 802.11g */ | ||
10307 | *(u16*)skb_put(dst, sizeof(u16)) = | ||
10308 | cpu_to_le16(IEEE80211_CHAN_OFDM | | ||
10309 | IEEE80211_CHAN_2GHZ); | ||
10310 | |||
10311 | rt_hdr->it_len = dst->len; | ||
10312 | |||
10313 | memcpy(skb_put(dst, len), src->data, len); | ||
10314 | |||
10315 | if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats)) | ||
10316 | dev_kfree_skb_any(dst); | ||
10317 | } | ||
10318 | } | ||
10319 | #endif | ||
10320 | |||
9857 | static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb, | 10321 | static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb, |
9858 | struct net_device *dev, int pri) | 10322 | struct net_device *dev, int pri) |
9859 | { | 10323 | { |
@@ -9871,6 +10335,11 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb, | |||
9871 | goto fail_unlock; | 10335 | goto fail_unlock; |
9872 | } | 10336 | } |
9873 | 10337 | ||
10338 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
10339 | if (rtap_iface && netif_running(priv->prom_net_dev)) | ||
10340 | ipw_handle_promiscuous_tx(priv, txb); | ||
10341 | #endif | ||
10342 | |||
9874 | ret = ipw_tx_skb(priv, txb, pri); | 10343 | ret = ipw_tx_skb(priv, txb, pri); |
9875 | if (ret == NETDEV_TX_OK) | 10344 | if (ret == NETDEV_TX_OK) |
9876 | __ipw_led_activity_on(priv); | 10345 | __ipw_led_activity_on(priv); |
@@ -10169,10 +10638,10 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv) | |||
10169 | INIT_WORK(&priv->merge_networks, | 10638 | INIT_WORK(&priv->merge_networks, |
10170 | (void (*)(void *))ipw_merge_adhoc_network, priv); | 10639 | (void (*)(void *))ipw_merge_adhoc_network, priv); |
10171 | 10640 | ||
10172 | #ifdef CONFIG_IPW_QOS | 10641 | #ifdef CONFIG_IPW2200_QOS |
10173 | INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate, | 10642 | INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate, |
10174 | priv); | 10643 | priv); |
10175 | #endif /* CONFIG_IPW_QOS */ | 10644 | #endif /* CONFIG_IPW2200_QOS */ |
10176 | 10645 | ||
10177 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | 10646 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) |
10178 | ipw_irq_tasklet, (unsigned long)priv); | 10647 | ipw_irq_tasklet, (unsigned long)priv); |
@@ -10318,12 +10787,21 @@ static int ipw_config(struct ipw_priv *priv) | |||
10318 | |= CFG_BT_COEXISTENCE_OOB; | 10787 | |= CFG_BT_COEXISTENCE_OOB; |
10319 | } | 10788 | } |
10320 | 10789 | ||
10790 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
10791 | if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { | ||
10792 | priv->sys_config.accept_all_data_frames = 1; | ||
10793 | priv->sys_config.accept_non_directed_frames = 1; | ||
10794 | priv->sys_config.accept_all_mgmt_bcpr = 1; | ||
10795 | priv->sys_config.accept_all_mgmt_frames = 1; | ||
10796 | } | ||
10797 | #endif | ||
10798 | |||
10321 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) | 10799 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) |
10322 | priv->sys_config.answer_broadcast_ssid_probe = 1; | 10800 | priv->sys_config.answer_broadcast_ssid_probe = 1; |
10323 | else | 10801 | else |
10324 | priv->sys_config.answer_broadcast_ssid_probe = 0; | 10802 | priv->sys_config.answer_broadcast_ssid_probe = 0; |
10325 | 10803 | ||
10326 | if (ipw_send_system_config(priv, &priv->sys_config)) | 10804 | if (ipw_send_system_config(priv)) |
10327 | goto error; | 10805 | goto error; |
10328 | 10806 | ||
10329 | init_supported_rates(priv, &priv->rates); | 10807 | init_supported_rates(priv, &priv->rates); |
@@ -10335,10 +10813,10 @@ static int ipw_config(struct ipw_priv *priv) | |||
10335 | if (ipw_send_rts_threshold(priv, priv->rts_threshold)) | 10813 | if (ipw_send_rts_threshold(priv, priv->rts_threshold)) |
10336 | goto error; | 10814 | goto error; |
10337 | } | 10815 | } |
10338 | #ifdef CONFIG_IPW_QOS | 10816 | #ifdef CONFIG_IPW2200_QOS |
10339 | IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n"); | 10817 | IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n"); |
10340 | ipw_qos_activate(priv, NULL); | 10818 | ipw_qos_activate(priv, NULL); |
10341 | #endif /* CONFIG_IPW_QOS */ | 10819 | #endif /* CONFIG_IPW2200_QOS */ |
10342 | 10820 | ||
10343 | if (ipw_set_random_seed(priv)) | 10821 | if (ipw_set_random_seed(priv)) |
10344 | goto error; | 10822 | goto error; |
@@ -10639,6 +11117,7 @@ static int ipw_up(struct ipw_priv *priv) | |||
10639 | if (priv->cmdlog == NULL) { | 11117 | if (priv->cmdlog == NULL) { |
10640 | IPW_ERROR("Error allocating %d command log entries.\n", | 11118 | IPW_ERROR("Error allocating %d command log entries.\n", |
10641 | cmdlog); | 11119 | cmdlog); |
11120 | return -ENOMEM; | ||
10642 | } else { | 11121 | } else { |
10643 | memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog); | 11122 | memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog); |
10644 | priv->cmdlog_len = cmdlog; | 11123 | priv->cmdlog_len = cmdlog; |
@@ -10860,6 +11339,10 @@ static struct attribute *ipw_sysfs_entries[] = { | |||
10860 | &dev_attr_led.attr, | 11339 | &dev_attr_led.attr, |
10861 | &dev_attr_speed_scan.attr, | 11340 | &dev_attr_speed_scan.attr, |
10862 | &dev_attr_net_stats.attr, | 11341 | &dev_attr_net_stats.attr, |
11342 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
11343 | &dev_attr_rtap_iface.attr, | ||
11344 | &dev_attr_rtap_filter.attr, | ||
11345 | #endif | ||
10863 | NULL | 11346 | NULL |
10864 | }; | 11347 | }; |
10865 | 11348 | ||
@@ -10868,6 +11351,109 @@ static struct attribute_group ipw_attribute_group = { | |||
10868 | .attrs = ipw_sysfs_entries, | 11351 | .attrs = ipw_sysfs_entries, |
10869 | }; | 11352 | }; |
10870 | 11353 | ||
11354 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
11355 | static int ipw_prom_open(struct net_device *dev) | ||
11356 | { | ||
11357 | struct ipw_prom_priv *prom_priv = ieee80211_priv(dev); | ||
11358 | struct ipw_priv *priv = prom_priv->priv; | ||
11359 | |||
11360 | IPW_DEBUG_INFO("prom dev->open\n"); | ||
11361 | netif_carrier_off(dev); | ||
11362 | netif_stop_queue(dev); | ||
11363 | |||
11364 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { | ||
11365 | priv->sys_config.accept_all_data_frames = 1; | ||
11366 | priv->sys_config.accept_non_directed_frames = 1; | ||
11367 | priv->sys_config.accept_all_mgmt_bcpr = 1; | ||
11368 | priv->sys_config.accept_all_mgmt_frames = 1; | ||
11369 | |||
11370 | ipw_send_system_config(priv); | ||
11371 | } | ||
11372 | |||
11373 | return 0; | ||
11374 | } | ||
11375 | |||
11376 | static int ipw_prom_stop(struct net_device *dev) | ||
11377 | { | ||
11378 | struct ipw_prom_priv *prom_priv = ieee80211_priv(dev); | ||
11379 | struct ipw_priv *priv = prom_priv->priv; | ||
11380 | |||
11381 | IPW_DEBUG_INFO("prom dev->stop\n"); | ||
11382 | |||
11383 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { | ||
11384 | priv->sys_config.accept_all_data_frames = 0; | ||
11385 | priv->sys_config.accept_non_directed_frames = 0; | ||
11386 | priv->sys_config.accept_all_mgmt_bcpr = 0; | ||
11387 | priv->sys_config.accept_all_mgmt_frames = 0; | ||
11388 | |||
11389 | ipw_send_system_config(priv); | ||
11390 | } | ||
11391 | |||
11392 | return 0; | ||
11393 | } | ||
11394 | |||
11395 | static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
11396 | { | ||
11397 | IPW_DEBUG_INFO("prom dev->xmit\n"); | ||
11398 | netif_stop_queue(dev); | ||
11399 | return -EOPNOTSUPP; | ||
11400 | } | ||
11401 | |||
11402 | static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev) | ||
11403 | { | ||
11404 | struct ipw_prom_priv *prom_priv = ieee80211_priv(dev); | ||
11405 | return &prom_priv->ieee->stats; | ||
11406 | } | ||
11407 | |||
11408 | static int ipw_prom_alloc(struct ipw_priv *priv) | ||
11409 | { | ||
11410 | int rc = 0; | ||
11411 | |||
11412 | if (priv->prom_net_dev) | ||
11413 | return -EPERM; | ||
11414 | |||
11415 | priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv)); | ||
11416 | if (priv->prom_net_dev == NULL) | ||
11417 | return -ENOMEM; | ||
11418 | |||
11419 | priv->prom_priv = ieee80211_priv(priv->prom_net_dev); | ||
11420 | priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev); | ||
11421 | priv->prom_priv->priv = priv; | ||
11422 | |||
11423 | strcpy(priv->prom_net_dev->name, "rtap%d"); | ||
11424 | |||
11425 | priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP; | ||
11426 | priv->prom_net_dev->open = ipw_prom_open; | ||
11427 | priv->prom_net_dev->stop = ipw_prom_stop; | ||
11428 | priv->prom_net_dev->get_stats = ipw_prom_get_stats; | ||
11429 | priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit; | ||
11430 | |||
11431 | priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR; | ||
11432 | |||
11433 | rc = register_netdev(priv->prom_net_dev); | ||
11434 | if (rc) { | ||
11435 | free_ieee80211(priv->prom_net_dev); | ||
11436 | priv->prom_net_dev = NULL; | ||
11437 | return rc; | ||
11438 | } | ||
11439 | |||
11440 | return 0; | ||
11441 | } | ||
11442 | |||
11443 | static void ipw_prom_free(struct ipw_priv *priv) | ||
11444 | { | ||
11445 | if (!priv->prom_net_dev) | ||
11446 | return; | ||
11447 | |||
11448 | unregister_netdev(priv->prom_net_dev); | ||
11449 | free_ieee80211(priv->prom_net_dev); | ||
11450 | |||
11451 | priv->prom_net_dev = NULL; | ||
11452 | } | ||
11453 | |||
11454 | #endif | ||
11455 | |||
11456 | |||
10871 | static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 11457 | static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
10872 | { | 11458 | { |
10873 | int err = 0; | 11459 | int err = 0; |
@@ -10959,11 +11545,12 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
10959 | priv->ieee->set_security = shim__set_security; | 11545 | priv->ieee->set_security = shim__set_security; |
10960 | priv->ieee->is_queue_full = ipw_net_is_queue_full; | 11546 | priv->ieee->is_queue_full = ipw_net_is_queue_full; |
10961 | 11547 | ||
10962 | #ifdef CONFIG_IPW_QOS | 11548 | #ifdef CONFIG_IPW2200_QOS |
11549 | priv->ieee->is_qos_active = ipw_is_qos_active; | ||
10963 | priv->ieee->handle_probe_response = ipw_handle_beacon; | 11550 | priv->ieee->handle_probe_response = ipw_handle_beacon; |
10964 | priv->ieee->handle_beacon = ipw_handle_probe_response; | 11551 | priv->ieee->handle_beacon = ipw_handle_probe_response; |
10965 | priv->ieee->handle_assoc_response = ipw_handle_assoc_response; | 11552 | priv->ieee->handle_assoc_response = ipw_handle_assoc_response; |
10966 | #endif /* CONFIG_IPW_QOS */ | 11553 | #endif /* CONFIG_IPW2200_QOS */ |
10967 | 11554 | ||
10968 | priv->ieee->perfect_rssi = -20; | 11555 | priv->ieee->perfect_rssi = -20; |
10969 | priv->ieee->worst_rssi = -85; | 11556 | priv->ieee->worst_rssi = -85; |
@@ -10997,6 +11584,18 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
10997 | goto out_remove_sysfs; | 11584 | goto out_remove_sysfs; |
10998 | } | 11585 | } |
10999 | 11586 | ||
11587 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
11588 | if (rtap_iface) { | ||
11589 | err = ipw_prom_alloc(priv); | ||
11590 | if (err) { | ||
11591 | IPW_ERROR("Failed to register promiscuous network " | ||
11592 | "device (error %d).\n", err); | ||
11593 | unregister_netdev(priv->net_dev); | ||
11594 | goto out_remove_sysfs; | ||
11595 | } | ||
11596 | } | ||
11597 | #endif | ||
11598 | |||
11000 | printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg " | 11599 | printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg " |
11001 | "channels, %d 802.11a channels)\n", | 11600 | "channels, %d 802.11a channels)\n", |
11002 | priv->ieee->geo.name, priv->ieee->geo.bg_channels, | 11601 | priv->ieee->geo.name, priv->ieee->geo.bg_channels, |
@@ -11076,6 +11675,10 @@ static void ipw_pci_remove(struct pci_dev *pdev) | |||
11076 | priv->error = NULL; | 11675 | priv->error = NULL; |
11077 | } | 11676 | } |
11078 | 11677 | ||
11678 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
11679 | ipw_prom_free(priv); | ||
11680 | #endif | ||
11681 | |||
11079 | free_irq(pdev->irq, priv); | 11682 | free_irq(pdev->irq, priv); |
11080 | iounmap(priv->hw_base); | 11683 | iounmap(priv->hw_base); |
11081 | pci_release_regions(pdev); | 11684 | pci_release_regions(pdev); |
@@ -11200,7 +11803,12 @@ MODULE_PARM_DESC(debug, "debug output mask"); | |||
11200 | module_param(channel, int, 0444); | 11803 | module_param(channel, int, 0444); |
11201 | MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); | 11804 | MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); |
11202 | 11805 | ||
11203 | #ifdef CONFIG_IPW_QOS | 11806 | #ifdef CONFIG_IPW2200_PROMISCUOUS |
11807 | module_param(rtap_iface, int, 0444); | ||
11808 | MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)"); | ||
11809 | #endif | ||
11810 | |||
11811 | #ifdef CONFIG_IPW2200_QOS | ||
11204 | module_param(qos_enable, int, 0444); | 11812 | module_param(qos_enable, int, 0444); |
11205 | MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities"); | 11813 |
11206 | 11814 | ||
@@ -11215,7 +11823,7 @@ MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value"); | |||
11215 | 11823 | ||
11216 | module_param(burst_duration_OFDM, int, 0444); | 11824 | module_param(burst_duration_OFDM, int, 0444); |
11217 | MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value"); | 11825 | MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value"); |
11218 | #endif /* CONFIG_IPW_QOS */ | 11826 | #endif /* CONFIG_IPW2200_QOS */ |
11219 | 11827 | ||
11220 | #ifdef CONFIG_IPW2200_MONITOR | 11828 | #ifdef CONFIG_IPW2200_MONITOR |
11221 | module_param(mode, int, 0444); | 11829 | module_param(mode, int, 0444); |
@@ -11238,5 +11846,8 @@ MODULE_PARM_DESC(cmdlog, | |||
11238 | module_param(roaming, int, 0444); | 11846 | module_param(roaming, int, 0444); |
11239 | MODULE_PARM_DESC(roaming, "enable roaming support (default on)"); | 11847 | MODULE_PARM_DESC(roaming, "enable roaming support (default on)"); |
11240 | 11848 | ||
11849 | module_param(antenna, int, 0444); | ||
11850 | MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the one with lower background noise), 3=Aux"); | ||
11851 | |||
11241 | module_exit(ipw_exit); | 11852 | module_exit(ipw_exit); |
11242 | module_init(ipw_init); | 11853 | module_init(ipw_init); |
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index 4b9804900702..6044c0be2c80 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h | |||
@@ -789,7 +789,7 @@ struct ipw_sys_config { | |||
789 | u8 bt_coexist_collision_thr; | 789 | u8 bt_coexist_collision_thr; |
790 | u8 silence_threshold; | 790 | u8 silence_threshold; |
791 | u8 accept_all_mgmt_bcpr; | 791 | u8 accept_all_mgmt_bcpr; |
792 | u8 accept_all_mgtm_frames; | 792 | u8 accept_all_mgmt_frames; |
793 | u8 pass_noise_stats_to_host; | 793 | u8 pass_noise_stats_to_host; |
794 | u8 reserved3; | 794 | u8 reserved3; |
795 | } __attribute__ ((packed)); | 795 | } __attribute__ ((packed)); |
@@ -1122,6 +1122,52 @@ struct ipw_fw_error { | |||
1122 | u8 payload[0]; | 1122 | u8 payload[0]; |
1123 | } __attribute__ ((packed)); | 1123 | } __attribute__ ((packed)); |
1124 | 1124 | ||
1125 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
1126 | |||
1127 | enum ipw_prom_filter { | ||
1128 | IPW_PROM_CTL_HEADER_ONLY = (1 << 0), | ||
1129 | IPW_PROM_MGMT_HEADER_ONLY = (1 << 1), | ||
1130 | IPW_PROM_DATA_HEADER_ONLY = (1 << 2), | ||
1131 | IPW_PROM_ALL_HEADER_ONLY = 0xf, /* bits 0..3 */ | ||
1132 | IPW_PROM_NO_TX = (1 << 4), | ||
1133 | IPW_PROM_NO_RX = (1 << 5), | ||
1134 | IPW_PROM_NO_CTL = (1 << 6), | ||
1135 | IPW_PROM_NO_MGMT = (1 << 7), | ||
1136 | IPW_PROM_NO_DATA = (1 << 8), | ||
1137 | }; | ||
1138 | |||
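The filter bits above compose. As an editor's sketch (not part of the patch), a capture setup that keeps management frames whole, truncates data frames to their 802.11 header and drops control traffic entirely would use:

    u16 filter = IPW_PROM_NO_CTL | IPW_PROM_DATA_HEADER_ONLY;

ipw_handle_promiscuous_rx() and ipw_handle_promiscuous_tx() in ipw2200.c consult exactly these bits before mirroring a frame to the rtap device.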
1139 | struct ipw_priv; | ||
1140 | struct ipw_prom_priv { | ||
1141 | struct ipw_priv *priv; | ||
1142 | struct ieee80211_device *ieee; | ||
1143 | enum ipw_prom_filter filter; | ||
1144 | int tx_packets; | ||
1145 | int rx_packets; | ||
1146 | }; | ||
1147 | #endif | ||
1148 | |||
1149 | #if defined(CONFIG_IPW2200_RADIOTAP) || defined(CONFIG_IPW2200_PROMISCUOUS) | ||
1150 | /* Magic struct that slots into the radiotap header -- no reason | ||
1151 | * to build this manually element by element, we can write it much | ||
1152 | * more efficiently than we can parse it. ORDER MATTERS HERE | ||
1153 | * | ||
1154 | * When sent to us via the simulated Rx interface in sysfs, the entire | ||
1155 | * structure is provided regardless of any bits unset. | ||
1156 | */ | ||
1157 | struct ipw_rt_hdr { | ||
1158 | struct ieee80211_radiotap_header rt_hdr; | ||
1159 | u64 rt_tsf; /* TSF */ | ||
1160 | u8 rt_flags; /* radiotap packet flags */ | ||
1161 | u8 rt_rate; /* rate in 500kb/s */ | ||
1162 | u16 rt_channel; /* channel in MHz */ | ||
1163 | u16 rt_chbitmask; /* channel bitfield */ | ||
1164 | s8 rt_dbmsignal; /* signal in dBm, kluged to signed */ | ||
1165 | s8 rt_dbmnoise; | ||
1166 | u8 rt_antenna; /* antenna number */ | ||
1167 | u8 payload[0]; /* payload... */ | ||
1168 | } __attribute__ ((packed)); | ||
1169 | #endif | ||
1170 | |||
1125 | struct ipw_priv { | 1171 | struct ipw_priv { |
1126 | /* ieee device used by generic ieee processing code */ | 1172 | /* ieee device used by generic ieee processing code */ |
1127 | struct ieee80211_device *ieee; | 1173 | struct ieee80211_device *ieee; |
@@ -1133,6 +1179,12 @@ struct ipw_priv { | |||
1133 | struct pci_dev *pci_dev; | 1179 | struct pci_dev *pci_dev; |
1134 | struct net_device *net_dev; | 1180 | struct net_device *net_dev; |
1135 | 1181 | ||
1182 | #ifdef CONFIG_IPW2200_PROMISCUOUS | ||
1183 | /* Promiscuous mode */ | ||
1184 | struct ipw_prom_priv *prom_priv; | ||
1185 | struct net_device *prom_net_dev; | ||
1186 | #endif | ||
1187 | |||
1136 | /* pci hardware address support */ | 1188 | /* pci hardware address support */ |
1137 | void __iomem *hw_base; | 1189 | void __iomem *hw_base; |
1138 | unsigned long hw_len; | 1190 | unsigned long hw_len; |
@@ -1153,11 +1205,9 @@ struct ipw_priv { | |||
1153 | u32 config; | 1205 | u32 config; |
1154 | u32 capability; | 1206 | u32 capability; |
1155 | 1207 | ||
1156 | u8 last_rx_rssi; | ||
1157 | u8 last_noise; | ||
1158 | struct average average_missed_beacons; | 1208 | struct average average_missed_beacons; |
1159 | struct average average_rssi; | 1209 | s16 exp_avg_rssi; |
1160 | struct average average_noise; | 1210 | s16 exp_avg_noise; |
1161 | u32 port_type; | 1211 | u32 port_type; |
1162 | int rx_bufs_min; /**< minimum number of bufs in Rx queue */ | 1212 | int rx_bufs_min; /**< minimum number of bufs in Rx queue */ |
1163 | int rx_pend_max; /**< maximum pending buffers for one IRQ */ | 1213 | int rx_pend_max; /**< maximum pending buffers for one IRQ */ |
@@ -1308,6 +1358,29 @@ struct ipw_priv { | |||
1308 | 1358 | ||
1309 | /* debug macros */ | 1359 | /* debug macros */ |
1310 | 1360 | ||
1361 | /* Debug and printf string expansion helpers for printing bitfields */ | ||
1362 | #define BIT_FMT8 "%c%c%c%c-%c%c%c%c" | ||
1363 | #define BIT_FMT16 BIT_FMT8 ":" BIT_FMT8 | ||
1364 | #define BIT_FMT32 BIT_FMT16 " " BIT_FMT16 | ||
1365 | |||
1366 | #define BITC(x,y) (((x>>y)&1)?'1':'0') | ||
1367 | #define BIT_ARG8(x) \ | ||
1368 | BITC(x,7),BITC(x,6),BITC(x,5),BITC(x,4),\ | ||
1369 | BITC(x,3),BITC(x,2),BITC(x,1),BITC(x,0) | ||
1370 | |||
1371 | #define BIT_ARG16(x) \ | ||
1372 | BITC(x,15),BITC(x,14),BITC(x,13),BITC(x,12),\ | ||
1373 | BITC(x,11),BITC(x,10),BITC(x,9),BITC(x,8),\ | ||
1374 | BIT_ARG8(x) | ||
1375 | |||
1376 | #define BIT_ARG32(x) \ | ||
1377 | BITC(x,31),BITC(x,30),BITC(x,29),BITC(x,28),\ | ||
1378 | BITC(x,27),BITC(x,26),BITC(x,25),BITC(x,24),\ | ||
1379 | BITC(x,23),BITC(x,22),BITC(x,21),BITC(x,20),\ | ||
1380 | BITC(x,19),BITC(x,18),BITC(x,17),BITC(x,16),\ | ||
1381 | BIT_ARG16(x) | ||
1382 | |||
1383 | |||
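A quick illustration of the bitfield helpers above (an editor's addition, not part of the patch), printing a 16-bit status word as grouped binary digits:

    u16 status = 0xA05C;
    printk(KERN_DEBUG "status: " BIT_FMT16 "\n", BIT_ARG16(status));
    /* prints: status: 1010-0000:0101-1100 */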
1311 | #ifdef CONFIG_IPW2200_DEBUG | 1384 | #ifdef CONFIG_IPW2200_DEBUG |
1312 | #define IPW_DEBUG(level, fmt, args...) \ | 1385 | #define IPW_DEBUG(level, fmt, args...) \ |
1313 | do { if (ipw_debug_level & (level)) \ | 1386 | do { if (ipw_debug_level & (level)) \ |
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index c2d0b09e0418..b563decf599e 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -201,41 +201,12 @@ static struct { | |||
201 | /* Data types */ | 201 | /* Data types */ |
202 | /********************************************************************/ | 202 | /********************************************************************/ |
203 | 203 | ||
204 | /* Used in Event handling. | 204 | /* Beginning of the Tx descriptor, used in TxExc handling */ |
205 | * We avoid nested structures as they break on ARM -- Moustafa */ | 205 | struct hermes_txexc_data { |
206 | struct hermes_tx_descriptor_802_11 { | 206 | struct hermes_tx_descriptor desc; |
207 | /* hermes_tx_descriptor */ | ||
208 | __le16 status; | ||
209 | __le16 reserved1; | ||
210 | __le16 reserved2; | ||
211 | __le32 sw_support; | ||
212 | u8 retry_count; | ||
213 | u8 tx_rate; | ||
214 | __le16 tx_control; | ||
215 | |||
216 | /* ieee80211_hdr */ | ||
217 | __le16 frame_ctl; | 207 | __le16 frame_ctl; |
218 | __le16 duration_id; | 208 | __le16 duration_id; |
219 | u8 addr1[ETH_ALEN]; | 209 | u8 addr1[ETH_ALEN]; |
220 | u8 addr2[ETH_ALEN]; | ||
221 | u8 addr3[ETH_ALEN]; | ||
222 | __le16 seq_ctl; | ||
223 | u8 addr4[ETH_ALEN]; | ||
224 | |||
225 | __le16 data_len; | ||
226 | |||
227 | /* ethhdr */ | ||
228 | u8 h_dest[ETH_ALEN]; /* destination eth addr */ | ||
229 | u8 h_source[ETH_ALEN]; /* source ether addr */ | ||
230 | __be16 h_proto; /* packet type ID field */ | ||
231 | |||
232 | /* p8022_hdr */ | ||
233 | u8 dsap; | ||
234 | u8 ssap; | ||
235 | u8 ctrl; | ||
236 | u8 oui[3]; | ||
237 | |||
238 | __be16 ethertype; | ||
239 | } __attribute__ ((packed)); | 210 | } __attribute__ ((packed)); |
240 | 211 | ||
241 | /* Rx frame header except compatibility 802.3 header */ | 212 | /* Rx frame header except compatibility 802.3 header */ |
@@ -450,53 +421,39 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev) | |||
450 | hermes_t *hw = &priv->hw; | 421 | hermes_t *hw = &priv->hw; |
451 | int err = 0; | 422 | int err = 0; |
452 | u16 txfid = priv->txfid; | 423 | u16 txfid = priv->txfid; |
453 | char *p; | ||
454 | struct ethhdr *eh; | 424 | struct ethhdr *eh; |
455 | int len, data_len, data_off; | 425 | int data_off; |
456 | struct hermes_tx_descriptor desc; | 426 | struct hermes_tx_descriptor desc; |
457 | unsigned long flags; | 427 | unsigned long flags; |
458 | 428 | ||
459 | TRACE_ENTER(dev->name); | ||
460 | |||
461 | if (! netif_running(dev)) { | 429 | if (! netif_running(dev)) { |
462 | printk(KERN_ERR "%s: Tx on stopped device!\n", | 430 | printk(KERN_ERR "%s: Tx on stopped device!\n", |
463 | dev->name); | 431 | dev->name); |
464 | TRACE_EXIT(dev->name); | 432 | return NETDEV_TX_BUSY; |
465 | return 1; | ||
466 | } | 433 | } |
467 | 434 | ||
468 | if (netif_queue_stopped(dev)) { | 435 | if (netif_queue_stopped(dev)) { |
469 | printk(KERN_DEBUG "%s: Tx while transmitter busy!\n", | 436 | printk(KERN_DEBUG "%s: Tx while transmitter busy!\n", |
470 | dev->name); | 437 | dev->name); |
471 | TRACE_EXIT(dev->name); | 438 | return NETDEV_TX_BUSY; |
472 | return 1; | ||
473 | } | 439 | } |
474 | 440 | ||
475 | if (orinoco_lock(priv, &flags) != 0) { | 441 | if (orinoco_lock(priv, &flags) != 0) { |
476 | printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n", | 442 | printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n", |
477 | dev->name); | 443 | dev->name); |
478 | TRACE_EXIT(dev->name); | 444 | return NETDEV_TX_BUSY; |
479 | return 1; | ||
480 | } | 445 | } |
481 | 446 | ||
482 | if (! netif_carrier_ok(dev) || (priv->iw_mode == IW_MODE_MONITOR)) { | 447 | if (! netif_carrier_ok(dev) || (priv->iw_mode == IW_MODE_MONITOR)) { |
483 | /* Oops, the firmware hasn't established a connection, | 448 | /* Oops, the firmware hasn't established a connection, |
484 | silently drop the packet (this seems to be the | 449 | silently drop the packet (this seems to be the |
485 | safest approach). */ | 450 | safest approach). */ |
486 | stats->tx_errors++; | 451 | goto drop; |
487 | orinoco_unlock(priv, &flags); | ||
488 | dev_kfree_skb(skb); | ||
489 | TRACE_EXIT(dev->name); | ||
490 | return 0; | ||
491 | } | 452 | } |
492 | 453 | ||
493 | /* Length of the packet body */ | 454 | /* Check packet length */ |
494 | /* FIXME: what if the skb is smaller than this? */ | 455 | if (skb->len < ETH_HLEN) |
495 | len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN); | 456 | goto drop; |
496 | skb = skb_padto(skb, len); | ||
497 | if (skb == NULL) | ||
498 | goto fail; | ||
499 | len -= ETH_HLEN; | ||
500 | 457 | ||
501 | eh = (struct ethhdr *)skb->data; | 458 | eh = (struct ethhdr *)skb->data; |
502 | 459 | ||
@@ -507,8 +464,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev) | |||
507 | if (net_ratelimit()) | 464 | if (net_ratelimit()) |
508 | printk(KERN_ERR "%s: Error %d writing Tx descriptor " | 465 | printk(KERN_ERR "%s: Error %d writing Tx descriptor " |
509 | "to BAP\n", dev->name, err); | 466 | "to BAP\n", dev->name, err); |
510 | stats->tx_errors++; | 467 | goto busy; |
511 | goto fail; | ||
512 | } | 468 | } |
513 | 469 | ||
514 | /* Clear the 802.11 header and data length fields - some | 470 | /* Clear the 802.11 header and data length fields - some |
@@ -519,50 +475,38 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev) | |||
519 | 475 | ||
520 | /* Encapsulate Ethernet-II frames */ | 476 | /* Encapsulate Ethernet-II frames */ |
521 | if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */ | 477 | if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */ |
522 | struct header_struct hdr; | 478 | struct header_struct { |
523 | data_len = len; | 479 | struct ethhdr eth; /* 802.3 header */ |
524 | data_off = HERMES_802_3_OFFSET + sizeof(hdr); | 480 | u8 encap[6]; /* 802.2 header */ |
525 | p = skb->data + ETH_HLEN; | 481 | } __attribute__ ((packed)) hdr; |
526 | 482 | ||
527 | /* 802.3 header */ | 483 | /* Strip destination and source from the data */ |
528 | memcpy(hdr.dest, eh->h_dest, ETH_ALEN); | 484 | skb_pull(skb, 2 * ETH_ALEN); |
529 | memcpy(hdr.src, eh->h_source, ETH_ALEN); | 485 | data_off = HERMES_802_2_OFFSET + sizeof(encaps_hdr); |
530 | hdr.len = htons(data_len + ENCAPS_OVERHEAD); | 486 | |
531 | 487 | /* And move them to a separate header */ | |
532 | /* 802.2 header */ | 488 | memcpy(&hdr.eth, eh, 2 * ETH_ALEN); |
533 | memcpy(&hdr.dsap, &encaps_hdr, sizeof(encaps_hdr)); | 489 | hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len); |
534 | 490 | memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr)); | |
535 | hdr.ethertype = eh->h_proto; | 491 | |
536 | err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr), | 492 | err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr), |
537 | txfid, HERMES_802_3_OFFSET); | 493 | txfid, HERMES_802_3_OFFSET); |
538 | if (err) { | 494 | if (err) { |
539 | if (net_ratelimit()) | 495 | if (net_ratelimit()) |
540 | printk(KERN_ERR "%s: Error %d writing packet " | 496 | printk(KERN_ERR "%s: Error %d writing packet " |
541 | "header to BAP\n", dev->name, err); | 497 | "header to BAP\n", dev->name, err); |
542 | stats->tx_errors++; | 498 | goto busy; |
543 | goto fail; | ||
544 | } | 499 | } |
545 | /* Actual xfer length - allow for padding */ | ||
546 | len = ALIGN(data_len, 2); | ||
547 | if (len < ETH_ZLEN - ETH_HLEN) | ||
548 | len = ETH_ZLEN - ETH_HLEN; | ||
549 | } else { /* IEEE 802.3 frame */ | 500 | } else { /* IEEE 802.3 frame */ |
550 | data_len = len + ETH_HLEN; | ||
551 | data_off = HERMES_802_3_OFFSET; | 501 | data_off = HERMES_802_3_OFFSET; |
552 | p = skb->data; | ||
553 | /* Actual xfer length - round up for odd length packets */ | ||
554 | len = ALIGN(data_len, 2); | ||
555 | if (len < ETH_ZLEN) | ||
556 | len = ETH_ZLEN; | ||
557 | } | 502 | } |
558 | 503 | ||
559 | err = hermes_bap_pwrite_pad(hw, USER_BAP, p, data_len, len, | 504 | err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len, |
560 | txfid, data_off); | 505 | txfid, data_off); |
561 | if (err) { | 506 | if (err) { |
562 | printk(KERN_ERR "%s: Error %d writing packet to BAP\n", | 507 | printk(KERN_ERR "%s: Error %d writing packet to BAP\n", |
563 | dev->name, err); | 508 | dev->name, err); |
564 | stats->tx_errors++; | 509 | goto busy; |
565 | goto fail; | ||
566 | } | 510 | } |
567 | 511 | ||
568 | /* Finally, we actually initiate the send */ | 512 | /* Finally, we actually initiate the send */ |
@@ -575,25 +519,27 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev) | |||
575 | if (net_ratelimit()) | 519 | if (net_ratelimit()) |
576 | printk(KERN_ERR "%s: Error %d transmitting packet\n", | 520 | printk(KERN_ERR "%s: Error %d transmitting packet\n", |
577 | dev->name, err); | 521 | dev->name, err); |
578 | stats->tx_errors++; | 522 | goto busy; |
579 | goto fail; | ||
580 | } | 523 | } |
581 | 524 | ||
582 | dev->trans_start = jiffies; | 525 | dev->trans_start = jiffies; |
583 | stats->tx_bytes += data_off + data_len; | 526 | stats->tx_bytes += data_off + skb->len; |
527 | goto ok; | ||
584 | 528 | ||
585 | orinoco_unlock(priv, &flags); | 529 | drop: |
530 | stats->tx_errors++; | ||
531 | stats->tx_dropped++; | ||
586 | 532 | ||
533 | ok: | ||
534 | orinoco_unlock(priv, &flags); | ||
587 | dev_kfree_skb(skb); | 535 | dev_kfree_skb(skb); |
536 | return NETDEV_TX_OK; | ||
588 | 537 | ||
589 | TRACE_EXIT(dev->name); | 538 | busy: |
590 | 539 | if (err == -EIO) | |
591 | return 0; | 540 | schedule_work(&priv->reset_work); |
592 | fail: | ||
593 | TRACE_EXIT(dev->name); | ||
594 | |||
595 | orinoco_unlock(priv, &flags); | 541 | orinoco_unlock(priv, &flags); |
596 | return err; | 542 | return NETDEV_TX_BUSY; |
597 | } | 543 | } |
598 | 544 | ||
599 | static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw) | 545 | static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw) |
@@ -629,7 +575,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw) | |||
629 | struct net_device_stats *stats = &priv->stats; | 575 | struct net_device_stats *stats = &priv->stats; |
630 | u16 fid = hermes_read_regn(hw, TXCOMPLFID); | 576 | u16 fid = hermes_read_regn(hw, TXCOMPLFID); |
631 | u16 status; | 577 | u16 status; |
632 | struct hermes_tx_descriptor_802_11 hdr; | 578 | struct hermes_txexc_data hdr; |
633 | int err = 0; | 579 | int err = 0; |
634 | 580 | ||
635 | if (fid == DUMMY_FID) | 581 | if (fid == DUMMY_FID) |
@@ -637,8 +583,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw) | |||
637 | 583 | ||
638 | /* Read part of the frame header - we need status and addr1 */ | 584 | /* Read part of the frame header - we need status and addr1 */ |
639 | err = hermes_bap_pread(hw, IRQ_BAP, &hdr, | 585 | err = hermes_bap_pread(hw, IRQ_BAP, &hdr, |
640 | offsetof(struct hermes_tx_descriptor_802_11, | 586 | sizeof(struct hermes_txexc_data), |
641 | addr2), | ||
642 | fid, 0); | 587 | fid, 0); |
643 | 588 | ||
644 | hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID); | 589 | hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID); |
@@ -658,7 +603,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw) | |||
658 | * exceeded, because that's the only status that really means | 603 | * exceeded, because that's the only status that really means |
659 | * that this particular node went away. | 604 | * that this particular node went away. |
660 | * Other errors mean that *we* screwed up. - Jean II */ | 605 | * Other errors mean that *we* screwed up. - Jean II */ |
661 | status = le16_to_cpu(hdr.status); | 606 | status = le16_to_cpu(hdr.desc.status); |
662 | if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) { | 607 | if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) { |
663 | union iwreq_data wrqu; | 608 | union iwreq_data wrqu; |
664 | 609 | ||
@@ -1398,16 +1343,12 @@ int __orinoco_down(struct net_device *dev) | |||
1398 | return 0; | 1343 | return 0; |
1399 | } | 1344 | } |
1400 | 1345 | ||
1401 | int orinoco_reinit_firmware(struct net_device *dev) | 1346 | static int orinoco_allocate_fid(struct net_device *dev) |
1402 | { | 1347 | { |
1403 | struct orinoco_private *priv = netdev_priv(dev); | 1348 | struct orinoco_private *priv = netdev_priv(dev); |
1404 | struct hermes *hw = &priv->hw; | 1349 | struct hermes *hw = &priv->hw; |
1405 | int err; | 1350 | int err; |
1406 | 1351 | ||
1407 | err = hermes_init(hw); | ||
1408 | if (err) | ||
1409 | return err; | ||
1410 | |||
1411 | err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid); | 1352 | err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid); |
1412 | if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) { | 1353 | if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) { |
1413 | /* Try workaround for old Symbol firmware bug */ | 1354 | /* Try workaround for old Symbol firmware bug */ |
@@ -1426,6 +1367,19 @@ int orinoco_reinit_firmware(struct net_device *dev) | |||
1426 | return err; | 1367 | return err; |
1427 | } | 1368 | } |
1428 | 1369 | ||
1370 | int orinoco_reinit_firmware(struct net_device *dev) | ||
1371 | { | ||
1372 | struct orinoco_private *priv = netdev_priv(dev); | ||
1373 | struct hermes *hw = &priv->hw; | ||
1374 | int err; | ||
1375 | |||
1376 | err = hermes_init(hw); | ||
1377 | if (!err) | ||
1378 | err = orinoco_allocate_fid(dev); | ||
1379 | |||
1380 | return err; | ||
1381 | } | ||
1382 | |||
1429 | static int __orinoco_hw_set_bitrate(struct orinoco_private *priv) | 1383 | static int __orinoco_hw_set_bitrate(struct orinoco_private *priv) |
1430 | { | 1384 | { |
1431 | hermes_t *hw = &priv->hw; | 1385 | hermes_t *hw = &priv->hw; |
@@ -2272,14 +2226,12 @@ static int orinoco_init(struct net_device *dev) | |||
2272 | u16 reclen; | 2226 | u16 reclen; |
2273 | int len; | 2227 | int len; |
2274 | 2228 | ||
2275 | TRACE_ENTER(dev->name); | ||
2276 | |||
2277 | /* No need to lock, the hw_unavailable flag is already set in | 2229 | /* No need to lock, the hw_unavailable flag is already set in |
2278 | * alloc_orinocodev() */ | 2230 | * alloc_orinocodev() */ |
2279 | priv->nicbuf_size = IEEE80211_FRAME_LEN + ETH_HLEN; | 2231 | priv->nicbuf_size = IEEE80211_FRAME_LEN + ETH_HLEN; |
2280 | 2232 | ||
2281 | /* Initialize the firmware */ | 2233 | /* Initialize the firmware */ |
2282 | err = orinoco_reinit_firmware(dev); | 2234 | err = hermes_init(hw); |
2283 | if (err != 0) { | 2235 | if (err != 0) { |
2284 | printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n", | 2236 | printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n", |
2285 | dev->name, err); | 2237 | dev->name, err); |
@@ -2337,6 +2289,13 @@ static int orinoco_init(struct net_device *dev) | |||
2337 | 2289 | ||
2338 | printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick); | 2290 | printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick); |
2339 | 2291 | ||
2292 | err = orinoco_allocate_fid(dev); | ||
2293 | if (err) { | ||
2294 | printk(KERN_ERR "%s: failed to allocate NIC buffer!\n", | ||
2295 | dev->name); | ||
2296 | goto out; | ||
2297 | } | ||
2298 | |||
2340 | /* Get allowed channels */ | 2299 | /* Get allowed channels */ |
2341 | err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST, | 2300 | err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST, |
2342 | &priv->channel_mask); | 2301 | &priv->channel_mask); |
@@ -2427,7 +2386,6 @@ static int orinoco_init(struct net_device *dev) | |||
2427 | printk(KERN_DEBUG "%s: ready\n", dev->name); | 2386 | printk(KERN_DEBUG "%s: ready\n", dev->name); |
2428 | 2387 | ||
2429 | out: | 2388 | out: |
2430 | TRACE_EXIT(dev->name); | ||
2431 | return err; | 2389 | return err; |
2432 | } | 2390 | } |
2433 | 2391 | ||
@@ -2795,8 +2753,6 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev, | |||
2795 | int numrates; | 2753 | int numrates; |
2796 | int i, k; | 2754 | int i, k; |
2797 | 2755 | ||
2798 | TRACE_ENTER(dev->name); | ||
2799 | |||
2800 | rrq->length = sizeof(struct iw_range); | 2756 | rrq->length = sizeof(struct iw_range); |
2801 | memset(range, 0, sizeof(struct iw_range)); | 2757 | memset(range, 0, sizeof(struct iw_range)); |
2802 | 2758 | ||
@@ -2886,8 +2842,6 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev, | |||
2886 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); | 2842 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); |
2887 | IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP); | 2843 | IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP); |
2888 | 2844 | ||
2889 | TRACE_EXIT(dev->name); | ||
2890 | |||
2891 | return 0; | 2845 | return 0; |
2892 | } | 2846 | } |
2893 | 2847 | ||
@@ -3069,8 +3023,6 @@ static int orinoco_ioctl_getessid(struct net_device *dev, | |||
3069 | int err = 0; | 3023 | int err = 0; |
3070 | unsigned long flags; | 3024 | unsigned long flags; |
3071 | 3025 | ||
3072 | TRACE_ENTER(dev->name); | ||
3073 | |||
3074 | if (netif_running(dev)) { | 3026 | if (netif_running(dev)) { |
3075 | err = orinoco_hw_get_essid(priv, &active, essidbuf); | 3027 | err = orinoco_hw_get_essid(priv, &active, essidbuf); |
3076 | if (err) | 3028 | if (err) |
@@ -3085,8 +3037,6 @@ static int orinoco_ioctl_getessid(struct net_device *dev, | |||
3085 | erq->flags = 1; | 3037 | erq->flags = 1; |
3086 | erq->length = strlen(essidbuf) + 1; | 3038 | erq->length = strlen(essidbuf) + 1; |
3087 | 3039 | ||
3088 | TRACE_EXIT(dev->name); | ||
3089 | |||
3090 | return 0; | 3040 | return 0; |
3091 | } | 3041 | } |
3092 | 3042 | ||
@@ -4347,69 +4297,6 @@ static struct ethtool_ops orinoco_ethtool_ops = { | |||
4347 | }; | 4297 | }; |
4348 | 4298 | ||
4349 | /********************************************************************/ | 4299 | /********************************************************************/ |
4350 | /* Debugging */ | ||
4351 | /********************************************************************/ | ||
4352 | |||
4353 | #if 0 | ||
4354 | static void show_rx_frame(struct orinoco_rxframe_hdr *frame) | ||
4355 | { | ||
4356 | printk(KERN_DEBUG "RX descriptor:\n"); | ||
4357 | printk(KERN_DEBUG " status = 0x%04x\n", frame->desc.status); | ||
4358 | printk(KERN_DEBUG " time = 0x%08x\n", frame->desc.time); | ||
4359 | printk(KERN_DEBUG " silence = 0x%02x\n", frame->desc.silence); | ||
4360 | printk(KERN_DEBUG " signal = 0x%02x\n", frame->desc.signal); | ||
4361 | printk(KERN_DEBUG " rate = 0x%02x\n", frame->desc.rate); | ||
4362 | printk(KERN_DEBUG " rxflow = 0x%02x\n", frame->desc.rxflow); | ||
4363 | printk(KERN_DEBUG " reserved = 0x%08x\n", frame->desc.reserved); | ||
4364 | |||
4365 | printk(KERN_DEBUG "IEEE 802.11 header:\n"); | ||
4366 | printk(KERN_DEBUG " frame_ctl = 0x%04x\n", | ||
4367 | frame->p80211.frame_ctl); | ||
4368 | printk(KERN_DEBUG " duration_id = 0x%04x\n", | ||
4369 | frame->p80211.duration_id); | ||
4370 | printk(KERN_DEBUG " addr1 = %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
4371 | frame->p80211.addr1[0], frame->p80211.addr1[1], | ||
4372 | frame->p80211.addr1[2], frame->p80211.addr1[3], | ||
4373 | frame->p80211.addr1[4], frame->p80211.addr1[5]); | ||
4374 | printk(KERN_DEBUG " addr2 = %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
4375 | frame->p80211.addr2[0], frame->p80211.addr2[1], | ||
4376 | frame->p80211.addr2[2], frame->p80211.addr2[3], | ||
4377 | frame->p80211.addr2[4], frame->p80211.addr2[5]); | ||
4378 | printk(KERN_DEBUG " addr3 = %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
4379 | frame->p80211.addr3[0], frame->p80211.addr3[1], | ||
4380 | frame->p80211.addr3[2], frame->p80211.addr3[3], | ||
4381 | frame->p80211.addr3[4], frame->p80211.addr3[5]); | ||
4382 | printk(KERN_DEBUG " seq_ctl = 0x%04x\n", | ||
4383 | frame->p80211.seq_ctl); | ||
4384 | printk(KERN_DEBUG " addr4 = %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
4385 | frame->p80211.addr4[0], frame->p80211.addr4[1], | ||
4386 | frame->p80211.addr4[2], frame->p80211.addr4[3], | ||
4387 | frame->p80211.addr4[4], frame->p80211.addr4[5]); | ||
4388 | printk(KERN_DEBUG " data_len = 0x%04x\n", | ||
4389 | frame->p80211.data_len); | ||
4390 | |||
4391 | printk(KERN_DEBUG "IEEE 802.3 header:\n"); | ||
4392 | printk(KERN_DEBUG " dest = %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
4393 | frame->p8023.h_dest[0], frame->p8023.h_dest[1], | ||
4394 | frame->p8023.h_dest[2], frame->p8023.h_dest[3], | ||
4395 | frame->p8023.h_dest[4], frame->p8023.h_dest[5]); | ||
4396 | printk(KERN_DEBUG " src = %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
4397 | frame->p8023.h_source[0], frame->p8023.h_source[1], | ||
4398 | frame->p8023.h_source[2], frame->p8023.h_source[3], | ||
4399 | frame->p8023.h_source[4], frame->p8023.h_source[5]); | ||
4400 | printk(KERN_DEBUG " len = 0x%04x\n", frame->p8023.h_proto); | ||
4401 | |||
4402 | printk(KERN_DEBUG "IEEE 802.2 LLC/SNAP header:\n"); | ||
4403 | printk(KERN_DEBUG " DSAP = 0x%02x\n", frame->p8022.dsap); | ||
4404 | printk(KERN_DEBUG " SSAP = 0x%02x\n", frame->p8022.ssap); | ||
4405 | printk(KERN_DEBUG " ctrl = 0x%02x\n", frame->p8022.ctrl); | ||
4406 | printk(KERN_DEBUG " OUI = %02x:%02x:%02x\n", | ||
4407 | frame->p8022.oui[0], frame->p8022.oui[1], frame->p8022.oui[2]); | ||
4408 | printk(KERN_DEBUG " ethertype = 0x%04x\n", frame->ethertype); | ||
4409 | } | ||
4410 | #endif /* 0 */ | ||
4411 | |||
4412 | /********************************************************************/ | ||
4413 | /* Module initialization */ | 4300 | /* Module initialization */ |
4414 | /********************************************************************/ | 4301 | /********************************************************************/ |
4415 | 4302 | ||
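
The reworked orinoco_xmit() hunks above converge on a single convention: any path that consumes the skb (successful transmit or silent drop) frees it and returns NETDEV_TX_OK, while transient hardware failures leave the skb with the networking core and return NETDEV_TX_BUSY so the frame is retried; an -EIO from the hardware additionally schedules the reset worker. A minimal illustrative sketch of that shape follows -- it is not the driver's actual code, and hw_queue_frame() is a hypothetical stand-in for the descriptor and BAP writes plus the transmit command:

static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct orinoco_private *priv = netdev_priv(dev);
	unsigned long flags;
	int err;

	if (orinoco_lock(priv, &flags) != 0)
		return NETDEV_TX_BUSY;		/* hardware unavailable, core retries later */

	err = hw_queue_frame(priv, skb);	/* hypothetical helper, see lead-in */
	if (err)
		goto busy;

	orinoco_unlock(priv, &flags);
	dev_kfree_skb(skb);			/* frame handed to firmware; we owned the skb */
	return NETDEV_TX_OK;

busy:
	if (err == -EIO)
		schedule_work(&priv->reset_work);	/* card looks wedged, ask for a reset */
	orinoco_unlock(priv, &flags);
	return NETDEV_TX_BUSY;			/* core keeps the skb and retransmits it */
}
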
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h index f5d856db92a1..16db3e14b7d2 100644 --- a/drivers/net/wireless/orinoco.h +++ b/drivers/net/wireless/orinoco.h | |||
@@ -7,7 +7,7 @@ | |||
7 | #ifndef _ORINOCO_H | 7 | #ifndef _ORINOCO_H |
8 | #define _ORINOCO_H | 8 | #define _ORINOCO_H |
9 | 9 | ||
10 | #define DRIVER_VERSION "0.15rc3" | 10 | #define DRIVER_VERSION "0.15" |
11 | 11 | ||
12 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
13 | #include <linux/wireless.h> | 13 | #include <linux/wireless.h> |
@@ -30,20 +30,6 @@ struct orinoco_key { | |||
30 | char data[ORINOCO_MAX_KEY_SIZE]; | 30 | char data[ORINOCO_MAX_KEY_SIZE]; |
31 | } __attribute__ ((packed)); | 31 | } __attribute__ ((packed)); |
32 | 32 | ||
33 | struct header_struct { | ||
34 | /* 802.3 */ | ||
35 | u8 dest[ETH_ALEN]; | ||
36 | u8 src[ETH_ALEN]; | ||
37 | __be16 len; | ||
38 | /* 802.2 */ | ||
39 | u8 dsap; | ||
40 | u8 ssap; | ||
41 | u8 ctrl; | ||
42 | /* SNAP */ | ||
43 | u8 oui[3]; | ||
44 | unsigned short ethertype; | ||
45 | } __attribute__ ((packed)); | ||
46 | |||
47 | typedef enum { | 33 | typedef enum { |
48 | FIRMWARE_TYPE_AGERE, | 34 | FIRMWARE_TYPE_AGERE, |
49 | FIRMWARE_TYPE_INTERSIL, | 35 | FIRMWARE_TYPE_INTERSIL, |
@@ -132,9 +118,6 @@ extern int orinoco_debug; | |||
132 | #define DEBUG(n, args...) do { } while (0) | 118 | #define DEBUG(n, args...) do { } while (0) |
133 | #endif /* ORINOCO_DEBUG */ | 119 | #endif /* ORINOCO_DEBUG */ |
134 | 120 | ||
135 | #define TRACE_ENTER(devname) DEBUG(2, "%s: -> %s()\n", devname, __FUNCTION__); | ||
136 | #define TRACE_EXIT(devname) DEBUG(2, "%s: <- %s()\n", devname, __FUNCTION__); | ||
137 | |||
138 | /********************************************************************/ | 121 | /********************************************************************/ |
139 | /* Exported prototypes */ | 122 | /* Exported prototypes */ |
140 | /********************************************************************/ | 123 | /********************************************************************/ |
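
The header_struct removed from orinoco.h here described the on-air layout that orinoco_xmit() now builds with a local structure. For reference, a rough sketch of that layout, assuming the standard RFC 1042 LLC/SNAP values; the names below are illustrative and not the driver's own:

/* Ethernet-II frames are rewritten on transmit as: 802.3 MAC header with a
 * length field in place of the ethertype, the 6-byte LLC/SNAP prefix, then
 * the original ethertype and payload. */
static const u8 rfc1042_prefix[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

struct snap_encaps_frame {
	struct ethhdr eth;	/* h_proto carries the 802.3 length, not a protocol id */
	u8 llc_snap[6];		/* DSAP, SSAP, control, 3-byte OUI */
	__be16 ethertype;	/* original Ethernet-II protocol id, followed by payload */
} __attribute__ ((packed));
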
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c index 434f7d7ad841..b2aec4d9fbb1 100644 --- a/drivers/net/wireless/orinoco_cs.c +++ b/drivers/net/wireless/orinoco_cs.c | |||
@@ -147,14 +147,11 @@ static void orinoco_cs_detach(struct pcmcia_device *link) | |||
147 | { | 147 | { |
148 | struct net_device *dev = link->priv; | 148 | struct net_device *dev = link->priv; |
149 | 149 | ||
150 | if (link->dev_node) | ||
151 | unregister_netdev(dev); | ||
152 | |||
150 | orinoco_cs_release(link); | 153 | orinoco_cs_release(link); |
151 | 154 | ||
152 | DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node); | ||
153 | if (link->dev_node) { | ||
154 | DEBUG(0, PFX "About to unregister net device %p\n", | ||
155 | dev); | ||
156 | unregister_netdev(dev); | ||
157 | } | ||
158 | free_orinocodev(dev); | 155 | free_orinocodev(dev); |
159 | } /* orinoco_cs_detach */ | 156 | } /* orinoco_cs_detach */ |
160 | 157 | ||
@@ -178,13 +175,10 @@ orinoco_cs_config(struct pcmcia_device *link) | |||
178 | int last_fn, last_ret; | 175 | int last_fn, last_ret; |
179 | u_char buf[64]; | 176 | u_char buf[64]; |
180 | config_info_t conf; | 177 | config_info_t conf; |
181 | cisinfo_t info; | ||
182 | tuple_t tuple; | 178 | tuple_t tuple; |
183 | cisparse_t parse; | 179 | cisparse_t parse; |
184 | void __iomem *mem; | 180 | void __iomem *mem; |
185 | 181 | ||
186 | CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info)); | ||
187 | |||
188 | /* | 182 | /* |
189 | * This reads the card's CONFIG tuple to find its | 183 | * This reads the card's CONFIG tuple to find its |
190 | * configuration registers. | 184 | * configuration registers. |
@@ -234,12 +228,6 @@ orinoco_cs_config(struct pcmcia_device *link) | |||
234 | goto next_entry; | 228 | goto next_entry; |
235 | link->conf.ConfigIndex = cfg->index; | 229 | link->conf.ConfigIndex = cfg->index; |
236 | 230 | ||
237 | /* Does this card need audio output? */ | ||
238 | if (cfg->flags & CISTPL_CFTABLE_AUDIO) { | ||
239 | link->conf.Attributes |= CONF_ENABLE_SPKR; | ||
240 | link->conf.Status = CCSR_AUDIO_ENA; | ||
241 | } | ||
242 | |||
243 | /* Use power settings for Vcc and Vpp if present */ | 231 | /* Use power settings for Vcc and Vpp if present */ |
244 | /* Note that the CIS values need to be rescaled */ | 232 | /* Note that the CIS values need to be rescaled */ |
245 | if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { | 233 | if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { |
@@ -355,19 +343,10 @@ orinoco_cs_config(struct pcmcia_device *link) | |||
355 | net_device has been registered */ | 343 | net_device has been registered */ |
356 | 344 | ||
357 | /* Finally, report what we've done */ | 345 | /* Finally, report what we've done */ |
358 | printk(KERN_DEBUG "%s: index 0x%02x: ", | 346 | printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io " |
359 | dev->name, link->conf.ConfigIndex); | 347 | "0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id, |
360 | if (link->conf.Vpp) | 348 | link->irq.AssignedIRQ, link->io.BasePort1, |
361 | printk(", Vpp %d.%d", link->conf.Vpp / 10, | 349 | link->io.BasePort1 + link->io.NumPorts1 - 1); |
362 | link->conf.Vpp % 10); | ||
363 | printk(", irq %d", link->irq.AssignedIRQ); | ||
364 | if (link->io.NumPorts1) | ||
365 | printk(", io 0x%04x-0x%04x", link->io.BasePort1, | ||
366 | link->io.BasePort1 + link->io.NumPorts1 - 1); | ||
367 | if (link->io.NumPorts2) | ||
368 | printk(" & 0x%04x-0x%04x", link->io.BasePort2, | ||
369 | link->io.BasePort2 + link->io.NumPorts2 - 1); | ||
370 | printk("\n"); | ||
371 | 350 | ||
372 | return 0; | 351 | return 0; |
373 | 352 | ||
@@ -436,7 +415,6 @@ static int orinoco_cs_resume(struct pcmcia_device *link) | |||
436 | struct orinoco_private *priv = netdev_priv(dev); | 415 | struct orinoco_private *priv = netdev_priv(dev); |
437 | struct orinoco_pccard *card = priv->card; | 416 | struct orinoco_pccard *card = priv->card; |
438 | int err = 0; | 417 | int err = 0; |
439 | unsigned long flags; | ||
440 | 418 | ||
441 | if (! test_bit(0, &card->hard_reset_in_progress)) { | 419 | if (! test_bit(0, &card->hard_reset_in_progress)) { |
442 | err = orinoco_reinit_firmware(dev); | 420 | err = orinoco_reinit_firmware(dev); |
@@ -446,7 +424,7 @@ static int orinoco_cs_resume(struct pcmcia_device *link) | |||
446 | return -EIO; | 424 | return -EIO; |
447 | } | 425 | } |
448 | 426 | ||
449 | spin_lock_irqsave(&priv->lock, flags); | 427 | spin_lock(&priv->lock); |
450 | 428 | ||
451 | netif_device_attach(dev); | 429 | netif_device_attach(dev); |
452 | priv->hw_unavailable--; | 430 | priv->hw_unavailable--; |
@@ -458,10 +436,10 @@ static int orinoco_cs_resume(struct pcmcia_device *link) | |||
458 | dev->name, err); | 436 | dev->name, err); |
459 | } | 437 | } |
460 | 438 | ||
461 | spin_unlock_irqrestore(&priv->lock, flags); | 439 | spin_unlock(&priv->lock); |
462 | } | 440 | } |
463 | 441 | ||
464 | return 0; | 442 | return err; |
465 | } | 443 | } |
466 | 444 | ||
467 | 445 | ||
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c index d1a670b35338..74b9d5b2ba9e 100644 --- a/drivers/net/wireless/orinoco_nortel.c +++ b/drivers/net/wireless/orinoco_nortel.c | |||
@@ -1,9 +1,8 @@ | |||
1 | /* orinoco_nortel.c | 1 | /* orinoco_nortel.c |
2 | * | 2 | * |
3 | * Driver for Prism II devices which would usually be driven by orinoco_cs, | 3 | * Driver for Prism II devices which would usually be driven by orinoco_cs, |
4 | * but are connected to the PCI bus by a PCI-to-PCMCIA adapter used in | 4 | * but are connected to the PCI bus by a PCI-to-PCMCIA adapter used in |
5 | * Nortel emobility, Symbol LA-4113 and Symbol LA-4123. | 5 | * Nortel emobility, Symbol LA-4113 and Symbol LA-4123. |
6 | * but are connected to the PCI bus by a Nortel PCI-PCMCIA-Adapter. | ||
7 | * | 6 | * |
8 | * Copyright (C) 2002 Tobias Hoffmann | 7 | * Copyright (C) 2002 Tobias Hoffmann |
9 | * (C) 2003 Christoph Jungegger <disdos@traum404.de> | 8 | * (C) 2003 Christoph Jungegger <disdos@traum404.de> |
@@ -50,67 +49,62 @@ | |||
50 | #include <pcmcia/cisreg.h> | 49 | #include <pcmcia/cisreg.h> |
51 | 50 | ||
52 | #include "orinoco.h" | 51 | #include "orinoco.h" |
52 | #include "orinoco_pci.h" | ||
53 | 53 | ||
54 | #define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */ | 54 | #define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */ |
55 | #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ | 55 | #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ |
56 | 56 | ||
57 | 57 | ||
58 | /* Nortel specific data */ | ||
59 | struct nortel_pci_card { | ||
60 | unsigned long iobase1; | ||
61 | unsigned long iobase2; | ||
62 | }; | ||
63 | |||
64 | /* | 58 | /* |
65 | * Do a soft reset of the PCI card using the Configuration Option Register | 59 | * Do a soft reset of the card using the Configuration Option Register |
66 | * We need this to get going... | 60 | * We need this to get going... |
67 | * This is the part of the code that is strongly inspired from wlan-ng | 61 | * This is the part of the code that is strongly inspired from wlan-ng |
68 | * | 62 | * |
69 | * Note bis : Don't try to access HERMES_CMD during the reset phase. | 63 | * Note bis : Don't try to access HERMES_CMD during the reset phase. |
70 | * It just won't work ! | 64 | * It just won't work ! |
71 | */ | 65 | */ |
72 | static int nortel_pci_cor_reset(struct orinoco_private *priv) | 66 | static int orinoco_nortel_cor_reset(struct orinoco_private *priv) |
73 | { | 67 | { |
74 | struct nortel_pci_card *card = priv->card; | 68 | struct orinoco_pci_card *card = priv->card; |
75 | 69 | ||
76 | /* Assert the reset until the card notice */ | 70 | /* Assert the reset until the card notices */ |
77 | outw_p(8, card->iobase1 + 2); | 71 | iowrite16(8, card->bridge_io + 2); |
78 | inw(card->iobase2 + COR_OFFSET); | 72 | ioread16(card->attr_io + COR_OFFSET); |
79 | outw_p(0x80, card->iobase2 + COR_OFFSET); | 73 | iowrite16(0x80, card->attr_io + COR_OFFSET); |
80 | mdelay(1); | 74 | mdelay(1); |
81 | 75 | ||
82 | /* Give time for the card to recover from this hard effort */ | 76 | /* Give time for the card to recover from this hard effort */ |
83 | outw_p(0, card->iobase2 + COR_OFFSET); | 77 | iowrite16(0, card->attr_io + COR_OFFSET); |
84 | outw_p(0, card->iobase2 + COR_OFFSET); | 78 | iowrite16(0, card->attr_io + COR_OFFSET); |
85 | mdelay(1); | 79 | mdelay(1); |
86 | 80 | ||
87 | /* set COR as usual */ | 81 | /* Set COR as usual */ |
88 | outw_p(COR_VALUE, card->iobase2 + COR_OFFSET); | 82 | iowrite16(COR_VALUE, card->attr_io + COR_OFFSET); |
89 | outw_p(COR_VALUE, card->iobase2 + COR_OFFSET); | 83 | iowrite16(COR_VALUE, card->attr_io + COR_OFFSET); |
90 | mdelay(1); | 84 | mdelay(1); |
91 | 85 | ||
92 | outw_p(0x228, card->iobase1 + 2); | 86 | iowrite16(0x228, card->bridge_io + 2); |
93 | 87 | ||
94 | return 0; | 88 | return 0; |
95 | } | 89 | } |
96 | 90 | ||
97 | static int nortel_pci_hw_init(struct nortel_pci_card *card) | 91 | static int orinoco_nortel_hw_init(struct orinoco_pci_card *card) |
98 | { | 92 | { |
99 | int i; | 93 | int i; |
100 | u32 reg; | 94 | u32 reg; |
101 | 95 | ||
102 | /* setup bridge */ | 96 | /* Setup bridge */ |
103 | if (inw(card->iobase1) & 1) { | 97 | if (ioread16(card->bridge_io) & 1) { |
104 | printk(KERN_ERR PFX "brg1 answer1 wrong\n"); | 98 | printk(KERN_ERR PFX "brg1 answer1 wrong\n"); |
105 | return -EBUSY; | 99 | return -EBUSY; |
106 | } | 100 | } |
107 | outw_p(0x118, card->iobase1 + 2); | 101 | iowrite16(0x118, card->bridge_io + 2); |
108 | outw_p(0x108, card->iobase1 + 2); | 102 | iowrite16(0x108, card->bridge_io + 2); |
109 | mdelay(30); | 103 | mdelay(30); |
110 | outw_p(0x8, card->iobase1 + 2); | 104 | iowrite16(0x8, card->bridge_io + 2); |
111 | for (i = 0; i < 30; i++) { | 105 | for (i = 0; i < 30; i++) { |
112 | mdelay(30); | 106 | mdelay(30); |
113 | if (inw(card->iobase1) & 0x10) { | 107 | if (ioread16(card->bridge_io) & 0x10) { |
114 | break; | 108 | break; |
115 | } | 109 | } |
116 | } | 110 | } |
@@ -118,42 +112,42 @@ static int nortel_pci_hw_init(struct nortel_pci_card *card) | |||
118 | printk(KERN_ERR PFX "brg1 timed out\n"); | 112 | printk(KERN_ERR PFX "brg1 timed out\n"); |
119 | return -EBUSY; | 113 | return -EBUSY; |
120 | } | 114 | } |
121 | if (inw(card->iobase2 + 0xe0) & 1) { | 115 | if (ioread16(card->attr_io + COR_OFFSET) & 1) { |
122 | printk(KERN_ERR PFX "brg2 answer1 wrong\n"); | 116 | printk(KERN_ERR PFX "brg2 answer1 wrong\n"); |
123 | return -EBUSY; | 117 | return -EBUSY; |
124 | } | 118 | } |
125 | if (inw(card->iobase2 + 0xe2) & 1) { | 119 | if (ioread16(card->attr_io + COR_OFFSET + 2) & 1) { |
126 | printk(KERN_ERR PFX "brg2 answer2 wrong\n"); | 120 | printk(KERN_ERR PFX "brg2 answer2 wrong\n"); |
127 | return -EBUSY; | 121 | return -EBUSY; |
128 | } | 122 | } |
129 | if (inw(card->iobase2 + 0xe4) & 1) { | 123 | if (ioread16(card->attr_io + COR_OFFSET + 4) & 1) { |
130 | printk(KERN_ERR PFX "brg2 answer3 wrong\n"); | 124 | printk(KERN_ERR PFX "brg2 answer3 wrong\n"); |
131 | return -EBUSY; | 125 | return -EBUSY; |
132 | } | 126 | } |
133 | 127 | ||
134 | /* set the PCMCIA COR-Register */ | 128 | /* Set the PCMCIA COR register */ |
135 | outw_p(COR_VALUE, card->iobase2 + COR_OFFSET); | 129 | iowrite16(COR_VALUE, card->attr_io + COR_OFFSET); |
136 | mdelay(1); | 130 | mdelay(1); |
137 | reg = inw(card->iobase2 + COR_OFFSET); | 131 | reg = ioread16(card->attr_io + COR_OFFSET); |
138 | if (reg != COR_VALUE) { | 132 | if (reg != COR_VALUE) { |
139 | printk(KERN_ERR PFX "Error setting COR value (reg=%x)\n", | 133 | printk(KERN_ERR PFX "Error setting COR value (reg=%x)\n", |
140 | reg); | 134 | reg); |
141 | return -EBUSY; | 135 | return -EBUSY; |
142 | } | 136 | } |
143 | 137 | ||
144 | /* set leds */ | 138 | /* Set LEDs */ |
145 | outw_p(1, card->iobase1 + 10); | 139 | iowrite16(1, card->bridge_io + 10); |
146 | return 0; | 140 | return 0; |
147 | } | 141 | } |
148 | 142 | ||
149 | static int nortel_pci_init_one(struct pci_dev *pdev, | 143 | static int orinoco_nortel_init_one(struct pci_dev *pdev, |
150 | const struct pci_device_id *ent) | 144 | const struct pci_device_id *ent) |
151 | { | 145 | { |
152 | int err; | 146 | int err; |
153 | struct orinoco_private *priv; | 147 | struct orinoco_private *priv; |
154 | struct nortel_pci_card *card; | 148 | struct orinoco_pci_card *card; |
155 | struct net_device *dev; | 149 | struct net_device *dev; |
156 | void __iomem *iomem; | 150 | void __iomem *hermes_io, *bridge_io, *attr_io; |
157 | 151 | ||
158 | err = pci_enable_device(pdev); | 152 | err = pci_enable_device(pdev); |
159 | if (err) { | 153 | if (err) { |
@@ -162,19 +156,34 @@ static int nortel_pci_init_one(struct pci_dev *pdev, | |||
162 | } | 156 | } |
163 | 157 | ||
164 | err = pci_request_regions(pdev, DRIVER_NAME); | 158 | err = pci_request_regions(pdev, DRIVER_NAME); |
165 | if (err != 0) { | 159 | if (err) { |
166 | printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); | 160 | printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); |
167 | goto fail_resources; | 161 | goto fail_resources; |
168 | } | 162 | } |
169 | 163 | ||
170 | iomem = pci_iomap(pdev, 2, 0); | 164 | bridge_io = pci_iomap(pdev, 0, 0); |
171 | if (!iomem) { | 165 | if (!bridge_io) { |
172 | err = -ENOMEM; | 166 | printk(KERN_ERR PFX "Cannot map bridge registers\n"); |
173 | goto fail_map_io; | 167 | err = -EIO; |
168 | goto fail_map_bridge; | ||
169 | } | ||
170 | |||
171 | attr_io = pci_iomap(pdev, 1, 0); | ||
172 | if (!attr_io) { | ||
173 | printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n"); | ||
174 | err = -EIO; | ||
175 | goto fail_map_attr; | ||
176 | } | ||
177 | |||
178 | hermes_io = pci_iomap(pdev, 2, 0); | ||
179 | if (!hermes_io) { | ||
180 | printk(KERN_ERR PFX "Cannot map chipset registers\n"); | ||
181 | err = -EIO; | ||
182 | goto fail_map_hermes; | ||
174 | } | 183 | } |
175 | 184 | ||
176 | /* Allocate network device */ | 185 | /* Allocate network device */ |
177 | dev = alloc_orinocodev(sizeof(*card), nortel_pci_cor_reset); | 186 | dev = alloc_orinocodev(sizeof(*card), orinoco_nortel_cor_reset); |
178 | if (!dev) { | 187 | if (!dev) { |
179 | printk(KERN_ERR PFX "Cannot allocate network device\n"); | 188 | printk(KERN_ERR PFX "Cannot allocate network device\n"); |
180 | err = -ENOMEM; | 189 | err = -ENOMEM; |
@@ -183,16 +192,12 @@ static int nortel_pci_init_one(struct pci_dev *pdev, | |||
183 | 192 | ||
184 | priv = netdev_priv(dev); | 193 | priv = netdev_priv(dev); |
185 | card = priv->card; | 194 | card = priv->card; |
186 | card->iobase1 = pci_resource_start(pdev, 0); | 195 | card->bridge_io = bridge_io; |
187 | card->iobase2 = pci_resource_start(pdev, 1); | 196 | card->attr_io = attr_io; |
188 | dev->base_addr = pci_resource_start(pdev, 2); | ||
189 | SET_MODULE_OWNER(dev); | 197 | SET_MODULE_OWNER(dev); |
190 | SET_NETDEV_DEV(dev, &pdev->dev); | 198 | SET_NETDEV_DEV(dev, &pdev->dev); |
191 | 199 | ||
192 | hermes_struct_init(&priv->hw, iomem, HERMES_16BIT_REGSPACING); | 200 | hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); |
193 | |||
194 | printk(KERN_DEBUG PFX "Detected Nortel PCI device at %s irq:%d, " | ||
195 | "io addr:0x%lx\n", pci_name(pdev), pdev->irq, dev->base_addr); | ||
196 | 201 | ||
197 | err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, | 202 | err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, |
198 | dev->name, dev); | 203 | dev->name, dev); |
@@ -201,21 +206,19 @@ static int nortel_pci_init_one(struct pci_dev *pdev, | |||
201 | err = -EBUSY; | 206 | err = -EBUSY; |
202 | goto fail_irq; | 207 | goto fail_irq; |
203 | } | 208 | } |
204 | dev->irq = pdev->irq; | ||
205 | 209 | ||
206 | err = nortel_pci_hw_init(card); | 210 | err = orinoco_nortel_hw_init(card); |
207 | if (err) { | 211 | if (err) { |
208 | printk(KERN_ERR PFX "Hardware initialization failed\n"); | 212 | printk(KERN_ERR PFX "Hardware initialization failed\n"); |
209 | goto fail; | 213 | goto fail; |
210 | } | 214 | } |
211 | 215 | ||
212 | err = nortel_pci_cor_reset(priv); | 216 | err = orinoco_nortel_cor_reset(priv); |
213 | if (err) { | 217 | if (err) { |
214 | printk(KERN_ERR PFX "Initial reset failed\n"); | 218 | printk(KERN_ERR PFX "Initial reset failed\n"); |
215 | goto fail; | 219 | goto fail; |
216 | } | 220 | } |
217 | 221 | ||
218 | |||
219 | err = register_netdev(dev); | 222 | err = register_netdev(dev); |
220 | if (err) { | 223 | if (err) { |
221 | printk(KERN_ERR PFX "Cannot register network device\n"); | 224 | printk(KERN_ERR PFX "Cannot register network device\n"); |
@@ -223,6 +226,8 @@ static int nortel_pci_init_one(struct pci_dev *pdev, | |||
223 | } | 226 | } |
224 | 227 | ||
225 | pci_set_drvdata(pdev, dev); | 228 | pci_set_drvdata(pdev, dev); |
229 | printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name, | ||
230 | pci_name(pdev)); | ||
226 | 231 | ||
227 | return 0; | 232 | return 0; |
228 | 233 | ||
@@ -234,9 +239,15 @@ static int nortel_pci_init_one(struct pci_dev *pdev, | |||
234 | free_orinocodev(dev); | 239 | free_orinocodev(dev); |
235 | 240 | ||
236 | fail_alloc: | 241 | fail_alloc: |
237 | pci_iounmap(pdev, iomem); | 242 | pci_iounmap(pdev, hermes_io); |
238 | 243 | ||
239 | fail_map_io: | 244 | fail_map_hermes: |
245 | pci_iounmap(pdev, attr_io); | ||
246 | |||
247 | fail_map_attr: | ||
248 | pci_iounmap(pdev, bridge_io); | ||
249 | |||
250 | fail_map_bridge: | ||
240 | pci_release_regions(pdev); | 251 | pci_release_regions(pdev); |
241 | 252 | ||
242 | fail_resources: | 253 | fail_resources: |
@@ -245,26 +256,27 @@ static int nortel_pci_init_one(struct pci_dev *pdev, | |||
245 | return err; | 256 | return err; |
246 | } | 257 | } |
247 | 258 | ||
248 | static void __devexit nortel_pci_remove_one(struct pci_dev *pdev) | 259 | static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev) |
249 | { | 260 | { |
250 | struct net_device *dev = pci_get_drvdata(pdev); | 261 | struct net_device *dev = pci_get_drvdata(pdev); |
251 | struct orinoco_private *priv = netdev_priv(dev); | 262 | struct orinoco_private *priv = netdev_priv(dev); |
252 | struct nortel_pci_card *card = priv->card; | 263 | struct orinoco_pci_card *card = priv->card; |
253 | 264 | ||
254 | /* clear leds */ | 265 | /* Clear LEDs */ |
255 | outw_p(0, card->iobase1 + 10); | 266 | iowrite16(0, card->bridge_io + 10); |
256 | 267 | ||
257 | unregister_netdev(dev); | 268 | unregister_netdev(dev); |
258 | free_irq(dev->irq, dev); | 269 | free_irq(pdev->irq, dev); |
259 | pci_set_drvdata(pdev, NULL); | 270 | pci_set_drvdata(pdev, NULL); |
260 | free_orinocodev(dev); | 271 | free_orinocodev(dev); |
261 | pci_iounmap(pdev, priv->hw.iobase); | 272 | pci_iounmap(pdev, priv->hw.iobase); |
273 | pci_iounmap(pdev, card->attr_io); | ||
274 | pci_iounmap(pdev, card->bridge_io); | ||
262 | pci_release_regions(pdev); | 275 | pci_release_regions(pdev); |
263 | pci_disable_device(pdev); | 276 | pci_disable_device(pdev); |
264 | } | 277 | } |
265 | 278 | ||
266 | 279 | static struct pci_device_id orinoco_nortel_id_table[] = { | |
267 | static struct pci_device_id nortel_pci_id_table[] = { | ||
268 | /* Nortel emobility PCI */ | 280 | /* Nortel emobility PCI */ |
269 | {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,}, | 281 | {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,}, |
270 | /* Symbol LA-4123 PCI */ | 282 | /* Symbol LA-4123 PCI */ |
@@ -272,13 +284,15 @@ static struct pci_device_id nortel_pci_id_table[] = { | |||
272 | {0,}, | 284 | {0,}, |
273 | }; | 285 | }; |
274 | 286 | ||
275 | MODULE_DEVICE_TABLE(pci, nortel_pci_id_table); | 287 | MODULE_DEVICE_TABLE(pci, orinoco_nortel_id_table); |
276 | 288 | ||
277 | static struct pci_driver nortel_pci_driver = { | 289 | static struct pci_driver orinoco_nortel_driver = { |
278 | .name = DRIVER_NAME, | 290 | .name = DRIVER_NAME, |
279 | .id_table = nortel_pci_id_table, | 291 | .id_table = orinoco_nortel_id_table, |
280 | .probe = nortel_pci_init_one, | 292 | .probe = orinoco_nortel_init_one, |
281 | .remove = __devexit_p(nortel_pci_remove_one), | 293 | .remove = __devexit_p(orinoco_nortel_remove_one), |
294 | .suspend = orinoco_pci_suspend, | ||
295 | .resume = orinoco_pci_resume, | ||
282 | }; | 296 | }; |
283 | 297 | ||
284 | static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION | 298 | static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION |
@@ -288,20 +302,19 @@ MODULE_DESCRIPTION | |||
288 | ("Driver for wireless LAN cards using the Nortel PCI bridge"); | 302 | ("Driver for wireless LAN cards using the Nortel PCI bridge"); |
289 | MODULE_LICENSE("Dual MPL/GPL"); | 303 | MODULE_LICENSE("Dual MPL/GPL"); |
290 | 304 | ||
291 | static int __init nortel_pci_init(void) | 305 | static int __init orinoco_nortel_init(void) |
292 | { | 306 | { |
293 | printk(KERN_DEBUG "%s\n", version); | 307 | printk(KERN_DEBUG "%s\n", version); |
294 | return pci_module_init(&nortel_pci_driver); | 308 | return pci_module_init(&orinoco_nortel_driver); |
295 | } | 309 | } |
296 | 310 | ||
297 | static void __exit nortel_pci_exit(void) | 311 | static void __exit orinoco_nortel_exit(void) |
298 | { | 312 | { |
299 | pci_unregister_driver(&nortel_pci_driver); | 313 | pci_unregister_driver(&orinoco_nortel_driver); |
300 | ssleep(1); | ||
301 | } | 314 | } |
302 | 315 | ||
303 | module_init(nortel_pci_init); | 316 | module_init(orinoco_nortel_init); |
304 | module_exit(nortel_pci_exit); | 317 | module_exit(orinoco_nortel_exit); |
305 | 318 | ||
306 | /* | 319 | /* |
307 | * Local variables: | 320 | * Local variables: |
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c index 5362c214fc8e..1c105f40f8d5 100644 --- a/drivers/net/wireless/orinoco_pci.c +++ b/drivers/net/wireless/orinoco_pci.c | |||
@@ -1,11 +1,11 @@ | |||
1 | /* orinoco_pci.c | 1 | /* orinoco_pci.c |
2 | * | 2 | * |
3 | * Driver for Prism II devices that have a direct PCI interface | 3 | * Driver for Prism 2.5/3 devices that have a direct PCI interface |
4 | * (i.e., not in a Pcmcia or PLX bridge) | 4 | * (i.e. these are not PCMCIA cards in a PCMCIA-to-PCI bridge). |
5 | * | 5 | * The card contains only one PCI region, which contains all the usual |
6 | * Specifically here we're talking about the Linksys WMP11 | 6 | * hermes registers, as well as the COR register. |
7 | * | 7 | * |
8 | * Current maintainers (as of 29 September 2003) are: | 8 | * Current maintainers are: |
9 | * Pavel Roskin <proski AT gnu.org> | 9 | * Pavel Roskin <proski AT gnu.org> |
10 | * and David Gibson <hermes AT gibson.dropbear.id.au> | 10 | * and David Gibson <hermes AT gibson.dropbear.id.au> |
11 | * | 11 | * |
@@ -41,54 +41,6 @@ | |||
41 | * under either the MPL or the GPL. | 41 | * under either the MPL or the GPL. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | /* | ||
45 | * Theory of operation... | ||
46 | * ------------------- | ||
47 | * Maybe you had a look in orinoco_plx. Well, this is totally different... | ||
48 | * | ||
49 | * The card contains only one PCI region, which contains all the usual | ||
50 | * hermes registers. | ||
51 | * | ||
52 | * The driver will memory map this region in normal memory. Because | ||
53 | * the hermes registers are mapped in normal memory and not in ISA I/O | ||
54 | * post space, we can't use the usual inw/outw macros and we need to | ||
55 | * use readw/writew. | ||
56 | * This slight difference force us to compile our own version of | ||
57 | * hermes.c with the register access macro changed. That's a bit | ||
58 | * hackish but works fine. | ||
59 | * | ||
60 | * Note that the PCI region is pretty big (4K). That's much more than | ||
61 | * the usual set of hermes register (0x0 -> 0x3E). I've got a strong | ||
62 | * suspicion that the whole memory space of the adapter is in fact in | ||
63 | * this region. Accessing directly the adapter memory instead of going | ||
64 | * through the usual register would speed up significantely the | ||
65 | * operations... | ||
66 | * | ||
67 | * Finally, the card looks like this : | ||
68 | ----------------------- | ||
69 | Bus 0, device 14, function 0: | ||
70 | Network controller: PCI device 1260:3873 (Harris Semiconductor) (rev 1). | ||
71 | IRQ 11. | ||
72 | Master Capable. Latency=248. | ||
73 | Prefetchable 32 bit memory at 0xffbcc000 [0xffbccfff]. | ||
74 | ----------------------- | ||
75 | 00:0e.0 Network controller: Harris Semiconductor: Unknown device 3873 (rev 01) | ||
76 | Subsystem: Unknown device 1737:3874 | ||
77 | Control: I/O+ Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B- | ||
78 | Status: Cap+ 66Mhz- UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR- | ||
79 | Latency: 248 set, cache line size 08 | ||
80 | Interrupt: pin A routed to IRQ 11 | ||
81 | Region 0: Memory at ffbcc000 (32-bit, prefetchable) [size=4K] | ||
82 | Capabilities: [dc] Power Management version 2 | ||
83 | Flags: PMEClk- AuxPwr- DSI- D1+ D2+ PME+ | ||
84 | Status: D0 PME-Enable- DSel=0 DScale=0 PME- | ||
85 | ----------------------- | ||
86 | * | ||
87 | * That's all.. | ||
88 | * | ||
89 | * Jean II | ||
90 | */ | ||
91 | |||
92 | #define DRIVER_NAME "orinoco_pci" | 44 | #define DRIVER_NAME "orinoco_pci" |
93 | #define PFX DRIVER_NAME ": " | 45 | #define PFX DRIVER_NAME ": " |
94 | 46 | ||
@@ -100,12 +52,14 @@ | |||
100 | #include <linux/pci.h> | 52 | #include <linux/pci.h> |
101 | 53 | ||
102 | #include "orinoco.h" | 54 | #include "orinoco.h" |
55 | #include "orinoco_pci.h" | ||
103 | 56 | ||
104 | /* All the magic there is from wlan-ng */ | 57 | /* Offset of the COR register of the PCI card */ |
105 | /* Magic offset of the reset register of the PCI card */ | ||
106 | #define HERMES_PCI_COR (0x26) | 58 | #define HERMES_PCI_COR (0x26) |
107 | /* Magic bitmask to reset the card */ | 59 | |
60 | /* Bitmask to reset the card */ | ||
108 | #define HERMES_PCI_COR_MASK (0x0080) | 61 | #define HERMES_PCI_COR_MASK (0x0080) |
62 | |||
109 | /* Magic timeouts for doing the reset. | 63 | /* Magic timeouts for doing the reset. |
110 | * Those times are straight from wlan-ng, and it is claimed that they | 64 | * Those times are straight from wlan-ng, and it is claimed that they |
111 | * are necessary. Alan will kill me. Take your time and grab a coffee. */ | 65 | * are necessary. Alan will kill me. Take your time and grab a coffee. */ |
@@ -113,13 +67,8 @@ | |||
113 | #define HERMES_PCI_COR_OFFT (500) /* ms */ | 67 | #define HERMES_PCI_COR_OFFT (500) /* ms */ |
114 | #define HERMES_PCI_COR_BUSYT (500) /* ms */ | 68 | #define HERMES_PCI_COR_BUSYT (500) /* ms */ |
115 | 69 | ||
116 | /* Orinoco PCI specific data */ | ||
117 | struct orinoco_pci_card { | ||
118 | void __iomem *pci_ioaddr; | ||
119 | }; | ||
120 | |||
121 | /* | 70 | /* |
122 | * Do a soft reset of the PCI card using the Configuration Option Register | 71 | * Do a soft reset of the card using the Configuration Option Register |
123 | * We need this to get going... | 72 | * We need this to get going... |
124 | * This is the part of the code that is strongly inspired from wlan-ng | 73 | * This is the part of the code that is strongly inspired from wlan-ng |
125 | * | 74 | * |
@@ -131,14 +80,13 @@ struct orinoco_pci_card { | |||
131 | * Note bis : Don't try to access HERMES_CMD during the reset phase. | 80 | * Note bis : Don't try to access HERMES_CMD during the reset phase. |
132 | * It just won't work ! | 81 | * It just won't work ! |
133 | */ | 82 | */ |
134 | static int | 83 | static int orinoco_pci_cor_reset(struct orinoco_private *priv) |
135 | orinoco_pci_cor_reset(struct orinoco_private *priv) | ||
136 | { | 84 | { |
137 | hermes_t *hw = &priv->hw; | 85 | hermes_t *hw = &priv->hw; |
138 | unsigned long timeout; | 86 | unsigned long timeout; |
139 | u16 reg; | 87 | u16 reg; |
140 | 88 | ||
141 | /* Assert the reset until the card notice */ | 89 | /* Assert the reset until the card notices */ |
142 | hermes_write_regn(hw, PCI_COR, HERMES_PCI_COR_MASK); | 90 | hermes_write_regn(hw, PCI_COR, HERMES_PCI_COR_MASK); |
143 | mdelay(HERMES_PCI_COR_ONT); | 91 | mdelay(HERMES_PCI_COR_ONT); |
144 | 92 | ||
@@ -163,19 +111,14 @@ orinoco_pci_cor_reset(struct orinoco_private *priv) | |||
163 | return 0; | 111 | return 0; |
164 | } | 112 | } |
165 | 113 | ||
166 | /* | ||
167 | * Initialise a card. Mostly similar to PLX code. | ||
168 | */ | ||
169 | static int orinoco_pci_init_one(struct pci_dev *pdev, | 114 | static int orinoco_pci_init_one(struct pci_dev *pdev, |
170 | const struct pci_device_id *ent) | 115 | const struct pci_device_id *ent) |
171 | { | 116 | { |
172 | int err = 0; | 117 | int err; |
173 | unsigned long pci_iorange; | 118 | struct orinoco_private *priv; |
174 | u16 __iomem *pci_ioaddr = NULL; | ||
175 | unsigned long pci_iolen; | ||
176 | struct orinoco_private *priv = NULL; | ||
177 | struct orinoco_pci_card *card; | 119 | struct orinoco_pci_card *card; |
178 | struct net_device *dev = NULL; | 120 | struct net_device *dev; |
121 | void __iomem *hermes_io; | ||
179 | 122 | ||
180 | err = pci_enable_device(pdev); | 123 | err = pci_enable_device(pdev); |
181 | if (err) { | 124 | if (err) { |
@@ -184,39 +127,32 @@ static int orinoco_pci_init_one(struct pci_dev *pdev, | |||
184 | } | 127 | } |
185 | 128 | ||
186 | err = pci_request_regions(pdev, DRIVER_NAME); | 129 | err = pci_request_regions(pdev, DRIVER_NAME); |
187 | if (err != 0) { | 130 | if (err) { |
188 | printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); | 131 | printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); |
189 | goto fail_resources; | 132 | goto fail_resources; |
190 | } | 133 | } |
191 | 134 | ||
192 | /* Resource 0 is mapped to the hermes registers */ | 135 | hermes_io = pci_iomap(pdev, 0, 0); |
193 | pci_iorange = pci_resource_start(pdev, 0); | 136 | if (!hermes_io) { |
194 | pci_iolen = pci_resource_len(pdev, 0); | 137 | printk(KERN_ERR PFX "Cannot remap chipset registers\n"); |
195 | pci_ioaddr = ioremap(pci_iorange, pci_iolen); | 138 | err = -EIO; |
196 | if (!pci_iorange) { | 139 | goto fail_map_hermes; |
197 | printk(KERN_ERR PFX "Cannot remap hardware registers\n"); | ||
198 | goto fail_map; | ||
199 | } | 140 | } |
200 | 141 | ||
201 | /* Allocate network device */ | 142 | /* Allocate network device */ |
202 | dev = alloc_orinocodev(sizeof(*card), orinoco_pci_cor_reset); | 143 | dev = alloc_orinocodev(sizeof(*card), orinoco_pci_cor_reset); |
203 | if (! dev) { | 144 | if (!dev) { |
145 | printk(KERN_ERR PFX "Cannot allocate network device\n"); | ||
204 | err = -ENOMEM; | 146 | err = -ENOMEM; |
205 | goto fail_alloc; | 147 | goto fail_alloc; |
206 | } | 148 | } |
207 | 149 | ||
208 | priv = netdev_priv(dev); | 150 | priv = netdev_priv(dev); |
209 | card = priv->card; | 151 | card = priv->card; |
210 | card->pci_ioaddr = pci_ioaddr; | ||
211 | dev->mem_start = pci_iorange; | ||
212 | dev->mem_end = pci_iorange + pci_iolen - 1; | ||
213 | SET_MODULE_OWNER(dev); | 152 | SET_MODULE_OWNER(dev); |
214 | SET_NETDEV_DEV(dev, &pdev->dev); | 153 | SET_NETDEV_DEV(dev, &pdev->dev); |
215 | 154 | ||
216 | hermes_struct_init(&priv->hw, pci_ioaddr, HERMES_32BIT_REGSPACING); | 155 | hermes_struct_init(&priv->hw, hermes_io, HERMES_32BIT_REGSPACING); |
217 | |||
218 | printk(KERN_DEBUG PFX "Detected device %s, mem:0x%lx-0x%lx, irq %d\n", | ||
219 | pci_name(pdev), dev->mem_start, dev->mem_end, pdev->irq); | ||
220 | 156 | ||
221 | err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, | 157 | err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, |
222 | dev->name, dev); | 158 | dev->name, dev); |
@@ -225,9 +161,7 @@ static int orinoco_pci_init_one(struct pci_dev *pdev, | |||
225 | err = -EBUSY; | 161 | err = -EBUSY; |
226 | goto fail_irq; | 162 | goto fail_irq; |
227 | } | 163 | } |
228 | dev->irq = pdev->irq; | ||
229 | 164 | ||
230 | /* Perform a COR reset to start the card */ | ||
231 | err = orinoco_pci_cor_reset(priv); | 165 | err = orinoco_pci_cor_reset(priv); |
232 | if (err) { | 166 | if (err) { |
233 | printk(KERN_ERR PFX "Initial reset failed\n"); | 167 | printk(KERN_ERR PFX "Initial reset failed\n"); |
@@ -236,11 +170,13 @@ static int orinoco_pci_init_one(struct pci_dev *pdev, | |||
236 | 170 | ||
237 | err = register_netdev(dev); | 171 | err = register_netdev(dev); |
238 | if (err) { | 172 | if (err) { |
239 | printk(KERN_ERR PFX "Failed to register net device\n"); | 173 | printk(KERN_ERR PFX "Cannot register network device\n"); |
240 | goto fail; | 174 | goto fail; |
241 | } | 175 | } |
242 | 176 | ||
243 | pci_set_drvdata(pdev, dev); | 177 | pci_set_drvdata(pdev, dev); |
178 | printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name, | ||
179 | pci_name(pdev)); | ||
244 | 180 | ||
245 | return 0; | 181 | return 0; |
246 | 182 | ||
@@ -252,9 +188,9 @@ static int orinoco_pci_init_one(struct pci_dev *pdev, | |||
252 | free_orinocodev(dev); | 188 | free_orinocodev(dev); |
253 | 189 | ||
254 | fail_alloc: | 190 | fail_alloc: |
255 | iounmap(pci_ioaddr); | 191 | pci_iounmap(pdev, hermes_io); |
256 | 192 | ||
257 | fail_map: | 193 | fail_map_hermes: |
258 | pci_release_regions(pdev); | 194 | pci_release_regions(pdev); |
259 | 195 | ||
260 | fail_resources: | 196 | fail_resources: |
@@ -267,87 +203,17 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev) | |||
267 | { | 203 | { |
268 | struct net_device *dev = pci_get_drvdata(pdev); | 204 | struct net_device *dev = pci_get_drvdata(pdev); |
269 | struct orinoco_private *priv = netdev_priv(dev); | 205 | struct orinoco_private *priv = netdev_priv(dev); |
270 | struct orinoco_pci_card *card = priv->card; | ||
271 | 206 | ||
272 | unregister_netdev(dev); | 207 | unregister_netdev(dev); |
273 | free_irq(dev->irq, dev); | 208 | free_irq(pdev->irq, dev); |
274 | pci_set_drvdata(pdev, NULL); | 209 | pci_set_drvdata(pdev, NULL); |
275 | free_orinocodev(dev); | 210 | free_orinocodev(dev); |
276 | iounmap(card->pci_ioaddr); | 211 | pci_iounmap(pdev, priv->hw.iobase); |
277 | pci_release_regions(pdev); | 212 | pci_release_regions(pdev); |
278 | pci_disable_device(pdev); | 213 | pci_disable_device(pdev); |
279 | } | 214 | } |
280 | 215 | ||
281 | static int orinoco_pci_suspend(struct pci_dev *pdev, pm_message_t state) | 216 | static struct pci_device_id orinoco_pci_id_table[] = { |
282 | { | ||
283 | struct net_device *dev = pci_get_drvdata(pdev); | ||
284 | struct orinoco_private *priv = netdev_priv(dev); | ||
285 | unsigned long flags; | ||
286 | int err; | ||
287 | |||
288 | |||
289 | err = orinoco_lock(priv, &flags); | ||
290 | if (err) { | ||
291 | printk(KERN_ERR "%s: hw_unavailable on orinoco_pci_suspend\n", | ||
292 | dev->name); | ||
293 | return err; | ||
294 | } | ||
295 | |||
296 | err = __orinoco_down(dev); | ||
297 | if (err) | ||
298 | printk(KERN_WARNING "%s: orinoco_pci_suspend(): Error %d downing interface\n", | ||
299 | dev->name, err); | ||
300 | |||
301 | netif_device_detach(dev); | ||
302 | |||
303 | priv->hw_unavailable++; | ||
304 | |||
305 | orinoco_unlock(priv, &flags); | ||
306 | |||
307 | pci_save_state(pdev); | ||
308 | pci_set_power_state(pdev, PCI_D3hot); | ||
309 | |||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | static int orinoco_pci_resume(struct pci_dev *pdev) | ||
314 | { | ||
315 | struct net_device *dev = pci_get_drvdata(pdev); | ||
316 | struct orinoco_private *priv = netdev_priv(dev); | ||
317 | unsigned long flags; | ||
318 | int err; | ||
319 | |||
320 | printk(KERN_DEBUG "%s: Orinoco-PCI waking up\n", dev->name); | ||
321 | |||
322 | pci_set_power_state(pdev, 0); | ||
323 | pci_restore_state(pdev); | ||
324 | |||
325 | err = orinoco_reinit_firmware(dev); | ||
326 | if (err) { | ||
327 | printk(KERN_ERR "%s: Error %d re-initializing firmware on orinoco_pci_resume()\n", | ||
328 | dev->name, err); | ||
329 | return err; | ||
330 | } | ||
331 | |||
332 | spin_lock_irqsave(&priv->lock, flags); | ||
333 | |||
334 | netif_device_attach(dev); | ||
335 | |||
336 | priv->hw_unavailable--; | ||
337 | |||
338 | if (priv->open && (! priv->hw_unavailable)) { | ||
339 | err = __orinoco_up(dev); | ||
340 | if (err) | ||
341 | printk(KERN_ERR "%s: Error %d restarting card on orinoco_pci_resume()\n", | ||
342 | dev->name, err); | ||
343 | } | ||
344 | |||
345 | spin_unlock_irqrestore(&priv->lock, flags); | ||
346 | |||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | static struct pci_device_id orinoco_pci_pci_id_table[] = { | ||
351 | /* Intersil Prism 3 */ | 217 | /* Intersil Prism 3 */ |
352 | {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,}, | 218 | {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,}, |
353 | /* Intersil Prism 2.5 */ | 219 | /* Intersil Prism 2.5 */ |
@@ -357,11 +223,11 @@ static struct pci_device_id orinoco_pci_pci_id_table[] = { | |||
357 | {0,}, | 223 | {0,}, |
358 | }; | 224 | }; |
359 | 225 | ||
360 | MODULE_DEVICE_TABLE(pci, orinoco_pci_pci_id_table); | 226 | MODULE_DEVICE_TABLE(pci, orinoco_pci_id_table); |
361 | 227 | ||
362 | static struct pci_driver orinoco_pci_driver = { | 228 | static struct pci_driver orinoco_pci_driver = { |
363 | .name = DRIVER_NAME, | 229 | .name = DRIVER_NAME, |
364 | .id_table = orinoco_pci_pci_id_table, | 230 | .id_table = orinoco_pci_id_table, |
365 | .probe = orinoco_pci_init_one, | 231 | .probe = orinoco_pci_init_one, |
366 | .remove = __devexit_p(orinoco_pci_remove_one), | 232 | .remove = __devexit_p(orinoco_pci_remove_one), |
367 | .suspend = orinoco_pci_suspend, | 233 | .suspend = orinoco_pci_suspend, |
diff --git a/drivers/net/wireless/orinoco_pci.h b/drivers/net/wireless/orinoco_pci.h new file mode 100644 index 000000000000..7eb1e08113e0 --- /dev/null +++ b/drivers/net/wireless/orinoco_pci.h | |||
@@ -0,0 +1,104 @@ | |||
1 | /* orinoco_pci.h | ||
2 | * | ||
3 | * Common code for all Orinoco drivers for PCI devices, including | ||
4 | * both native PCI and PCMCIA-to-PCI bridges. | ||
5 | * | ||
6 | * Copyright (C) 2005, Pavel Roskin. | ||
7 | * See orinoco.c for license. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ORINOCO_PCI_H | ||
11 | #define _ORINOCO_PCI_H | ||
12 | |||
13 | #include <linux/netdevice.h> | ||
14 | |||
15 | /* Driver specific data */ | ||
16 | struct orinoco_pci_card { | ||
17 | void __iomem *bridge_io; | ||
18 | void __iomem *attr_io; | ||
19 | }; | ||
20 | |||
21 | #ifdef CONFIG_PM | ||
22 | static int orinoco_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
23 | { | ||
24 | struct net_device *dev = pci_get_drvdata(pdev); | ||
25 | struct orinoco_private *priv = netdev_priv(dev); | ||
26 | unsigned long flags; | ||
27 | int err; | ||
28 | |||
29 | err = orinoco_lock(priv, &flags); | ||
30 | if (err) { | ||
31 | printk(KERN_ERR "%s: cannot lock hardware for suspend\n", | ||
32 | dev->name); | ||
33 | return err; | ||
34 | } | ||
35 | |||
36 | err = __orinoco_down(dev); | ||
37 | if (err) | ||
38 | printk(KERN_WARNING "%s: error %d bringing interface down " | ||
39 | "for suspend\n", dev->name, err); | ||
40 | |||
41 | netif_device_detach(dev); | ||
42 | |||
43 | priv->hw_unavailable++; | ||
44 | |||
45 | orinoco_unlock(priv, &flags); | ||
46 | |||
47 | free_irq(pdev->irq, dev); | ||
48 | pci_save_state(pdev); | ||
49 | pci_disable_device(pdev); | ||
50 | pci_set_power_state(pdev, PCI_D3hot); | ||
51 | |||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static int orinoco_pci_resume(struct pci_dev *pdev) | ||
56 | { | ||
57 | struct net_device *dev = pci_get_drvdata(pdev); | ||
58 | struct orinoco_private *priv = netdev_priv(dev); | ||
59 | unsigned long flags; | ||
60 | int err; | ||
61 | |||
62 | pci_set_power_state(pdev, 0); | ||
63 | pci_enable_device(pdev); | ||
64 | pci_restore_state(pdev); | ||
65 | |||
66 | err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, | ||
67 | dev->name, dev); | ||
68 | if (err) { | ||
69 | printk(KERN_ERR "%s: cannot re-allocate IRQ on resume\n", | ||
70 | dev->name); | ||
71 | pci_disable_device(pdev); | ||
72 | return -EBUSY; | ||
73 | } | ||
74 | |||
75 | err = orinoco_reinit_firmware(dev); | ||
76 | if (err) { | ||
77 | printk(KERN_ERR "%s: error %d re-initializing firmware " | ||
78 | "on resume\n", dev->name, err); | ||
79 | return err; | ||
80 | } | ||
81 | |||
82 | spin_lock_irqsave(&priv->lock, flags); | ||
83 | |||
84 | netif_device_attach(dev); | ||
85 | |||
86 | priv->hw_unavailable--; | ||
87 | |||
88 | if (priv->open && (! priv->hw_unavailable)) { | ||
89 | err = __orinoco_up(dev); | ||
90 | if (err) | ||
91 | printk(KERN_ERR "%s: Error %d restarting card on resume\n", | ||
92 | dev->name, err); | ||
93 | } | ||
94 | |||
95 | spin_unlock_irqrestore(&priv->lock, flags); | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | #else | ||
100 | #define orinoco_pci_suspend NULL | ||
101 | #define orinoco_pci_resume NULL | ||
102 | #endif | ||
103 | |||
104 | #endif /* _ORINOCO_PCI_H */ | ||
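
The header just added is meant to be pulled into each PCI-bridge flavour of the driver. A hedged sketch of how a bus-glue module consumes it, mirroring what the orinoco_plx and orinoco_tmd changes further down do; the driver name and the omitted fields here are placeholders, not part of the patch:

#include <linux/pci.h>
#include "orinoco.h"
#include "orinoco_pci.h"	/* struct orinoco_pci_card + shared PM hooks */

static struct pci_driver example_bridge_driver = {
	.name		= "orinoco_example",
	/* .id_table, .probe and .remove stay bridge specific and are
	 * left out of this sketch. */
	.suspend	= orinoco_pci_suspend,	/* both NULL when !CONFIG_PM */
	.resume		= orinoco_pci_resume,
};
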
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c index 210e73776545..84f696c77551 100644 --- a/drivers/net/wireless/orinoco_plx.c +++ b/drivers/net/wireless/orinoco_plx.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * Driver for Prism II devices which would usually be driven by orinoco_cs, | 3 | * Driver for Prism II devices which would usually be driven by orinoco_cs, |
4 | * but are connected to the PCI bus by a PLX9052. | 4 | * but are connected to the PCI bus by a PLX9052. |
5 | * | 5 | * |
6 | * Current maintainers (as of 29 September 2003) are: | 6 | * Current maintainers are: |
7 | * Pavel Roskin <proski AT gnu.org> | 7 | * Pavel Roskin <proski AT gnu.org> |
8 | * and David Gibson <hermes AT gibson.dropbear.id.au> | 8 | * and David Gibson <hermes AT gibson.dropbear.id.au> |
9 | * | 9 | * |
@@ -30,38 +30,18 @@ | |||
30 | * other provisions required by the GPL. If you do not delete the | 30 | * other provisions required by the GPL. If you do not delete the |
31 | * provisions above, a recipient may use your version of this file | 31 | * provisions above, a recipient may use your version of this file |
32 | * under either the MPL or the GPL. | 32 | * under either the MPL or the GPL. |
33 | |||
34 | * Caution: this is experimental and probably buggy. For success and | ||
35 | * failure reports for different cards and adaptors, see | ||
36 | * orinoco_plx_pci_id_table near the end of the file. If you have a | ||
37 | * card we don't have the PCI id for, and looks like it should work, | ||
38 | * drop me mail with the id and "it works"/"it doesn't work". | ||
39 | * | ||
40 | * Note: if everything gets detected fine but it doesn't actually send | ||
41 | * or receive packets, your first port of call should probably be to | ||
42 | * try newer firmware in the card. Especially if you're doing Ad-Hoc | ||
43 | * modes. | ||
44 | * | ||
45 | * The actual driving is done by orinoco.c, this is just resource | ||
46 | * allocation stuff. The explanation below is courtesy of Ryan Niemi | ||
47 | * on the linux-wlan-ng list at | ||
48 | * http://archives.neohapsis.com/archives/dev/linux-wlan/2001-q1/0026.html | ||
49 | * | 33 | * |
50 | * The PLX9052-based cards (WL11000 and several others) are a | 34 | * Here's the general details on how the PLX9052 adapter works: |
51 | * different beast than the usual PCMCIA-based PRISM2 configuration | ||
52 | * expected by wlan-ng. Here's the general details on how the WL11000 | ||
53 | * PCI adapter works: | ||
54 | * | 35 | * |
55 | * - Two PCI I/O address spaces, one 0x80 long which contains the | 36 | * - Two PCI I/O address spaces, one 0x80 long which contains the |
56 | * PLX9052 registers, and one that's 0x40 long mapped to the PCMCIA | 37 | * PLX9052 registers, and one that's 0x40 long mapped to the PCMCIA |
57 | * slot I/O address space. | 38 | * slot I/O address space. |
58 | * | 39 | * |
59 | * - One PCI memory address space, mapped to the PCMCIA memory space | 40 | * - One PCI memory address space, mapped to the PCMCIA attribute space |
60 | * (containing the CIS). | 41 | * (containing the CIS). |
61 | * | 42 | * |
62 | * After identifying the I/O and memory space, you can read through | 43 | * Using the latter, you can read through the CIS data to make sure the |
63 | * the memory space to confirm the CIS's device ID or manufacturer ID | 44 | * card is compatible with the driver. Keep in mind that the PCMCIA |
64 | * to make sure it's the expected card. qKeep in mind that the PCMCIA | ||
65 | * spec specifies the CIS as the lower 8 bits of each word read from | 45 | * spec specifies the CIS as the lower 8 bits of each word read from |
66 | * the CIS, so to read the bytes of the CIS, read every other byte | 46 | * the CIS, so to read the bytes of the CIS, read every other byte |
67 | * (0,2,4,...). Passing that test, you need to enable the I/O address | 47 | * (0,2,4,...). Passing that test, you need to enable the I/O address |
@@ -71,7 +51,7 @@ | |||
71 | * within the PCI memory space. Write 0x41 to the COR register to | 51 | * within the PCI memory space. Write 0x41 to the COR register to |
72 | * enable I/O mode and to select level triggered interrupts. To | 52 | * enable I/O mode and to select level triggered interrupts. To |
73 | * confirm you actually succeeded, read the COR register back and make | 53 | * confirm you actually succeeded, read the COR register back and make |
74 | * sure it actually got set to 0x41, incase you have an unexpected | 54 | * sure it actually got set to 0x41, in case you have an unexpected |
75 | * card inserted. | 55 | * card inserted. |
76 | * | 56 | * |
77 | * Following that, you can treat the second PCI I/O address space (the | 57 | * Following that, you can treat the second PCI I/O address space (the |
@@ -101,16 +81,6 @@ | |||
101 | * that, I've hot-swapped a number of times during debugging and | 81 | * that, I've hot-swapped a number of times during debugging and |
102 | * driver development for various reasons (stuck WAIT# line after the | 82 | * driver development for various reasons (stuck WAIT# line after the |
103 | * radio card's firmware locks up). | 83 | * radio card's firmware locks up). |
104 | * | ||
105 | * Hope this is enough info for someone to add PLX9052 support to the | ||
106 | * wlan-ng card. In the case of the WL11000, the PCI ID's are | ||
107 | * 0x1639/0x0200, with matching subsystem ID's. Other PLX9052-based | ||
108 | * manufacturers other than Eumitcom (or on cards other than the | ||
109 | * WL11000) may have different PCI ID's. | ||
110 | * | ||
111 | * If anyone needs any more specific info, let me know. I haven't had | ||
112 | * time to implement support myself yet, and with the way things are | ||
113 | * going, might not have time for a while.. | ||
114 | */ | 84 | */ |
115 | 85 | ||
116 | #define DRIVER_NAME "orinoco_plx" | 86 | #define DRIVER_NAME "orinoco_plx" |
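
The trimmed header comment above still compresses the whole bring-up recipe into prose. The following is an illustration only, not part of the patch: the same recipe written against the ioread/iowrite accessors this patch switches to, using the COR_OFFSET/COR_VALUE constants defined just below and assuming attr_io maps BAR 2 (the PCMCIA attribute space) as in orinoco_plx_init_one().

/* Illustration of the bring-up described in the comment above. */
static int example_plx_bringup(void __iomem *attr_io)
{
	u8 cor;
	int i;

	/* CIS bytes sit in the low 8 bits of each 16-bit word, so read
	 * every other byte: offsets 0, 2, 4, ... */
	for (i = 0; i < 8; i++)
		printk(KERN_DEBUG "CIS[%d] = %02x\n", i,
		       ioread8(attr_io + (i << 1)));

	/* Write 0x41 (function enable | level-triggered IRQ) to the COR,
	 * then read it back to confirm the expected card answered. */
	iowrite8(COR_VALUE, attr_io + COR_OFFSET);
	cor = ioread8(attr_io + COR_OFFSET);
	if (cor != COR_VALUE)
		return -ENODEV;

	return 0;
}
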
@@ -125,6 +95,7 @@ | |||
125 | #include <pcmcia/cisreg.h> | 95 | #include <pcmcia/cisreg.h> |
126 | 96 | ||
127 | #include "orinoco.h" | 97 | #include "orinoco.h" |
98 | #include "orinoco_pci.h" | ||
128 | 99 | ||
129 | #define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */ | 100 | #define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */ |
130 | #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ | 101 | #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ |
@@ -134,30 +105,20 @@ | |||
134 | #define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */ | 105 | #define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */ |
135 | #define PLX_INTCSR_INTEN (1<<6) /* Interrupt Enable bit */ | 106 | #define PLX_INTCSR_INTEN (1<<6) /* Interrupt Enable bit */ |
136 | 107 | ||
137 | static const u8 cis_magic[] = { | ||
138 | 0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67 | ||
139 | }; | ||
140 | |||
141 | /* Orinoco PLX specific data */ | ||
142 | struct orinoco_plx_card { | ||
143 | void __iomem *attr_mem; | ||
144 | }; | ||
145 | |||
146 | /* | 108 | /* |
147 | * Do a soft reset of the card using the Configuration Option Register | 109 | * Do a soft reset of the card using the Configuration Option Register |
148 | */ | 110 | */ |
149 | static int orinoco_plx_cor_reset(struct orinoco_private *priv) | 111 | static int orinoco_plx_cor_reset(struct orinoco_private *priv) |
150 | { | 112 | { |
151 | hermes_t *hw = &priv->hw; | 113 | hermes_t *hw = &priv->hw; |
152 | struct orinoco_plx_card *card = priv->card; | 114 | struct orinoco_pci_card *card = priv->card; |
153 | u8 __iomem *attr_mem = card->attr_mem; | ||
154 | unsigned long timeout; | 115 | unsigned long timeout; |
155 | u16 reg; | 116 | u16 reg; |
156 | 117 | ||
157 | writeb(COR_VALUE | COR_RESET, attr_mem + COR_OFFSET); | 118 | iowrite8(COR_VALUE | COR_RESET, card->attr_io + COR_OFFSET); |
158 | mdelay(1); | 119 | mdelay(1); |
159 | 120 | ||
160 | writeb(COR_VALUE, attr_mem + COR_OFFSET); | 121 | iowrite8(COR_VALUE, card->attr_io + COR_OFFSET); |
161 | mdelay(1); | 122 | mdelay(1); |
162 | 123 | ||
163 | /* Just in case, wait more until the card is no longer busy */ | 124 | /* Just in case, wait more until the card is no longer busy */ |
@@ -168,7 +129,7 @@ static int orinoco_plx_cor_reset(struct orinoco_private *priv) | |||
168 | reg = hermes_read_regn(hw, CMD); | 129 | reg = hermes_read_regn(hw, CMD); |
169 | } | 130 | } |
170 | 131 | ||
171 | /* Did we timeout ? */ | 132 | /* Still busy? */ |
172 | if (reg & HERMES_CMD_BUSY) { | 133 | if (reg & HERMES_CMD_BUSY) { |
173 | printk(KERN_ERR PFX "Busy timeout\n"); | 134 | printk(KERN_ERR PFX "Busy timeout\n"); |
174 | return -ETIMEDOUT; | 135 | return -ETIMEDOUT; |
@@ -177,20 +138,55 @@ static int orinoco_plx_cor_reset(struct orinoco_private *priv) | |||
177 | return 0; | 138 | return 0; |
178 | } | 139 | } |
179 | 140 | ||
141 | static int orinoco_plx_hw_init(struct orinoco_pci_card *card) | ||
142 | { | ||
143 | int i; | ||
144 | u32 csr_reg; | ||
145 | static const u8 cis_magic[] = { | ||
146 | 0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67 | ||
147 | }; | ||
148 | |||
149 | printk(KERN_DEBUG PFX "CIS: "); | ||
150 | for (i = 0; i < 16; i++) { | ||
151 | printk("%02X:", ioread8(card->attr_io + (i << 1))); | ||
152 | } | ||
153 | printk("\n"); | ||
154 | |||
155 | /* Verify whether a supported PC card is present */ | ||
156 | /* FIXME: we probably need to be smarter about this */ | ||
157 | for (i = 0; i < sizeof(cis_magic); i++) { | ||
158 | if (cis_magic[i] != ioread8(card->attr_io + (i << 1))) { | ||
159 | printk(KERN_ERR PFX "The CIS value of Prism2 PC " | ||
160 | "card is unexpected\n"); | ||
161 | return -ENODEV; | ||
162 | } | ||
163 | } | ||
164 | |||
165 | /* bjoern: We need to tell the card to enable interrupts, in | ||
166 | case the serial eprom didn't do this already. See the | ||
167 | PLX9052 data book, p8-1 and 8-24 for reference. */ | ||
168 | csr_reg = ioread32(card->bridge_io + PLX_INTCSR); | ||
169 | if (!(csr_reg & PLX_INTCSR_INTEN)) { | ||
170 | csr_reg |= PLX_INTCSR_INTEN; | ||
171 | iowrite32(csr_reg, card->bridge_io + PLX_INTCSR); | ||
172 | csr_reg = ioread32(card->bridge_io + PLX_INTCSR); | ||
173 | if (!(csr_reg & PLX_INTCSR_INTEN)) { | ||
174 | printk(KERN_ERR PFX "Cannot enable interrupts\n"); | ||
175 | return -EIO; | ||
176 | } | ||
177 | } | ||
178 | |||
179 | return 0; | ||
180 | } | ||
180 | 181 | ||
181 | static int orinoco_plx_init_one(struct pci_dev *pdev, | 182 | static int orinoco_plx_init_one(struct pci_dev *pdev, |
182 | const struct pci_device_id *ent) | 183 | const struct pci_device_id *ent) |
183 | { | 184 | { |
184 | int err = 0; | 185 | int err; |
185 | u8 __iomem *attr_mem = NULL; | 186 | struct orinoco_private *priv; |
186 | u32 csr_reg, plx_addr; | 187 | struct orinoco_pci_card *card; |
187 | struct orinoco_private *priv = NULL; | 188 | struct net_device *dev; |
188 | struct orinoco_plx_card *card; | 189 | void __iomem *hermes_io, *attr_io, *bridge_io; |
189 | unsigned long pccard_ioaddr = 0; | ||
190 | unsigned long pccard_iolen = 0; | ||
191 | struct net_device *dev = NULL; | ||
192 | void __iomem *mem; | ||
193 | int i; | ||
194 | 190 | ||
195 | err = pci_enable_device(pdev); | 191 | err = pci_enable_device(pdev); |
196 | if (err) { | 192 | if (err) { |
@@ -199,30 +195,30 @@ static int orinoco_plx_init_one(struct pci_dev *pdev, | |||
199 | } | 195 | } |
200 | 196 | ||
201 | err = pci_request_regions(pdev, DRIVER_NAME); | 197 | err = pci_request_regions(pdev, DRIVER_NAME); |
202 | if (err != 0) { | 198 | if (err) { |
203 | printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); | 199 | printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); |
204 | goto fail_resources; | 200 | goto fail_resources; |
205 | } | 201 | } |
206 | 202 | ||
207 | /* Resource 1 is mapped to PLX-specific registers */ | 203 | bridge_io = pci_iomap(pdev, 1, 0); |
208 | plx_addr = pci_resource_start(pdev, 1); | 204 | if (!bridge_io) { |
205 | printk(KERN_ERR PFX "Cannot map bridge registers\n"); | ||
206 | err = -EIO; | ||
207 | goto fail_map_bridge; | ||
208 | } | ||
209 | 209 | ||
210 | /* Resource 2 is mapped to the PCMCIA attribute memory */ | 210 | attr_io = pci_iomap(pdev, 2, 0); |
211 | attr_mem = ioremap(pci_resource_start(pdev, 2), | 211 | if (!attr_io) { |
212 | pci_resource_len(pdev, 2)); | 212 | printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n"); |
213 | if (!attr_mem) { | 213 | err = -EIO; |
214 | printk(KERN_ERR PFX "Cannot remap PCMCIA space\n"); | ||
215 | goto fail_map_attr; | 214 | goto fail_map_attr; |
216 | } | 215 | } |
217 | 216 | ||
218 | /* Resource 3 is mapped to the PCMCIA I/O address space */ | 217 | hermes_io = pci_iomap(pdev, 3, 0); |
219 | pccard_ioaddr = pci_resource_start(pdev, 3); | 218 | if (!hermes_io) { |
220 | pccard_iolen = pci_resource_len(pdev, 3); | 219 | printk(KERN_ERR PFX "Cannot map chipset registers\n"); |
221 | 220 | err = -EIO; | |
222 | mem = pci_iomap(pdev, 3, 0); | 221 | goto fail_map_hermes; |
223 | if (!mem) { | ||
224 | err = -ENOMEM; | ||
225 | goto fail_map_io; | ||
226 | } | 222 | } |
227 | 223 | ||
228 | /* Allocate network device */ | 224 | /* Allocate network device */ |
@@ -235,16 +231,12 @@ static int orinoco_plx_init_one(struct pci_dev *pdev, | |||
235 | 231 | ||
236 | priv = netdev_priv(dev); | 232 | priv = netdev_priv(dev); |
237 | card = priv->card; | 233 | card = priv->card; |
238 | card->attr_mem = attr_mem; | 234 | card->bridge_io = bridge_io; |
239 | dev->base_addr = pccard_ioaddr; | 235 | card->attr_io = attr_io; |
240 | SET_MODULE_OWNER(dev); | 236 | SET_MODULE_OWNER(dev); |
241 | SET_NETDEV_DEV(dev, &pdev->dev); | 237 | SET_NETDEV_DEV(dev, &pdev->dev); |
242 | 238 | ||
243 | hermes_struct_init(&priv->hw, mem, HERMES_16BIT_REGSPACING); | 239 | hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); |
244 | |||
245 | printk(KERN_DEBUG PFX "Detected Orinoco/Prism2 PLX device " | ||
246 | "at %s irq:%d, io addr:0x%lx\n", pci_name(pdev), pdev->irq, | ||
247 | pccard_ioaddr); | ||
248 | 240 | ||
249 | err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, | 241 | err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, |
250 | dev->name, dev); | 242 | dev->name, dev); |
@@ -253,20 +245,11 @@ static int orinoco_plx_init_one(struct pci_dev *pdev, | |||
253 | err = -EBUSY; | 245 | err = -EBUSY; |
254 | goto fail_irq; | 246 | goto fail_irq; |
255 | } | 247 | } |
256 | dev->irq = pdev->irq; | ||
257 | 248 | ||
258 | /* bjoern: We need to tell the card to enable interrupts, in | 249 | err = orinoco_plx_hw_init(card); |
259 | case the serial eprom didn't do this already. See the | 250 | if (err) { |
260 | PLX9052 data book, p8-1 and 8-24 for reference. */ | 251 | printk(KERN_ERR PFX "Hardware initialization failed\n"); |
261 | csr_reg = inl(plx_addr + PLX_INTCSR); | 252 | goto fail; |
262 | if (!(csr_reg & PLX_INTCSR_INTEN)) { | ||
263 | csr_reg |= PLX_INTCSR_INTEN; | ||
264 | outl(csr_reg, plx_addr + PLX_INTCSR); | ||
265 | csr_reg = inl(plx_addr + PLX_INTCSR); | ||
266 | if (!(csr_reg & PLX_INTCSR_INTEN)) { | ||
267 | printk(KERN_ERR PFX "Cannot enable interrupts\n"); | ||
268 | goto fail; | ||
269 | } | ||
270 | } | 253 | } |
271 | 254 | ||
272 | err = orinoco_plx_cor_reset(priv); | 255 | err = orinoco_plx_cor_reset(priv); |
@@ -275,23 +258,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev, | |||
275 | goto fail; | 258 | goto fail; |
276 | } | 259 | } |
277 | 260 | ||
278 | printk(KERN_DEBUG PFX "CIS: "); | ||
279 | for (i = 0; i < 16; i++) { | ||
280 | printk("%02X:", readb(attr_mem + 2*i)); | ||
281 | } | ||
282 | printk("\n"); | ||
283 | |||
284 | /* Verify whether a supported PC card is present */ | ||
285 | /* FIXME: we probably need to be smarted about this */ | ||
286 | for (i = 0; i < sizeof(cis_magic); i++) { | ||
287 | if (cis_magic[i] != readb(attr_mem +2*i)) { | ||
288 | printk(KERN_ERR PFX "The CIS value of Prism2 PC " | ||
289 | "card is unexpected\n"); | ||
290 | err = -EIO; | ||
291 | goto fail; | ||
292 | } | ||
293 | } | ||
294 | |||
295 | err = register_netdev(dev); | 261 | err = register_netdev(dev); |
296 | if (err) { | 262 | if (err) { |
297 | printk(KERN_ERR PFX "Cannot register network device\n"); | 263 | printk(KERN_ERR PFX "Cannot register network device\n"); |
@@ -299,6 +265,8 @@ static int orinoco_plx_init_one(struct pci_dev *pdev, | |||
299 | } | 265 | } |
300 | 266 | ||
301 | pci_set_drvdata(pdev, dev); | 267 | pci_set_drvdata(pdev, dev); |
268 | printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name, | ||
269 | pci_name(pdev)); | ||
302 | 270 | ||
303 | return 0; | 271 | return 0; |
304 | 272 | ||
@@ -310,12 +278,15 @@ static int orinoco_plx_init_one(struct pci_dev *pdev, | |||
310 | free_orinocodev(dev); | 278 | free_orinocodev(dev); |
311 | 279 | ||
312 | fail_alloc: | 280 | fail_alloc: |
313 | pci_iounmap(pdev, mem); | 281 | pci_iounmap(pdev, hermes_io); |
314 | 282 | ||
315 | fail_map_io: | 283 | fail_map_hermes: |
316 | iounmap(attr_mem); | 284 | pci_iounmap(pdev, attr_io); |
317 | 285 | ||
318 | fail_map_attr: | 286 | fail_map_attr: |
287 | pci_iounmap(pdev, bridge_io); | ||
288 | |||
289 | fail_map_bridge: | ||
319 | pci_release_regions(pdev); | 290 | pci_release_regions(pdev); |
320 | 291 | ||
321 | fail_resources: | 292 | fail_resources: |
@@ -328,23 +299,20 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev) | |||
328 | { | 299 | { |
329 | struct net_device *dev = pci_get_drvdata(pdev); | 300 | struct net_device *dev = pci_get_drvdata(pdev); |
330 | struct orinoco_private *priv = netdev_priv(dev); | 301 | struct orinoco_private *priv = netdev_priv(dev); |
331 | struct orinoco_plx_card *card = priv->card; | 302 | struct orinoco_pci_card *card = priv->card; |
332 | u8 __iomem *attr_mem = card->attr_mem; | ||
333 | |||
334 | BUG_ON(! dev); | ||
335 | 303 | ||
336 | unregister_netdev(dev); | 304 | unregister_netdev(dev); |
337 | free_irq(dev->irq, dev); | 305 | free_irq(pdev->irq, dev); |
338 | pci_set_drvdata(pdev, NULL); | 306 | pci_set_drvdata(pdev, NULL); |
339 | free_orinocodev(dev); | 307 | free_orinocodev(dev); |
340 | pci_iounmap(pdev, priv->hw.iobase); | 308 | pci_iounmap(pdev, priv->hw.iobase); |
341 | iounmap(attr_mem); | 309 | pci_iounmap(pdev, card->attr_io); |
310 | pci_iounmap(pdev, card->bridge_io); | ||
342 | pci_release_regions(pdev); | 311 | pci_release_regions(pdev); |
343 | pci_disable_device(pdev); | 312 | pci_disable_device(pdev); |
344 | } | 313 | } |
345 | 314 | ||
346 | 315 | static struct pci_device_id orinoco_plx_id_table[] = { | |
347 | static struct pci_device_id orinoco_plx_pci_id_table[] = { | ||
348 | {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */ | 316 | {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */ |
349 | {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */ | 317 | {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */ |
350 | {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */ | 318 | {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */ |
@@ -362,13 +330,15 @@ static struct pci_device_id orinoco_plx_pci_id_table[] = { | |||
362 | {0,}, | 330 | {0,}, |
363 | }; | 331 | }; |
364 | 332 | ||
365 | MODULE_DEVICE_TABLE(pci, orinoco_plx_pci_id_table); | 333 | MODULE_DEVICE_TABLE(pci, orinoco_plx_id_table); |
366 | 334 | ||
367 | static struct pci_driver orinoco_plx_driver = { | 335 | static struct pci_driver orinoco_plx_driver = { |
368 | .name = DRIVER_NAME, | 336 | .name = DRIVER_NAME, |
369 | .id_table = orinoco_plx_pci_id_table, | 337 | .id_table = orinoco_plx_id_table, |
370 | .probe = orinoco_plx_init_one, | 338 | .probe = orinoco_plx_init_one, |
371 | .remove = __devexit_p(orinoco_plx_remove_one), | 339 | .remove = __devexit_p(orinoco_plx_remove_one), |
340 | .suspend = orinoco_pci_suspend, | ||
341 | .resume = orinoco_pci_resume, | ||
372 | }; | 342 | }; |
373 | 343 | ||
374 | static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION | 344 | static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION |
@@ -388,7 +358,6 @@ static int __init orinoco_plx_init(void) | |||
388 | static void __exit orinoco_plx_exit(void) | 358 | static void __exit orinoco_plx_exit(void) |
389 | { | 359 | { |
390 | pci_unregister_driver(&orinoco_plx_driver); | 360 | pci_unregister_driver(&orinoco_plx_driver); |
391 | ssleep(1); | ||
392 | } | 361 | } |
393 | 362 | ||
394 | module_init(orinoco_plx_init); | 363 | module_init(orinoco_plx_init); |
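
The probe conversion in this file (and in orinoco_tmd.c below) settles on one common shape: one pci_iomap() per BAR plus a goto-unwind error path. A stripped-down sketch of that shape, with placeholder names rather than anything from the patch:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto fail_resources;

	regs = pci_iomap(pdev, 0, 0);	/* BAR 0, whole length */
	if (!regs) {
		err = -EIO;
		goto fail_map;
	}

	/* ...device-specific setup would go here... */
	return 0;

 fail_map:
	pci_release_regions(pdev);
 fail_resources:
	pci_disable_device(pdev);
	return err;
}
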
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c index 5e68b7026186..d2b4decb7a7d 100644 --- a/drivers/net/wireless/orinoco_tmd.c +++ b/drivers/net/wireless/orinoco_tmd.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* orinoco_tmd.c | 1 | /* orinoco_tmd.c |
2 | * | 2 | * |
3 | * Driver for Prism II devices which would usually be driven by orinoco_cs, | 3 | * Driver for Prism II devices which would usually be driven by orinoco_cs, |
4 | * but are connected to the PCI bus by a TMD7160. | 4 | * but are connected to the PCI bus by a TMD7160. |
5 | * | 5 | * |
@@ -26,25 +26,13 @@ | |||
26 | * other provisions required by the GPL. If you do not delete the | 26 | * other provisions required by the GPL. If you do not delete the |
27 | * provisions above, a recipient may use your version of this file | 27 | * provisions above, a recipient may use your version of this file |
28 | * under either the MPL or the GPL. | 28 | * under either the MPL or the GPL. |
29 | |||
30 | * Caution: this is experimental and probably buggy. For success and | ||
31 | * failure reports for different cards and adaptors, see | ||
32 | * orinoco_tmd_pci_id_table near the end of the file. If you have a | ||
33 | * card we don't have the PCI id for, and looks like it should work, | ||
34 | * drop me mail with the id and "it works"/"it doesn't work". | ||
35 | * | ||
36 | * Note: if everything gets detected fine but it doesn't actually send | ||
37 | * or receive packets, your first port of call should probably be to | ||
38 | * try newer firmware in the card. Especially if you're doing Ad-Hoc | ||
39 | * modes | ||
40 | * | 29 | * |
41 | * The actual driving is done by orinoco.c, this is just resource | 30 | * The actual driving is done by orinoco.c, this is just resource |
42 | * allocation stuff. | 31 | * allocation stuff. |
43 | * | 32 | * |
44 | * This driver is modeled after the orinoco_plx driver. The main | 33 | * This driver is modeled after the orinoco_plx driver. The main |
45 | * difference is that the TMD chip has only IO port ranges and no | 34 | * difference is that the TMD chip has only IO port ranges and doesn't |
46 | * memory space, i.e. no access to the CIS. Compared to the PLX chip, | 35 | * provide access to the PCMCIA attribute space. |
47 | * the io range functionalities are exchanged. | ||
48 | * | 36 | * |
49 | * Pheecom sells cards with the TMD chip as "ASIC version" | 37 | * Pheecom sells cards with the TMD chip as "ASIC version" |
50 | */ | 38 | */ |
@@ -61,32 +49,26 @@ | |||
61 | #include <pcmcia/cisreg.h> | 49 | #include <pcmcia/cisreg.h> |
62 | 50 | ||
63 | #include "orinoco.h" | 51 | #include "orinoco.h" |
52 | #include "orinoco_pci.h" | ||
64 | 53 | ||
65 | #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ | 54 | #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ |
66 | #define COR_RESET (0x80) /* reset bit in the COR register */ | 55 | #define COR_RESET (0x80) /* reset bit in the COR register */ |
67 | #define TMD_RESET_TIME (500) /* milliseconds */ | 56 | #define TMD_RESET_TIME (500) /* milliseconds */ |
68 | 57 | ||
69 | /* Orinoco TMD specific data */ | ||
70 | struct orinoco_tmd_card { | ||
71 | u32 tmd_io; | ||
72 | }; | ||
73 | |||
74 | |||
75 | /* | 58 | /* |
76 | * Do a soft reset of the card using the Configuration Option Register | 59 | * Do a soft reset of the card using the Configuration Option Register |
77 | */ | 60 | */ |
78 | static int orinoco_tmd_cor_reset(struct orinoco_private *priv) | 61 | static int orinoco_tmd_cor_reset(struct orinoco_private *priv) |
79 | { | 62 | { |
80 | hermes_t *hw = &priv->hw; | 63 | hermes_t *hw = &priv->hw; |
81 | struct orinoco_tmd_card *card = priv->card; | 64 | struct orinoco_pci_card *card = priv->card; |
82 | u32 addr = card->tmd_io; | ||
83 | unsigned long timeout; | 65 | unsigned long timeout; |
84 | u16 reg; | 66 | u16 reg; |
85 | 67 | ||
86 | outb(COR_VALUE | COR_RESET, addr); | 68 | iowrite8(COR_VALUE | COR_RESET, card->bridge_io); |
87 | mdelay(1); | 69 | mdelay(1); |
88 | 70 | ||
89 | outb(COR_VALUE, addr); | 71 | iowrite8(COR_VALUE, card->bridge_io); |
90 | mdelay(1); | 72 | mdelay(1); |
91 | 73 | ||
92 | /* Just in case, wait more until the card is no longer busy */ | 74 | /* Just in case, wait more until the card is no longer busy */ |
@@ -97,7 +79,7 @@ static int orinoco_tmd_cor_reset(struct orinoco_private *priv) | |||
97 | reg = hermes_read_regn(hw, CMD); | 79 | reg = hermes_read_regn(hw, CMD); |
98 | } | 80 | } |
99 | 81 | ||
100 | /* Did we timeout ? */ | 82 | /* Still busy? */ |
101 | if (reg & HERMES_CMD_BUSY) { | 83 | if (reg & HERMES_CMD_BUSY) { |
102 | printk(KERN_ERR PFX "Busy timeout\n"); | 84 | printk(KERN_ERR PFX "Busy timeout\n"); |
103 | return -ETIMEDOUT; | 85 | return -ETIMEDOUT; |
@@ -110,11 +92,11 @@ static int orinoco_tmd_cor_reset(struct orinoco_private *priv) | |||
110 | static int orinoco_tmd_init_one(struct pci_dev *pdev, | 92 | static int orinoco_tmd_init_one(struct pci_dev *pdev, |
111 | const struct pci_device_id *ent) | 93 | const struct pci_device_id *ent) |
112 | { | 94 | { |
113 | int err = 0; | 95 | int err; |
114 | struct orinoco_private *priv = NULL; | 96 | struct orinoco_private *priv; |
115 | struct orinoco_tmd_card *card; | 97 | struct orinoco_pci_card *card; |
116 | struct net_device *dev = NULL; | 98 | struct net_device *dev; |
117 | void __iomem *mem; | 99 | void __iomem *hermes_io, *bridge_io; |
118 | 100 | ||
119 | err = pci_enable_device(pdev); | 101 | err = pci_enable_device(pdev); |
120 | if (err) { | 102 | if (err) { |
@@ -123,20 +105,28 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev, | |||
123 | } | 105 | } |
124 | 106 | ||
125 | err = pci_request_regions(pdev, DRIVER_NAME); | 107 | err = pci_request_regions(pdev, DRIVER_NAME); |
126 | if (err != 0) { | 108 | if (err) { |
127 | printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); | 109 | printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); |
128 | goto fail_resources; | 110 | goto fail_resources; |
129 | } | 111 | } |
130 | 112 | ||
131 | mem = pci_iomap(pdev, 2, 0); | 113 | bridge_io = pci_iomap(pdev, 1, 0); |
132 | if (! mem) { | 114 | if (!bridge_io) { |
133 | err = -ENOMEM; | 115 | printk(KERN_ERR PFX "Cannot map bridge registers\n"); |
134 | goto fail_iomap; | 116 | err = -EIO; |
117 | goto fail_map_bridge; | ||
118 | } | ||
119 | |||
120 | hermes_io = pci_iomap(pdev, 2, 0); | ||
121 | if (!hermes_io) { | ||
122 | printk(KERN_ERR PFX "Cannot map chipset registers\n"); | ||
123 | err = -EIO; | ||
124 | goto fail_map_hermes; | ||
135 | } | 125 | } |
136 | 126 | ||
137 | /* Allocate network device */ | 127 | /* Allocate network device */ |
138 | dev = alloc_orinocodev(sizeof(*card), orinoco_tmd_cor_reset); | 128 | dev = alloc_orinocodev(sizeof(*card), orinoco_tmd_cor_reset); |
139 | if (! dev) { | 129 | if (!dev) { |
140 | printk(KERN_ERR PFX "Cannot allocate network device\n"); | 130 | printk(KERN_ERR PFX "Cannot allocate network device\n"); |
141 | err = -ENOMEM; | 131 | err = -ENOMEM; |
142 | goto fail_alloc; | 132 | goto fail_alloc; |
@@ -144,16 +134,11 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev, | |||
144 | 134 | ||
145 | priv = netdev_priv(dev); | 135 | priv = netdev_priv(dev); |
146 | card = priv->card; | 136 | card = priv->card; |
147 | card->tmd_io = pci_resource_start(pdev, 1); | 137 | card->bridge_io = bridge_io; |
148 | dev->base_addr = pci_resource_start(pdev, 2); | ||
149 | SET_MODULE_OWNER(dev); | 138 | SET_MODULE_OWNER(dev); |
150 | SET_NETDEV_DEV(dev, &pdev->dev); | 139 | SET_NETDEV_DEV(dev, &pdev->dev); |
151 | 140 | ||
152 | hermes_struct_init(&priv->hw, mem, HERMES_16BIT_REGSPACING); | 141 | hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); |
153 | |||
154 | printk(KERN_DEBUG PFX "Detected Orinoco/Prism2 TMD device " | ||
155 | "at %s irq:%d, io addr:0x%lx\n", pci_name(pdev), pdev->irq, | ||
156 | dev->base_addr); | ||
157 | 142 | ||
158 | err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, | 143 | err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, |
159 | dev->name, dev); | 144 | dev->name, dev); |
@@ -162,7 +147,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev, | |||
162 | err = -EBUSY; | 147 | err = -EBUSY; |
163 | goto fail_irq; | 148 | goto fail_irq; |
164 | } | 149 | } |
165 | dev->irq = pdev->irq; | ||
166 | 150 | ||
167 | err = orinoco_tmd_cor_reset(priv); | 151 | err = orinoco_tmd_cor_reset(priv); |
168 | if (err) { | 152 | if (err) { |
@@ -177,6 +161,8 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev, | |||
177 | } | 161 | } |
178 | 162 | ||
179 | pci_set_drvdata(pdev, dev); | 163 | pci_set_drvdata(pdev, dev); |
164 | printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name, | ||
165 | pci_name(pdev)); | ||
180 | 166 | ||
181 | return 0; | 167 | return 0; |
182 | 168 | ||
@@ -188,9 +174,12 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev, | |||
188 | free_orinocodev(dev); | 174 | free_orinocodev(dev); |
189 | 175 | ||
190 | fail_alloc: | 176 | fail_alloc: |
191 | pci_iounmap(pdev, mem); | 177 | pci_iounmap(pdev, hermes_io); |
178 | |||
179 | fail_map_hermes: | ||
180 | pci_iounmap(pdev, bridge_io); | ||
192 | 181 | ||
193 | fail_iomap: | 182 | fail_map_bridge: |
194 | pci_release_regions(pdev); | 183 | pci_release_regions(pdev); |
195 | 184 | ||
196 | fail_resources: | 185 | fail_resources: |
@@ -203,31 +192,32 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev) | |||
203 | { | 192 | { |
204 | struct net_device *dev = pci_get_drvdata(pdev); | 193 | struct net_device *dev = pci_get_drvdata(pdev); |
205 | struct orinoco_private *priv = dev->priv; | 194 | struct orinoco_private *priv = dev->priv; |
206 | 195 | struct orinoco_pci_card *card = priv->card; | |
207 | BUG_ON(! dev); | ||
208 | 196 | ||
209 | unregister_netdev(dev); | 197 | unregister_netdev(dev); |
210 | free_irq(dev->irq, dev); | 198 | free_irq(pdev->irq, dev); |
211 | pci_set_drvdata(pdev, NULL); | 199 | pci_set_drvdata(pdev, NULL); |
212 | free_orinocodev(dev); | 200 | free_orinocodev(dev); |
213 | pci_iounmap(pdev, priv->hw.iobase); | 201 | pci_iounmap(pdev, priv->hw.iobase); |
202 | pci_iounmap(pdev, card->bridge_io); | ||
214 | pci_release_regions(pdev); | 203 | pci_release_regions(pdev); |
215 | pci_disable_device(pdev); | 204 | pci_disable_device(pdev); |
216 | } | 205 | } |
217 | 206 | ||
218 | 207 | static struct pci_device_id orinoco_tmd_id_table[] = { | |
219 | static struct pci_device_id orinoco_tmd_pci_id_table[] = { | ||
220 | {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */ | 208 | {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */ |
221 | {0,}, | 209 | {0,}, |
222 | }; | 210 | }; |
223 | 211 | ||
224 | MODULE_DEVICE_TABLE(pci, orinoco_tmd_pci_id_table); | 212 | MODULE_DEVICE_TABLE(pci, orinoco_tmd_id_table); |
225 | 213 | ||
226 | static struct pci_driver orinoco_tmd_driver = { | 214 | static struct pci_driver orinoco_tmd_driver = { |
227 | .name = DRIVER_NAME, | 215 | .name = DRIVER_NAME, |
228 | .id_table = orinoco_tmd_pci_id_table, | 216 | .id_table = orinoco_tmd_id_table, |
229 | .probe = orinoco_tmd_init_one, | 217 | .probe = orinoco_tmd_init_one, |
230 | .remove = __devexit_p(orinoco_tmd_remove_one), | 218 | .remove = __devexit_p(orinoco_tmd_remove_one), |
219 | .suspend = orinoco_pci_suspend, | ||
220 | .resume = orinoco_pci_resume, | ||
231 | }; | 221 | }; |
232 | 222 | ||
233 | static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION | 223 | static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION |
@@ -245,7 +235,6 @@ static int __init orinoco_tmd_init(void) | |||
245 | static void __exit orinoco_tmd_exit(void) | 235 | static void __exit orinoco_tmd_exit(void) |
246 | { | 236 | { |
247 | pci_unregister_driver(&orinoco_tmd_driver); | 237 | pci_unregister_driver(&orinoco_tmd_driver); |
248 | ssleep(1); | ||
249 | } | 238 | } |
250 | 239 | ||
251 | module_init(orinoco_tmd_init); | 240 | module_init(orinoco_tmd_init); |
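
One point the TMD conversion relies on is that pci_iomap() returns a cookie usable for both memory and I/O-port BARs, which is why the COR reset above can drop outb() on a raw port number in favour of iowrite8() on the mapped bridge_io. A hedged sketch of just that piece; it assumes the COR_VALUE/COR_RESET constants and includes already present in this file, and follows the BAR numbering used above:

/* Sketch, not part of the patch: reset the card through the TMD7160's
 * I/O-port BAR using the portable accessors. */
static int example_tmd_cor_reset(struct pci_dev *pdev)
{
	void __iomem *cor_io;

	cor_io = pci_iomap(pdev, 1, 0);	/* BAR 1: I/O ports on the TMD */
	if (!cor_io)
		return -EIO;

	iowrite8(COR_VALUE | COR_RESET, cor_io);	/* assert reset */
	mdelay(1);
	iowrite8(COR_VALUE, cor_io);			/* release reset */
	mdelay(1);

	pci_iounmap(pdev, cor_io);
	return 0;
}
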
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c index f7b77ce54d7b..7f9aa139c347 100644 --- a/drivers/net/wireless/spectrum_cs.c +++ b/drivers/net/wireless/spectrum_cs.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Driver for 802.11b cards using RAM-loadable Symbol firmware, such as | 2 | * Driver for 802.11b cards using RAM-loadable Symbol firmware, such as |
3 | * Symbol Wireless Networker LA4100, CompactFlash cards by Socket | 3 | * Symbol Wireless Networker LA4137, CompactFlash cards by Socket |
4 | * Communications and Intel PRO/Wireless 2011B. | 4 | * Communications and Intel PRO/Wireless 2011B. |
5 | * | 5 | * |
6 | * The driver implements Symbol firmware download. The rest is handled | 6 | * The driver implements Symbol firmware download. The rest is handled |
@@ -120,8 +120,8 @@ static void spectrum_cs_release(struct pcmcia_device *link); | |||
120 | * Each block has the following structure. | 120 | * Each block has the following structure. |
121 | */ | 121 | */ |
122 | struct dblock { | 122 | struct dblock { |
123 | __le32 _addr; /* adapter address where to write the block */ | 123 | __le32 addr; /* adapter address where to write the block */ |
124 | __le16 _len; /* length of the data only, in bytes */ | 124 | __le16 len; /* length of the data only, in bytes */ |
125 | char data[0]; /* data to be written */ | 125 | char data[0]; /* data to be written */ |
126 | } __attribute__ ((packed)); | 126 | } __attribute__ ((packed)); |
127 | 127 | ||
@@ -131,9 +131,9 @@ struct dblock { | |||
131 | * items with matching ID should be written. | 131 | * items with matching ID should be written. |
132 | */ | 132 | */ |
133 | struct pdr { | 133 | struct pdr { |
134 | __le32 _id; /* record ID */ | 134 | __le32 id; /* record ID */ |
135 | __le32 _addr; /* adapter address where to write the data */ | 135 | __le32 addr; /* adapter address where to write the data */ |
136 | __le32 _len; /* expected length of the data, in bytes */ | 136 | __le32 len; /* expected length of the data, in bytes */ |
137 | char next[0]; /* next PDR starts here */ | 137 | char next[0]; /* next PDR starts here */ |
138 | } __attribute__ ((packed)); | 138 | } __attribute__ ((packed)); |
139 | 139 | ||
@@ -144,8 +144,8 @@ struct pdr { | |||
144 | * be plugged into the secondary firmware. | 144 | * be plugged into the secondary firmware. |
145 | */ | 145 | */ |
146 | struct pdi { | 146 | struct pdi { |
147 | __le16 _len; /* length of ID and data, in words */ | 147 | __le16 len; /* length of ID and data, in words */ |
148 | __le16 _id; /* record ID */ | 148 | __le16 id; /* record ID */ |
149 | char data[0]; /* plug data */ | 149 | char data[0]; /* plug data */ |
150 | } __attribute__ ((packed)); | 150 | } __attribute__ ((packed)); |
151 | 151 | ||
@@ -154,44 +154,44 @@ struct pdi { | |||
154 | static inline u32 | 154 | static inline u32 |
155 | dblock_addr(const struct dblock *blk) | 155 | dblock_addr(const struct dblock *blk) |
156 | { | 156 | { |
157 | return le32_to_cpu(blk->_addr); | 157 | return le32_to_cpu(blk->addr); |
158 | } | 158 | } |
159 | 159 | ||
160 | static inline u32 | 160 | static inline u32 |
161 | dblock_len(const struct dblock *blk) | 161 | dblock_len(const struct dblock *blk) |
162 | { | 162 | { |
163 | return le16_to_cpu(blk->_len); | 163 | return le16_to_cpu(blk->len); |
164 | } | 164 | } |
165 | 165 | ||
166 | static inline u32 | 166 | static inline u32 |
167 | pdr_id(const struct pdr *pdr) | 167 | pdr_id(const struct pdr *pdr) |
168 | { | 168 | { |
169 | return le32_to_cpu(pdr->_id); | 169 | return le32_to_cpu(pdr->id); |
170 | } | 170 | } |
171 | 171 | ||
172 | static inline u32 | 172 | static inline u32 |
173 | pdr_addr(const struct pdr *pdr) | 173 | pdr_addr(const struct pdr *pdr) |
174 | { | 174 | { |
175 | return le32_to_cpu(pdr->_addr); | 175 | return le32_to_cpu(pdr->addr); |
176 | } | 176 | } |
177 | 177 | ||
178 | static inline u32 | 178 | static inline u32 |
179 | pdr_len(const struct pdr *pdr) | 179 | pdr_len(const struct pdr *pdr) |
180 | { | 180 | { |
181 | return le32_to_cpu(pdr->_len); | 181 | return le32_to_cpu(pdr->len); |
182 | } | 182 | } |
183 | 183 | ||
184 | static inline u32 | 184 | static inline u32 |
185 | pdi_id(const struct pdi *pdi) | 185 | pdi_id(const struct pdi *pdi) |
186 | { | 186 | { |
187 | return le16_to_cpu(pdi->_id); | 187 | return le16_to_cpu(pdi->id); |
188 | } | 188 | } |
189 | 189 | ||
190 | /* Return length of the data only, in bytes */ | 190 | /* Return length of the data only, in bytes */ |
191 | static inline u32 | 191 | static inline u32 |
192 | pdi_len(const struct pdi *pdi) | 192 | pdi_len(const struct pdi *pdi) |
193 | { | 193 | { |
194 | return 2 * (le16_to_cpu(pdi->_len) - 1); | 194 | return 2 * (le16_to_cpu(pdi->len) - 1); |
195 | } | 195 | } |
196 | 196 | ||
197 | 197 | ||
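
The renamed accessors above exist so the firmware download code never touches the little-endian fields directly. As a worked illustration of how they compose: the struct layout and the 2*(len-1) byte count come from this file, while the terminating ID value and the walker itself are assumptions, not part of the patch.

/* Assumed sketch: walking a chain of PDI records with the helpers above.
 * EXAMPLE_PDI_END is a made-up terminator; the real driver defines its
 * own sentinel. */
#define EXAMPLE_PDI_END	0x0000

static void example_walk_pdis(const struct pdi *pdi)
{
	while (pdi_id(pdi) != EXAMPLE_PDI_END) {
		/* pdi_len() converts "length in words, ID included" into
		 * plain data bytes, ready for hermes_write_bytes() below. */
		printk(KERN_DEBUG PFX "PDI %04x: %u data bytes\n",
		       pdi_id(pdi), pdi_len(pdi));

		/* The next record begins right after this one's data. */
		pdi = (const struct pdi *) &pdi->data[pdi_len(pdi)];
	}
}
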
@@ -343,8 +343,7 @@ spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi) | |||
343 | 343 | ||
344 | /* do the actual plugging */ | 344 | /* do the actual plugging */ |
345 | spectrum_aux_setaddr(hw, pdr_addr(pdr)); | 345 | spectrum_aux_setaddr(hw, pdr_addr(pdr)); |
346 | hermes_write_words(hw, HERMES_AUXDATA, pdi->data, | 346 | hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi)); |
347 | pdi_len(pdi) / 2); | ||
348 | 347 | ||
349 | return 0; | 348 | return 0; |
350 | } | 349 | } |
@@ -424,8 +423,8 @@ spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block) | |||
424 | 423 | ||
425 | while (dblock_addr(blk) != BLOCK_END) { | 424 | while (dblock_addr(blk) != BLOCK_END) { |
426 | spectrum_aux_setaddr(hw, blkaddr); | 425 | spectrum_aux_setaddr(hw, blkaddr); |
427 | hermes_write_words(hw, HERMES_AUXDATA, blk->data, | 426 | hermes_write_bytes(hw, HERMES_AUXDATA, blk->data, |
428 | blklen / 2); | 427 | blklen); |
429 | 428 | ||
430 | blk = (struct dblock *) &blk->data[blklen]; | 429 | blk = (struct dblock *) &blk->data[blklen]; |
431 | blkaddr = dblock_addr(blk); | 430 | blkaddr = dblock_addr(blk); |
@@ -626,14 +625,11 @@ static void spectrum_cs_detach(struct pcmcia_device *link) | |||
626 | { | 625 | { |
627 | struct net_device *dev = link->priv; | 626 | struct net_device *dev = link->priv; |
628 | 627 | ||
628 | if (link->dev_node) | ||
629 | unregister_netdev(dev); | ||
630 | |||
629 | spectrum_cs_release(link); | 631 | spectrum_cs_release(link); |
630 | 632 | ||
631 | DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node); | ||
632 | if (link->dev_node) { | ||
633 | DEBUG(0, PFX "About to unregister net device %p\n", | ||
634 | dev); | ||
635 | unregister_netdev(dev); | ||
636 | } | ||
637 | free_orinocodev(dev); | 633 | free_orinocodev(dev); |
638 | } /* spectrum_cs_detach */ | 634 | } /* spectrum_cs_detach */ |
639 | 635 | ||
@@ -653,13 +649,10 @@ spectrum_cs_config(struct pcmcia_device *link) | |||
653 | int last_fn, last_ret; | 649 | int last_fn, last_ret; |
654 | u_char buf[64]; | 650 | u_char buf[64]; |
655 | config_info_t conf; | 651 | config_info_t conf; |
656 | cisinfo_t info; | ||
657 | tuple_t tuple; | 652 | tuple_t tuple; |
658 | cisparse_t parse; | 653 | cisparse_t parse; |
659 | void __iomem *mem; | 654 | void __iomem *mem; |
660 | 655 | ||
661 | CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info)); | ||
662 | |||
663 | /* | 656 | /* |
664 | * This reads the card's CONFIG tuple to find its | 657 | * This reads the card's CONFIG tuple to find its |
665 | * configuration registers. | 658 | * configuration registers. |
@@ -709,12 +702,6 @@ spectrum_cs_config(struct pcmcia_device *link) | |||
709 | goto next_entry; | 702 | goto next_entry; |
710 | link->conf.ConfigIndex = cfg->index; | 703 | link->conf.ConfigIndex = cfg->index; |
711 | 704 | ||
712 | /* Does this card need audio output? */ | ||
713 | if (cfg->flags & CISTPL_CFTABLE_AUDIO) { | ||
714 | link->conf.Attributes |= CONF_ENABLE_SPKR; | ||
715 | link->conf.Status = CCSR_AUDIO_ENA; | ||
716 | } | ||
717 | |||
718 | /* Use power settings for Vcc and Vpp if present */ | 705 | /* Use power settings for Vcc and Vpp if present */ |
719 | /* Note that the CIS values need to be rescaled */ | 706 | /* Note that the CIS values need to be rescaled */ |
720 | if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { | 707 | if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { |
@@ -835,19 +822,10 @@ spectrum_cs_config(struct pcmcia_device *link) | |||
835 | net_device has been registered */ | 822 | net_device has been registered */ |
836 | 823 | ||
837 | /* Finally, report what we've done */ | 824 | /* Finally, report what we've done */ |
838 | printk(KERN_DEBUG "%s: index 0x%02x: ", | 825 | printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io " |
839 | dev->name, link->conf.ConfigIndex); | 826 | "0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id, |
840 | if (link->conf.Vpp) | 827 | link->irq.AssignedIRQ, link->io.BasePort1, |
841 | printk(", Vpp %d.%d", link->conf.Vpp / 10, | 828 | link->io.BasePort1 + link->io.NumPorts1 - 1); |
842 | link->conf.Vpp % 10); | ||
843 | printk(", irq %d", link->irq.AssignedIRQ); | ||
844 | if (link->io.NumPorts1) | ||
845 | printk(", io 0x%04x-0x%04x", link->io.BasePort1, | ||
846 | link->io.BasePort1 + link->io.NumPorts1 - 1); | ||
847 | if (link->io.NumPorts2) | ||
848 | printk(" & 0x%04x-0x%04x", link->io.BasePort2, | ||
849 | link->io.BasePort2 + link->io.NumPorts2 - 1); | ||
850 | printk("\n"); | ||
851 | 829 | ||
852 | return 0; | 830 | return 0; |
853 | 831 | ||
@@ -888,11 +866,10 @@ spectrum_cs_suspend(struct pcmcia_device *link) | |||
888 | { | 866 | { |
889 | struct net_device *dev = link->priv; | 867 | struct net_device *dev = link->priv; |
890 | struct orinoco_private *priv = netdev_priv(dev); | 868 | struct orinoco_private *priv = netdev_priv(dev); |
891 | unsigned long flags; | ||
892 | int err = 0; | 869 | int err = 0; |
893 | 870 | ||
894 | /* Mark the device as stopped, to block IO until later */ | 871 | /* Mark the device as stopped, to block IO until later */ |
895 | spin_lock_irqsave(&priv->lock, flags); | 872 | spin_lock(&priv->lock); |
896 | 873 | ||
897 | err = __orinoco_down(dev); | 874 | err = __orinoco_down(dev); |
898 | if (err) | 875 | if (err) |
@@ -902,9 +879,9 @@ spectrum_cs_suspend(struct pcmcia_device *link) | |||
902 | netif_device_detach(dev); | 879 | netif_device_detach(dev); |
903 | priv->hw_unavailable++; | 880 | priv->hw_unavailable++; |
904 | 881 | ||
905 | spin_unlock_irqrestore(&priv->lock, flags); | 882 | spin_unlock(&priv->lock); |
906 | 883 | ||
907 | return 0; | 884 | return err; |
908 | } | 885 | } |
909 | 886 | ||
910 | static int | 887 | static int |
@@ -932,7 +909,7 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION | |||
932 | " David Gibson <hermes@gibson.dropbear.id.au>, et al)"; | 909 | " David Gibson <hermes@gibson.dropbear.id.au>, et al)"; |
933 | 910 | ||
934 | static struct pcmcia_device_id spectrum_cs_ids[] = { | 911 | static struct pcmcia_device_id spectrum_cs_ids[] = { |
935 | PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4100 */ | 912 | PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4137 */ |
936 | PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */ | 913 | PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */ |
937 | PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */ | 914 | PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */ |
938 | PCMCIA_DEVICE_NULL, | 915 | PCMCIA_DEVICE_NULL, |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 2329f941a0dc..8d107c6c2c70 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -164,7 +164,6 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) | |||
164 | return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap); | 164 | return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap); |
165 | } | 165 | } |
166 | 166 | ||
167 | #if 0 | ||
168 | /** | 167 | /** |
169 | * pci_find_ext_capability - Find an extended capability | 168 | * pci_find_ext_capability - Find an extended capability |
170 | * @dev: PCI device to query | 169 | * @dev: PCI device to query |
@@ -212,7 +211,7 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap) | |||
212 | 211 | ||
213 | return 0; | 212 | return 0; |
214 | } | 213 | } |
215 | #endif /* 0 */ | 214 | EXPORT_SYMBOL_GPL(pci_find_ext_capability); |
216 | 215 | ||
217 | /** | 216 | /** |
218 | * pci_find_parent_resource - return resource region of parent bus of given region | 217 | * pci_find_parent_resource - return resource region of parent bus of given region |
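
This hunk drops the #if 0 around pci_find_ext_capability() and exports it, so modules can look up PCI Express extended capabilities themselves. A hedged example of the kind of caller this enables; the capability chosen is just an illustration and assumes the PCI_EXT_CAP_ID_ERR constant from the PCI register definitions:

/* Sketch, not part of the patch: locate the Advanced Error Reporting
 * extended capability from a module now that the symbol is exported. */
#include <linux/pci.h>

static int example_find_aer(struct pci_dev *pdev)
{
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -ENODEV;	/* no AER capability on this device */

	printk(KERN_DEBUG "%s: AER capability at offset 0x%x\n",
	       pci_name(pdev), pos);
	return pos;
}
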
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index 90d4d0ef3dd4..6775a837d646 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # S/390 network devices | 2 | # S/390 network devices |
3 | # | 3 | # |
4 | 4 | ||
5 | ctc-objs := ctcmain.o ctctty.o ctcdbug.o | 5 | ctc-objs := ctcmain.o ctcdbug.o |
6 | 6 | ||
7 | obj-$(CONFIG_IUCV) += iucv.o | 7 | obj-$(CONFIG_IUCV) += iucv.o |
8 | obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o | 8 | obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o |
@@ -10,6 +10,7 @@ obj-$(CONFIG_SMSGIUCV) += smsgiucv.o | |||
10 | obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o | 10 | obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o |
11 | obj-$(CONFIG_LCS) += lcs.o cu3088.o | 11 | obj-$(CONFIG_LCS) += lcs.o cu3088.o |
12 | obj-$(CONFIG_CLAW) += claw.o cu3088.o | 12 | obj-$(CONFIG_CLAW) += claw.o cu3088.o |
13 | obj-$(CONFIG_MPC) += ctcmpc.o fsm.o cu3088.o | ||
13 | qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o | 14 | qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o |
14 | qeth-$(CONFIG_PROC_FS) += qeth_proc.o | 15 | qeth-$(CONFIG_PROC_FS) += qeth_proc.o |
15 | obj-$(CONFIG_QETH) += qeth.o | 16 | obj-$(CONFIG_QETH) += qeth.o |
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index fe986af884f8..20c8eb16f464 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * Fixes by : Jochen Röhrig (roehrig@de.ibm.com) | 6 | * Fixes by : Jochen Röhrig (roehrig@de.ibm.com) |
7 | * Arnaldo Carvalho de Melo <acme@conectiva.com.br> | 7 | * Arnaldo Carvalho de Melo <acme@conectiva.com.br> |
8 | Peter Tiedemann (ptiedem@de.ibm.com) | 8 | Peter Tiedemann (ptiedem@de.ibm.com) |
9 | * Driver Model stuff by : Cornelia Huck <huckc@de.ibm.com> | 9 | * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com> |
10 | * | 10 | * |
11 | * Documentation used: | 11 | * Documentation used: |
12 | * - Principles of Operation (IBM doc#: SA22-7201-06) | 12 | * - Principles of Operation (IBM doc#: SA22-7201-06) |
@@ -65,7 +65,6 @@ | |||
65 | 65 | ||
66 | #include <asm/idals.h> | 66 | #include <asm/idals.h> |
67 | 67 | ||
68 | #include "ctctty.h" | ||
69 | #include "fsm.h" | 68 | #include "fsm.h" |
70 | #include "cu3088.h" | 69 | #include "cu3088.h" |
71 | 70 | ||
@@ -479,10 +478,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
479 | skb->dev = pskb->dev; | 478 | skb->dev = pskb->dev; |
480 | skb->protocol = pskb->protocol; | 479 | skb->protocol = pskb->protocol; |
481 | pskb->ip_summed = CHECKSUM_UNNECESSARY; | 480 | pskb->ip_summed = CHECKSUM_UNNECESSARY; |
482 | if (ch->protocol == CTC_PROTO_LINUX_TTY) | 481 | netif_rx_ni(skb); |
483 | ctc_tty_netif_rx(skb); | ||
484 | else | ||
485 | netif_rx_ni(skb); | ||
486 | /** | 482 | /** |
487 | * Successful rx; reset logflags | 483 | * Successful rx; reset logflags |
488 | */ | 484 | */ |
@@ -557,8 +553,7 @@ ccw_unit_check(struct channel *ch, unsigned char sense) | |||
557 | DBF_TEXT(trace, 5, __FUNCTION__); | 553 | DBF_TEXT(trace, 5, __FUNCTION__); |
558 | if (sense & SNS0_INTERVENTION_REQ) { | 554 | if (sense & SNS0_INTERVENTION_REQ) { |
559 | if (sense & 0x01) { | 555 | if (sense & 0x01) { |
560 | if (ch->protocol != CTC_PROTO_LINUX_TTY) | 556 | ctc_pr_debug("%s: Interface disc. or Sel. reset " |
561 | ctc_pr_debug("%s: Interface disc. or Sel. reset " | ||
562 | "(remote)\n", ch->id); | 557 | "(remote)\n", ch->id); |
563 | fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch); | 558 | fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch); |
564 | } else { | 559 | } else { |
@@ -2034,7 +2029,6 @@ static void | |||
2034 | dev_action_chup(fsm_instance * fi, int event, void *arg) | 2029 | dev_action_chup(fsm_instance * fi, int event, void *arg) |
2035 | { | 2030 | { |
2036 | struct net_device *dev = (struct net_device *) arg; | 2031 | struct net_device *dev = (struct net_device *) arg; |
2037 | struct ctc_priv *privptr = dev->priv; | ||
2038 | 2032 | ||
2039 | DBF_TEXT(trace, 3, __FUNCTION__); | 2033 | DBF_TEXT(trace, 3, __FUNCTION__); |
2040 | switch (fsm_getstate(fi)) { | 2034 | switch (fsm_getstate(fi)) { |
@@ -2049,8 +2043,6 @@ dev_action_chup(fsm_instance * fi, int event, void *arg) | |||
2049 | fsm_newstate(fi, DEV_STATE_RUNNING); | 2043 | fsm_newstate(fi, DEV_STATE_RUNNING); |
2050 | ctc_pr_info("%s: connected with remote side\n", | 2044 | ctc_pr_info("%s: connected with remote side\n", |
2051 | dev->name); | 2045 | dev->name); |
2052 | if (privptr->protocol == CTC_PROTO_LINUX_TTY) | ||
2053 | ctc_tty_setcarrier(dev, 1); | ||
2054 | ctc_clear_busy(dev); | 2046 | ctc_clear_busy(dev); |
2055 | } | 2047 | } |
2056 | break; | 2048 | break; |
@@ -2059,8 +2051,6 @@ dev_action_chup(fsm_instance * fi, int event, void *arg) | |||
2059 | fsm_newstate(fi, DEV_STATE_RUNNING); | 2051 | fsm_newstate(fi, DEV_STATE_RUNNING); |
2060 | ctc_pr_info("%s: connected with remote side\n", | 2052 | ctc_pr_info("%s: connected with remote side\n", |
2061 | dev->name); | 2053 | dev->name); |
2062 | if (privptr->protocol == CTC_PROTO_LINUX_TTY) | ||
2063 | ctc_tty_setcarrier(dev, 1); | ||
2064 | ctc_clear_busy(dev); | 2054 | ctc_clear_busy(dev); |
2065 | } | 2055 | } |
2066 | break; | 2056 | break; |
@@ -2086,14 +2076,10 @@ dev_action_chup(fsm_instance * fi, int event, void *arg) | |||
2086 | static void | 2076 | static void |
2087 | dev_action_chdown(fsm_instance * fi, int event, void *arg) | 2077 | dev_action_chdown(fsm_instance * fi, int event, void *arg) |
2088 | { | 2078 | { |
2089 | struct net_device *dev = (struct net_device *) arg; | ||
2090 | struct ctc_priv *privptr = dev->priv; | ||
2091 | 2079 | ||
2092 | DBF_TEXT(trace, 3, __FUNCTION__); | 2080 | DBF_TEXT(trace, 3, __FUNCTION__); |
2093 | switch (fsm_getstate(fi)) { | 2081 | switch (fsm_getstate(fi)) { |
2094 | case DEV_STATE_RUNNING: | 2082 | case DEV_STATE_RUNNING: |
2095 | if (privptr->protocol == CTC_PROTO_LINUX_TTY) | ||
2096 | ctc_tty_setcarrier(dev, 0); | ||
2097 | if (event == DEV_EVENT_TXDOWN) | 2083 | if (event == DEV_EVENT_TXDOWN) |
2098 | fsm_newstate(fi, DEV_STATE_STARTWAIT_TX); | 2084 | fsm_newstate(fi, DEV_STATE_STARTWAIT_TX); |
2099 | else | 2085 | else |
@@ -2397,8 +2383,6 @@ ctc_tx(struct sk_buff *skb, struct net_device * dev) | |||
2397 | */ | 2383 | */ |
2398 | if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { | 2384 | if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { |
2399 | fsm_event(privptr->fsm, DEV_EVENT_START, dev); | 2385 | fsm_event(privptr->fsm, DEV_EVENT_START, dev); |
2400 | if (privptr->protocol == CTC_PROTO_LINUX_TTY) | ||
2401 | return -EBUSY; | ||
2402 | dev_kfree_skb(skb); | 2386 | dev_kfree_skb(skb); |
2403 | privptr->stats.tx_dropped++; | 2387 | privptr->stats.tx_dropped++; |
2404 | privptr->stats.tx_errors++; | 2388 | privptr->stats.tx_errors++; |
@@ -2608,20 +2592,13 @@ ctc_netdev_unregister(struct net_device * dev) | |||
2608 | if (!dev) | 2592 | if (!dev) |
2609 | return; | 2593 | return; |
2610 | privptr = (struct ctc_priv *) dev->priv; | 2594 | privptr = (struct ctc_priv *) dev->priv; |
2611 | if (privptr->protocol != CTC_PROTO_LINUX_TTY) | 2595 | unregister_netdev(dev); |
2612 | unregister_netdev(dev); | ||
2613 | else | ||
2614 | ctc_tty_unregister_netdev(dev); | ||
2615 | } | 2596 | } |
2616 | 2597 | ||
2617 | static int | 2598 | static int |
2618 | ctc_netdev_register(struct net_device * dev) | 2599 | ctc_netdev_register(struct net_device * dev) |
2619 | { | 2600 | { |
2620 | struct ctc_priv *privptr = (struct ctc_priv *) dev->priv; | 2601 | return register_netdev(dev); |
2621 | if (privptr->protocol != CTC_PROTO_LINUX_TTY) | ||
2622 | return register_netdev(dev); | ||
2623 | else | ||
2624 | return ctc_tty_register_netdev(dev); | ||
2625 | } | 2602 | } |
2626 | 2603 | ||
2627 | static void | 2604 | static void |
@@ -2667,7 +2644,9 @@ ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *b | |||
2667 | if (!priv) | 2644 | if (!priv) |
2668 | return -ENODEV; | 2645 | return -ENODEV; |
2669 | sscanf(buf, "%u", &value); | 2646 | sscanf(buf, "%u", &value); |
2670 | if ((value < 0) || (value > CTC_PROTO_MAX)) | 2647 | if (!((value == CTC_PROTO_S390) || |
2648 | (value == CTC_PROTO_LINUX) || | ||
2649 | (value == CTC_PROTO_OS390))) | ||
2671 | return -EINVAL; | 2650 | return -EINVAL; |
2672 | priv->protocol = value; | 2651 | priv->protocol = value; |
2673 | 2652 | ||
@@ -2897,10 +2876,7 @@ ctc_new_device(struct ccwgroup_device *cgdev) | |||
2897 | goto out; | 2876 | goto out; |
2898 | } | 2877 | } |
2899 | 2878 | ||
2900 | if (privptr->protocol == CTC_PROTO_LINUX_TTY) | 2879 | strlcpy(dev->name, "ctc%d", IFNAMSIZ); |
2901 | strlcpy(dev->name, "ctctty%d", IFNAMSIZ); | ||
2902 | else | ||
2903 | strlcpy(dev->name, "ctc%d", IFNAMSIZ); | ||
2904 | 2880 | ||
2905 | for (direction = READ; direction <= WRITE; direction++) { | 2881 | for (direction = READ; direction <= WRITE; direction++) { |
2906 | privptr->channel[direction] = | 2882 | privptr->channel[direction] = |
@@ -3046,7 +3022,6 @@ ctc_exit(void) | |||
3046 | { | 3022 | { |
3047 | DBF_TEXT(setup, 3, __FUNCTION__); | 3023 | DBF_TEXT(setup, 3, __FUNCTION__); |
3048 | unregister_cu3088_discipline(&ctc_group_driver); | 3024 | unregister_cu3088_discipline(&ctc_group_driver); |
3049 | ctc_tty_cleanup(); | ||
3050 | ctc_unregister_dbf_views(); | 3025 | ctc_unregister_dbf_views(); |
3051 | ctc_pr_info("CTC driver unloaded\n"); | 3026 | ctc_pr_info("CTC driver unloaded\n"); |
3052 | } | 3027 | } |
@@ -3073,10 +3048,8 @@ ctc_init(void) | |||
3073 | ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret); | 3048 | ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret); |
3074 | return ret; | 3049 | return ret; |
3075 | } | 3050 | } |
3076 | ctc_tty_init(); | ||
3077 | ret = register_cu3088_discipline(&ctc_group_driver); | 3051 | ret = register_cu3088_discipline(&ctc_group_driver); |
3078 | if (ret) { | 3052 | if (ret) { |
3079 | ctc_tty_cleanup(); | ||
3080 | ctc_unregister_dbf_views(); | 3053 | ctc_unregister_dbf_views(); |
3081 | } | 3054 | } |
3082 | return ret; | 3055 | return ret; |
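The ctc_proto_store() hunk above drops the old range check ((value < 0) || (value > CTC_PROTO_MAX)) in favour of an explicit whitelist, so the now-removed tty protocol value (2) can no longer be selected through sysfs. A minimal sketch of that validation in isolation; the constants mirror ctcmain.h after this patch and ctc_proto_valid() is a hypothetical helper, not part of the driver:

/* Sketch: explicit protocol whitelist, as introduced by the hunk above.
 * ctc_proto_valid() is illustrative only. */
#define CTC_PROTO_S390  0
#define CTC_PROTO_LINUX 1
#define CTC_PROTO_OS390 3	/* value 2 (the removed tty protocol) stays a hole */

static int ctc_proto_valid(unsigned int value)
{
	/* The former "(value < 0) || (value > CTC_PROTO_MAX)" test accepted 2;
	 * enumerating the remaining protocols rejects it. */
	return (value == CTC_PROTO_S390) ||
	       (value == CTC_PROTO_LINUX) ||
	       (value == CTC_PROTO_OS390);
}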
diff --git a/drivers/s390/net/ctcmain.h b/drivers/s390/net/ctcmain.h index d2e835c0c134..7f305d119f3d 100644 --- a/drivers/s390/net/ctcmain.h +++ b/drivers/s390/net/ctcmain.h | |||
@@ -35,7 +35,9 @@ | |||
35 | #include <asm/ccwdev.h> | 35 | #include <asm/ccwdev.h> |
36 | #include <asm/ccwgroup.h> | 36 | #include <asm/ccwgroup.h> |
37 | 37 | ||
38 | #include "ctctty.h" | 38 | #include <linux/skbuff.h> |
39 | #include <linux/netdevice.h> | ||
40 | |||
39 | #include "fsm.h" | 41 | #include "fsm.h" |
40 | #include "cu3088.h" | 42 | #include "cu3088.h" |
41 | 43 | ||
@@ -50,9 +52,7 @@ | |||
50 | 52 | ||
51 | #define CTC_PROTO_S390 0 | 53 | #define CTC_PROTO_S390 0 |
52 | #define CTC_PROTO_LINUX 1 | 54 | #define CTC_PROTO_LINUX 1 |
53 | #define CTC_PROTO_LINUX_TTY 2 | ||
54 | #define CTC_PROTO_OS390 3 | 55 | #define CTC_PROTO_OS390 3 |
55 | #define CTC_PROTO_MAX 3 | ||
56 | 56 | ||
57 | #define CTC_BUFSIZE_LIMIT 65535 | 57 | #define CTC_BUFSIZE_LIMIT 65535 |
58 | #define CTC_BUFSIZE_DEFAULT 32768 | 58 | #define CTC_BUFSIZE_DEFAULT 32768 |
@@ -257,15 +257,13 @@ static __inline__ void | |||
257 | ctc_clear_busy(struct net_device * dev) | 257 | ctc_clear_busy(struct net_device * dev) |
258 | { | 258 | { |
259 | clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy)); | 259 | clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy)); |
260 | if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY) | 260 | netif_wake_queue(dev); |
261 | netif_wake_queue(dev); | ||
262 | } | 261 | } |
263 | 262 | ||
264 | static __inline__ int | 263 | static __inline__ int |
265 | ctc_test_and_set_busy(struct net_device * dev) | 264 | ctc_test_and_set_busy(struct net_device * dev) |
266 | { | 265 | { |
267 | if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY) | 266 | netif_stop_queue(dev); |
268 | netif_stop_queue(dev); | ||
269 | return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy); | 267 | return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy); |
270 | } | 268 | } |
271 | 269 | ||
diff --git a/drivers/s390/net/ctctty.c b/drivers/s390/net/ctctty.c deleted file mode 100644 index af54d1de07bf..000000000000 --- a/drivers/s390/net/ctctty.c +++ /dev/null | |||
@@ -1,1259 +0,0 @@ | |||
1 | /* | ||
2 | * CTC / ESCON network driver, tty interface. | ||
3 | * | ||
4 | * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/tty.h> | ||
26 | #include <linux/tty_flip.h> | ||
27 | #include <linux/serial_reg.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <asm/uaccess.h> | ||
31 | #include <linux/devfs_fs_kernel.h> | ||
32 | #include "ctctty.h" | ||
33 | #include "ctcdbug.h" | ||
34 | |||
35 | #define CTC_TTY_MAJOR 43 | ||
36 | #define CTC_TTY_MAX_DEVICES 64 | ||
37 | |||
38 | #define CTC_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */ | ||
39 | #define CTC_ASYNC_INITIALIZED 0x80000000 /* port was initialized */ | ||
40 | #define CTC_ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device active */ | ||
41 | #define CTC_ASYNC_CLOSING 0x08000000 /* Serial port is closing */ | ||
42 | #define CTC_ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */ | ||
43 | #define CTC_ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */ | ||
44 | #define CTC_ASYNC_HUP_NOTIFY 0x0001 /* Notify tty on hangups/closes */ | ||
45 | #define CTC_ASYNC_NETDEV_OPEN 0x0002 /* Underlying netdev is open */ | ||
46 | #define CTC_ASYNC_TX_LINESTAT 0x0004 /* Must send line status */ | ||
47 | #define CTC_ASYNC_SPLIT_TERMIOS 0x0008 /* Sep. termios for dialin/out */ | ||
48 | #define CTC_TTY_XMIT_SIZE 1024 /* Default bufsize for write */ | ||
49 | #define CTC_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */ | ||
50 | |||
51 | /* Private data (similar to async_struct in <linux/serial.h>) */ | ||
52 | typedef struct { | ||
53 | int magic; | ||
54 | int flags; /* defined in tty.h */ | ||
55 | int mcr; /* Modem control register */ | ||
56 | int msr; /* Modem status register */ | ||
57 | int lsr; /* Line status register */ | ||
58 | int line; | ||
59 | int count; /* # of fd on device */ | ||
60 | int blocked_open; /* # of blocked opens */ | ||
61 | struct net_device *netdev; | ||
62 | struct sk_buff_head tx_queue; /* transmit queue */ | ||
63 | struct sk_buff_head rx_queue; /* receive queue */ | ||
64 | struct tty_struct *tty; /* Pointer to corresponding tty */ | ||
65 | wait_queue_head_t open_wait; | ||
66 | wait_queue_head_t close_wait; | ||
67 | struct semaphore write_sem; | ||
68 | struct tasklet_struct tasklet; | ||
69 | struct timer_list stoptimer; | ||
70 | } ctc_tty_info; | ||
71 | |||
72 | /* Description of one CTC-tty */ | ||
73 | typedef struct { | ||
74 | struct tty_driver *ctc_tty_device; /* tty-device */ | ||
75 | ctc_tty_info info[CTC_TTY_MAX_DEVICES]; /* Private data */ | ||
76 | } ctc_tty_driver; | ||
77 | |||
78 | static ctc_tty_driver *driver; | ||
79 | |||
80 | /* Leave this unchanged unless you know what you do! */ | ||
81 | #define MODEM_PARANOIA_CHECK | ||
82 | #define MODEM_DO_RESTART | ||
83 | |||
84 | #define CTC_TTY_NAME "ctctty" | ||
85 | |||
86 | static __u32 ctc_tty_magic = CTC_ASYNC_MAGIC; | ||
87 | static int ctc_tty_shuttingdown = 0; | ||
88 | |||
89 | static spinlock_t ctc_tty_lock; | ||
90 | |||
91 | /* ctc_tty_try_read() is called from within ctc_tty_rcv_skb() | ||
92 | * to stuff incoming data directly into a tty's flip-buffer. If the | ||
93 | * flip buffer is full, the packet gets queued up. | ||
94 | * | ||
95 | * Return: | ||
96 | * 1 = Success | ||
97 | * 0 = Failure, data has to be buffered and later processed by | ||
98 | * ctc_tty_readmodem(). | ||
99 | */ | ||
100 | static int | ||
101 | ctc_tty_try_read(ctc_tty_info * info, struct sk_buff *skb) | ||
102 | { | ||
103 | int len; | ||
104 | struct tty_struct *tty; | ||
105 | |||
106 | DBF_TEXT(trace, 5, __FUNCTION__); | ||
107 | if ((tty = info->tty)) { | ||
108 | if (info->mcr & UART_MCR_RTS) { | ||
109 | len = skb->len; | ||
110 | tty_insert_flip_string(tty, skb->data, len); | ||
111 | tty_flip_buffer_push(tty); | ||
112 | kfree_skb(skb); | ||
113 | return 1; | ||
114 | } | ||
115 | } | ||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | /* ctc_tty_readmodem() is called periodically from within timer-interrupt. | ||
120 | * It tries getting received data from the receive queue and stuffs it into | ||
121 | * the tty's flip-buffer. | ||
122 | */ | ||
123 | static int | ||
124 | ctc_tty_readmodem(ctc_tty_info *info) | ||
125 | { | ||
126 | int ret = 1; | ||
127 | struct tty_struct *tty; | ||
128 | |||
129 | DBF_TEXT(trace, 5, __FUNCTION__); | ||
130 | if ((tty = info->tty)) { | ||
131 | if (info->mcr & UART_MCR_RTS) { | ||
132 | struct sk_buff *skb; | ||
133 | |||
134 | if ((skb = skb_dequeue(&info->rx_queue))) { | ||
135 | int len = skb->len; | ||
136 | tty_insert_flip_string(tty, skb->data, len); | ||
137 | skb_pull(skb, len); | ||
138 | tty_flip_buffer_push(tty); | ||
139 | if (skb->len > 0) | ||
140 | skb_queue_head(&info->rx_queue, skb); | ||
141 | else { | ||
142 | kfree_skb(skb); | ||
143 | ret = !skb_queue_empty(&info->rx_queue); | ||
144 | } | ||
145 | } | ||
146 | } | ||
147 | } | ||
148 | return ret; | ||
149 | } | ||
150 | |||
151 | void | ||
152 | ctc_tty_setcarrier(struct net_device *netdev, int on) | ||
153 | { | ||
154 | int i; | ||
155 | |||
156 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
157 | if ((!driver) || ctc_tty_shuttingdown) | ||
158 | return; | ||
159 | for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) | ||
160 | if (driver->info[i].netdev == netdev) { | ||
161 | ctc_tty_info *info = &driver->info[i]; | ||
162 | if (on) | ||
163 | info->msr |= UART_MSR_DCD; | ||
164 | else | ||
165 | info->msr &= ~UART_MSR_DCD; | ||
166 | if ((info->flags & CTC_ASYNC_CHECK_CD) && (!on)) | ||
167 | tty_hangup(info->tty); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | void | ||
172 | ctc_tty_netif_rx(struct sk_buff *skb) | ||
173 | { | ||
174 | int i; | ||
175 | ctc_tty_info *info = NULL; | ||
176 | |||
177 | DBF_TEXT(trace, 5, __FUNCTION__); | ||
178 | if (!skb) | ||
179 | return; | ||
180 | if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) { | ||
181 | dev_kfree_skb(skb); | ||
182 | return; | ||
183 | } | ||
184 | for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) | ||
185 | if (driver->info[i].netdev == skb->dev) { | ||
186 | info = &driver->info[i]; | ||
187 | break; | ||
188 | } | ||
189 | if (!info) { | ||
190 | dev_kfree_skb(skb); | ||
191 | return; | ||
192 | } | ||
193 | if (skb->len < 6) { | ||
194 | dev_kfree_skb(skb); | ||
195 | return; | ||
196 | } | ||
197 | if (memcmp(skb->data, &ctc_tty_magic, sizeof(__u32))) { | ||
198 | dev_kfree_skb(skb); | ||
199 | return; | ||
200 | } | ||
201 | skb_pull(skb, sizeof(__u32)); | ||
202 | |||
203 | i = *((int *)skb->data); | ||
204 | skb_pull(skb, sizeof(info->mcr)); | ||
205 | if (i & UART_MCR_RTS) { | ||
206 | info->msr |= UART_MSR_CTS; | ||
207 | if (info->flags & CTC_ASYNC_CTS_FLOW) | ||
208 | info->tty->hw_stopped = 0; | ||
209 | } else { | ||
210 | info->msr &= ~UART_MSR_CTS; | ||
211 | if (info->flags & CTC_ASYNC_CTS_FLOW) | ||
212 | info->tty->hw_stopped = 1; | ||
213 | } | ||
214 | if (i & UART_MCR_DTR) | ||
215 | info->msr |= UART_MSR_DSR; | ||
216 | else | ||
217 | info->msr &= ~UART_MSR_DSR; | ||
218 | if (skb->len <= 0) { | ||
219 | kfree_skb(skb); | ||
220 | return; | ||
221 | } | ||
222 | /* Try to deliver directly via tty-flip-buf if queue is empty */ | ||
223 | if (skb_queue_empty(&info->rx_queue)) | ||
224 | if (ctc_tty_try_read(info, skb)) | ||
225 | return; | ||
226 | /* Direct delivery failed or queue wasn't empty. | ||
227 | * Queue up for later dequeueing via timer-irq. | ||
228 | */ | ||
229 | skb_queue_tail(&info->rx_queue, skb); | ||
230 | /* Schedule dequeuing */ | ||
231 | tasklet_schedule(&info->tasklet); | ||
232 | } | ||
233 | |||
234 | static int | ||
235 | ctc_tty_tint(ctc_tty_info * info) | ||
236 | { | ||
237 | struct sk_buff *skb = skb_dequeue(&info->tx_queue); | ||
238 | int stopped = (info->tty->hw_stopped || info->tty->stopped); | ||
239 | int wake = 1; | ||
240 | int rc; | ||
241 | |||
242 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
243 | if (!info->netdev) { | ||
244 | if (skb) | ||
245 | kfree_skb(skb); | ||
246 | return 0; | ||
247 | } | ||
248 | if (info->flags & CTC_ASYNC_TX_LINESTAT) { | ||
249 | int skb_res = info->netdev->hard_header_len + | ||
250 | sizeof(info->mcr) + sizeof(__u32); | ||
251 | /* If we must update line status, | ||
252 | * create an empty dummy skb and insert it. | ||
253 | */ | ||
254 | if (skb) | ||
255 | skb_queue_head(&info->tx_queue, skb); | ||
256 | |||
257 | skb = dev_alloc_skb(skb_res); | ||
258 | if (!skb) { | ||
259 | printk(KERN_WARNING | ||
260 | "ctc_tty: Out of memory in %s%d tint\n", | ||
261 | CTC_TTY_NAME, info->line); | ||
262 | return 1; | ||
263 | } | ||
264 | skb_reserve(skb, skb_res); | ||
265 | stopped = 0; | ||
266 | wake = 0; | ||
267 | } | ||
268 | if (!skb) | ||
269 | return 0; | ||
270 | if (stopped) { | ||
271 | skb_queue_head(&info->tx_queue, skb); | ||
272 | return 1; | ||
273 | } | ||
274 | #if 0 | ||
275 | if (skb->len > 0) | ||
276 | printk(KERN_DEBUG "tint: %d %02x\n", skb->len, *(skb->data)); | ||
277 | else | ||
278 | printk(KERN_DEBUG "tint: %d STAT\n", skb->len); | ||
279 | #endif | ||
280 | memcpy(skb_push(skb, sizeof(info->mcr)), &info->mcr, sizeof(info->mcr)); | ||
281 | memcpy(skb_push(skb, sizeof(__u32)), &ctc_tty_magic, sizeof(__u32)); | ||
282 | rc = info->netdev->hard_start_xmit(skb, info->netdev); | ||
283 | if (rc) { | ||
284 | skb_pull(skb, sizeof(info->mcr) + sizeof(__u32)); | ||
285 | if (skb->len > 0) | ||
286 | skb_queue_head(&info->tx_queue, skb); | ||
287 | else | ||
288 | kfree_skb(skb); | ||
289 | } else { | ||
290 | struct tty_struct *tty = info->tty; | ||
291 | |||
292 | info->flags &= ~CTC_ASYNC_TX_LINESTAT; | ||
293 | if (tty) { | ||
294 | tty_wakeup(tty); | ||
295 | } | ||
296 | } | ||
297 | return (skb_queue_empty(&info->tx_queue) ? 0 : 1); | ||
298 | } | ||
299 | |||
300 | /************************************************************ | ||
301 | * | ||
302 | * Modem-functions | ||
303 | * | ||
304 | * mostly "stolen" from original Linux-serial.c and friends. | ||
305 | * | ||
306 | ************************************************************/ | ||
307 | |||
308 | static inline int | ||
309 | ctc_tty_paranoia_check(ctc_tty_info * info, char *name, const char *routine) | ||
310 | { | ||
311 | #ifdef MODEM_PARANOIA_CHECK | ||
312 | if (!info) { | ||
313 | printk(KERN_WARNING "ctc_tty: null info_struct for %s in %s\n", | ||
314 | name, routine); | ||
315 | return 1; | ||
316 | } | ||
317 | if (info->magic != CTC_ASYNC_MAGIC) { | ||
318 | printk(KERN_WARNING "ctc_tty: bad magic for info struct %s in %s\n", | ||
319 | name, routine); | ||
320 | return 1; | ||
321 | } | ||
322 | #endif | ||
323 | return 0; | ||
324 | } | ||
325 | |||
326 | static void | ||
327 | ctc_tty_inject(ctc_tty_info *info, char c) | ||
328 | { | ||
329 | int skb_res; | ||
330 | struct sk_buff *skb; | ||
331 | |||
332 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
333 | if (ctc_tty_shuttingdown) | ||
334 | return; | ||
335 | skb_res = info->netdev->hard_header_len + sizeof(info->mcr) + | ||
336 | sizeof(__u32) + 1; | ||
337 | skb = dev_alloc_skb(skb_res); | ||
338 | if (!skb) { | ||
339 | printk(KERN_WARNING | ||
340 | "ctc_tty: Out of memory in %s%d tx_inject\n", | ||
341 | CTC_TTY_NAME, info->line); | ||
342 | return; | ||
343 | } | ||
344 | skb_reserve(skb, skb_res); | ||
345 | *(skb_put(skb, 1)) = c; | ||
346 | skb_queue_head(&info->tx_queue, skb); | ||
347 | tasklet_schedule(&info->tasklet); | ||
348 | } | ||
349 | |||
350 | static void | ||
351 | ctc_tty_transmit_status(ctc_tty_info *info) | ||
352 | { | ||
353 | DBF_TEXT(trace, 5, __FUNCTION__); | ||
354 | if (ctc_tty_shuttingdown) | ||
355 | return; | ||
356 | info->flags |= CTC_ASYNC_TX_LINESTAT; | ||
357 | tasklet_schedule(&info->tasklet); | ||
358 | } | ||
359 | |||
360 | static void | ||
361 | ctc_tty_change_speed(ctc_tty_info * info) | ||
362 | { | ||
363 | unsigned int cflag; | ||
364 | unsigned int quot; | ||
365 | int i; | ||
366 | |||
367 | DBF_TEXT(trace, 3, __FUNCTION__); | ||
368 | if (!info->tty || !info->tty->termios) | ||
369 | return; | ||
370 | cflag = info->tty->termios->c_cflag; | ||
371 | |||
372 | quot = i = cflag & CBAUD; | ||
373 | if (i & CBAUDEX) { | ||
374 | i &= ~CBAUDEX; | ||
375 | if (i < 1 || i > 2) | ||
376 | info->tty->termios->c_cflag &= ~CBAUDEX; | ||
377 | else | ||
378 | i += 15; | ||
379 | } | ||
380 | if (quot) { | ||
381 | info->mcr |= UART_MCR_DTR; | ||
382 | info->mcr |= UART_MCR_RTS; | ||
383 | ctc_tty_transmit_status(info); | ||
384 | } else { | ||
385 | info->mcr &= ~UART_MCR_DTR; | ||
386 | info->mcr &= ~UART_MCR_RTS; | ||
387 | ctc_tty_transmit_status(info); | ||
388 | return; | ||
389 | } | ||
390 | |||
391 | /* CTS flow control flag and modem status interrupts */ | ||
392 | if (cflag & CRTSCTS) { | ||
393 | info->flags |= CTC_ASYNC_CTS_FLOW; | ||
394 | } else | ||
395 | info->flags &= ~CTC_ASYNC_CTS_FLOW; | ||
396 | if (cflag & CLOCAL) | ||
397 | info->flags &= ~CTC_ASYNC_CHECK_CD; | ||
398 | else { | ||
399 | info->flags |= CTC_ASYNC_CHECK_CD; | ||
400 | } | ||
401 | } | ||
402 | |||
403 | static int | ||
404 | ctc_tty_startup(ctc_tty_info * info) | ||
405 | { | ||
406 | DBF_TEXT(trace, 3, __FUNCTION__); | ||
407 | if (info->flags & CTC_ASYNC_INITIALIZED) | ||
408 | return 0; | ||
409 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
410 | printk(KERN_DEBUG "starting up %s%d ...\n", CTC_TTY_NAME, info->line); | ||
411 | #endif | ||
412 | /* | ||
413 | * Now, initialize the UART | ||
414 | */ | ||
415 | info->mcr = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2; | ||
416 | if (info->tty) | ||
417 | clear_bit(TTY_IO_ERROR, &info->tty->flags); | ||
418 | /* | ||
419 | * and set the speed of the serial port | ||
420 | */ | ||
421 | ctc_tty_change_speed(info); | ||
422 | |||
423 | info->flags |= CTC_ASYNC_INITIALIZED; | ||
424 | if (!(info->flags & CTC_ASYNC_NETDEV_OPEN)) | ||
425 | info->netdev->open(info->netdev); | ||
426 | info->flags |= CTC_ASYNC_NETDEV_OPEN; | ||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | static void | ||
431 | ctc_tty_stopdev(unsigned long data) | ||
432 | { | ||
433 | ctc_tty_info *info = (ctc_tty_info *)data; | ||
434 | |||
435 | if ((!info) || (!info->netdev) || | ||
436 | (info->flags & CTC_ASYNC_INITIALIZED)) | ||
437 | return; | ||
438 | info->netdev->stop(info->netdev); | ||
439 | info->flags &= ~CTC_ASYNC_NETDEV_OPEN; | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * This routine will shutdown a serial port; interrupts are disabled, and | ||
444 | * DTR is dropped if the hangup on close termio flag is on. | ||
445 | */ | ||
446 | static void | ||
447 | ctc_tty_shutdown(ctc_tty_info * info) | ||
448 | { | ||
449 | DBF_TEXT(trace, 3, __FUNCTION__); | ||
450 | if (!(info->flags & CTC_ASYNC_INITIALIZED)) | ||
451 | return; | ||
452 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
453 | printk(KERN_DEBUG "Shutting down %s%d ....\n", CTC_TTY_NAME, info->line); | ||
454 | #endif | ||
455 | info->msr &= ~UART_MSR_RI; | ||
456 | if (!info->tty || (info->tty->termios->c_cflag & HUPCL)) | ||
457 | info->mcr &= ~(UART_MCR_DTR | UART_MCR_RTS); | ||
458 | if (info->tty) | ||
459 | set_bit(TTY_IO_ERROR, &info->tty->flags); | ||
460 | mod_timer(&info->stoptimer, jiffies + (10 * HZ)); | ||
461 | skb_queue_purge(&info->tx_queue); | ||
462 | skb_queue_purge(&info->rx_queue); | ||
463 | info->flags &= ~CTC_ASYNC_INITIALIZED; | ||
464 | } | ||
465 | |||
466 | /* ctc_tty_write() is the main send-routine. It is called from the upper | ||
467 | * levels within the kernel to perform sending data. Depending on the | ||
468 | * online-flag it either directs output to the at-command-interpreter or | ||
469 | * to the lower level. Additional tasks done here: | ||
470 | * - If online, check for escape-sequence (+++) | ||
471 | * - If sending audio-data, call ctc_tty_DLEdown() to parse DLE-codes. | ||
472 | * - If receiving audio-data, call ctc_tty_end_vrx() to abort if needed. | ||
473 | * - If dialing, abort dial. | ||
474 | */ | ||
475 | static int | ||
476 | ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count) | ||
477 | { | ||
478 | int c; | ||
479 | int total = 0; | ||
480 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
481 | |||
482 | DBF_TEXT(trace, 5, __FUNCTION__); | ||
483 | if (ctc_tty_shuttingdown) | ||
484 | goto ex; | ||
485 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write")) | ||
486 | goto ex; | ||
487 | if (!tty) | ||
488 | goto ex; | ||
489 | if (!info->netdev) { | ||
490 | total = -ENODEV; | ||
491 | goto ex; | ||
492 | } | ||
493 | while (1) { | ||
494 | struct sk_buff *skb; | ||
495 | int skb_res; | ||
496 | |||
497 | c = (count < CTC_TTY_XMIT_SIZE) ? count : CTC_TTY_XMIT_SIZE; | ||
498 | if (c <= 0) | ||
499 | break; | ||
500 | |||
501 | skb_res = info->netdev->hard_header_len + sizeof(info->mcr) + | ||
502 | + sizeof(__u32); | ||
503 | skb = dev_alloc_skb(skb_res + c); | ||
504 | if (!skb) { | ||
505 | printk(KERN_WARNING | ||
506 | "ctc_tty: Out of memory in %s%d write\n", | ||
507 | CTC_TTY_NAME, info->line); | ||
508 | break; | ||
509 | } | ||
510 | skb_reserve(skb, skb_res); | ||
511 | memcpy(skb_put(skb, c), buf, c); | ||
512 | skb_queue_tail(&info->tx_queue, skb); | ||
513 | buf += c; | ||
514 | total += c; | ||
515 | count -= c; | ||
516 | } | ||
517 | if (!skb_queue_empty(&info->tx_queue)) { | ||
518 | info->lsr &= ~UART_LSR_TEMT; | ||
519 | tasklet_schedule(&info->tasklet); | ||
520 | } | ||
521 | ex: | ||
522 | DBF_TEXT(trace, 6, __FUNCTION__); | ||
523 | return total; | ||
524 | } | ||
525 | |||
526 | static int | ||
527 | ctc_tty_write_room(struct tty_struct *tty) | ||
528 | { | ||
529 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
530 | |||
531 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write_room")) | ||
532 | return 0; | ||
533 | return CTC_TTY_XMIT_SIZE; | ||
534 | } | ||
535 | |||
536 | static int | ||
537 | ctc_tty_chars_in_buffer(struct tty_struct *tty) | ||
538 | { | ||
539 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
540 | |||
541 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_chars_in_buffer")) | ||
542 | return 0; | ||
543 | return 0; | ||
544 | } | ||
545 | |||
546 | static void | ||
547 | ctc_tty_flush_buffer(struct tty_struct *tty) | ||
548 | { | ||
549 | ctc_tty_info *info; | ||
550 | unsigned long flags; | ||
551 | |||
552 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
553 | if (!tty) | ||
554 | goto ex; | ||
555 | spin_lock_irqsave(&ctc_tty_lock, flags); | ||
556 | info = (ctc_tty_info *) tty->driver_data; | ||
557 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_buffer")) { | ||
558 | spin_unlock_irqrestore(&ctc_tty_lock, flags); | ||
559 | goto ex; | ||
560 | } | ||
561 | skb_queue_purge(&info->tx_queue); | ||
562 | info->lsr |= UART_LSR_TEMT; | ||
563 | spin_unlock_irqrestore(&ctc_tty_lock, flags); | ||
564 | wake_up_interruptible(&tty->write_wait); | ||
565 | tty_wakeup(tty); | ||
566 | ex: | ||
567 | DBF_TEXT_(trace, 2, "ex: %s ", __FUNCTION__); | ||
568 | return; | ||
569 | } | ||
570 | |||
571 | static void | ||
572 | ctc_tty_flush_chars(struct tty_struct *tty) | ||
573 | { | ||
574 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
575 | |||
576 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
577 | if (ctc_tty_shuttingdown) | ||
578 | return; | ||
579 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars")) | ||
580 | return; | ||
581 | if (tty->stopped || tty->hw_stopped || skb_queue_empty(&info->tx_queue)) | ||
582 | return; | ||
583 | tasklet_schedule(&info->tasklet); | ||
584 | } | ||
585 | |||
586 | /* | ||
587 | * ------------------------------------------------------------ | ||
588 | * ctc_tty_throttle() | ||
589 | * | ||
590 | * This routine is called by the upper-layer tty layer to signal that | ||
591 | * incoming characters should be throttled. | ||
592 | * ------------------------------------------------------------ | ||
593 | */ | ||
594 | static void | ||
595 | ctc_tty_throttle(struct tty_struct *tty) | ||
596 | { | ||
597 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
598 | |||
599 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
600 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_throttle")) | ||
601 | return; | ||
602 | info->mcr &= ~UART_MCR_RTS; | ||
603 | if (I_IXOFF(tty)) | ||
604 | ctc_tty_inject(info, STOP_CHAR(tty)); | ||
605 | ctc_tty_transmit_status(info); | ||
606 | } | ||
607 | |||
608 | static void | ||
609 | ctc_tty_unthrottle(struct tty_struct *tty) | ||
610 | { | ||
611 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
612 | |||
613 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
614 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_unthrottle")) | ||
615 | return; | ||
616 | info->mcr |= UART_MCR_RTS; | ||
617 | if (I_IXOFF(tty)) | ||
618 | ctc_tty_inject(info, START_CHAR(tty)); | ||
619 | ctc_tty_transmit_status(info); | ||
620 | } | ||
621 | |||
622 | /* | ||
623 | * ------------------------------------------------------------ | ||
624 | * ctc_tty_ioctl() and friends | ||
625 | * ------------------------------------------------------------ | ||
626 | */ | ||
627 | |||
628 | /* | ||
629 | * ctc_tty_get_lsr_info - get line status register info | ||
630 | * | ||
631 | * Purpose: Let user call ioctl() to get info when the UART physically | ||
632 | * is emptied. On bus types like RS485, the transmitter must | ||
633 | * release the bus after transmitting. This must be done when | ||
634 | * the transmit shift register is empty, not be done when the | ||
635 | * transmit holding register is empty. This functionality | ||
636 | * allows RS485 driver to be written in user space. | ||
637 | */ | ||
638 | static int | ||
639 | ctc_tty_get_lsr_info(ctc_tty_info * info, uint __user *value) | ||
640 | { | ||
641 | u_char status; | ||
642 | uint result; | ||
643 | ulong flags; | ||
644 | |||
645 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
646 | spin_lock_irqsave(&ctc_tty_lock, flags); | ||
647 | status = info->lsr; | ||
648 | spin_unlock_irqrestore(&ctc_tty_lock, flags); | ||
649 | result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0); | ||
650 | put_user(result, value); | ||
651 | return 0; | ||
652 | } | ||
653 | |||
654 | |||
655 | static int ctc_tty_tiocmget(struct tty_struct *tty, struct file *file) | ||
656 | { | ||
657 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
658 | u_char control, | ||
659 | status; | ||
660 | uint result; | ||
661 | ulong flags; | ||
662 | |||
663 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
664 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl")) | ||
665 | return -ENODEV; | ||
666 | if (tty->flags & (1 << TTY_IO_ERROR)) | ||
667 | return -EIO; | ||
668 | |||
669 | control = info->mcr; | ||
670 | spin_lock_irqsave(&ctc_tty_lock, flags); | ||
671 | status = info->msr; | ||
672 | spin_unlock_irqrestore(&ctc_tty_lock, flags); | ||
673 | result = ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) | ||
674 | | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) | ||
675 | | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) | ||
676 | | ((status & UART_MSR_RI) ? TIOCM_RNG : 0) | ||
677 | | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) | ||
678 | | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0); | ||
679 | return result; | ||
680 | } | ||
681 | |||
682 | static int | ||
683 | ctc_tty_tiocmset(struct tty_struct *tty, struct file *file, | ||
684 | unsigned int set, unsigned int clear) | ||
685 | { | ||
686 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
687 | |||
688 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
689 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl")) | ||
690 | return -ENODEV; | ||
691 | if (tty->flags & (1 << TTY_IO_ERROR)) | ||
692 | return -EIO; | ||
693 | |||
694 | if (set & TIOCM_RTS) | ||
695 | info->mcr |= UART_MCR_RTS; | ||
696 | if (set & TIOCM_DTR) | ||
697 | info->mcr |= UART_MCR_DTR; | ||
698 | |||
699 | if (clear & TIOCM_RTS) | ||
700 | info->mcr &= ~UART_MCR_RTS; | ||
701 | if (clear & TIOCM_DTR) | ||
702 | info->mcr &= ~UART_MCR_DTR; | ||
703 | |||
704 | if ((set | clear) & (TIOCM_RTS|TIOCM_DTR)) | ||
705 | ctc_tty_transmit_status(info); | ||
706 | return 0; | ||
707 | } | ||
708 | |||
709 | static int | ||
710 | ctc_tty_ioctl(struct tty_struct *tty, struct file *file, | ||
711 | uint cmd, ulong arg) | ||
712 | { | ||
713 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
714 | int error; | ||
715 | int retval; | ||
716 | |||
717 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
718 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl")) | ||
719 | return -ENODEV; | ||
720 | if (tty->flags & (1 << TTY_IO_ERROR)) | ||
721 | return -EIO; | ||
722 | switch (cmd) { | ||
723 | case TCSBRK: /* SVID version: non-zero arg --> no break */ | ||
724 | #ifdef CTC_DEBUG_MODEM_IOCTL | ||
725 | printk(KERN_DEBUG "%s%d ioctl TCSBRK\n", CTC_TTY_NAME, info->line); | ||
726 | #endif | ||
727 | retval = tty_check_change(tty); | ||
728 | if (retval) | ||
729 | return retval; | ||
730 | tty_wait_until_sent(tty, 0); | ||
731 | return 0; | ||
732 | case TCSBRKP: /* support for POSIX tcsendbreak() */ | ||
733 | #ifdef CTC_DEBUG_MODEM_IOCTL | ||
734 | printk(KERN_DEBUG "%s%d ioctl TCSBRKP\n", CTC_TTY_NAME, info->line); | ||
735 | #endif | ||
736 | retval = tty_check_change(tty); | ||
737 | if (retval) | ||
738 | return retval; | ||
739 | tty_wait_until_sent(tty, 0); | ||
740 | return 0; | ||
741 | case TIOCGSOFTCAR: | ||
742 | #ifdef CTC_DEBUG_MODEM_IOCTL | ||
743 | printk(KERN_DEBUG "%s%d ioctl TIOCGSOFTCAR\n", CTC_TTY_NAME, | ||
744 | info->line); | ||
745 | #endif | ||
746 | error = put_user(C_CLOCAL(tty) ? 1 : 0, (ulong __user *) arg); | ||
747 | return error; | ||
748 | case TIOCSSOFTCAR: | ||
749 | #ifdef CTC_DEBUG_MODEM_IOCTL | ||
750 | printk(KERN_DEBUG "%s%d ioctl TIOCSSOFTCAR\n", CTC_TTY_NAME, | ||
751 | info->line); | ||
752 | #endif | ||
753 | error = get_user(arg, (ulong __user *) arg); | ||
754 | if (error) | ||
755 | return error; | ||
756 | tty->termios->c_cflag = | ||
757 | ((tty->termios->c_cflag & ~CLOCAL) | | ||
758 | (arg ? CLOCAL : 0)); | ||
759 | return 0; | ||
760 | case TIOCSERGETLSR: /* Get line status register */ | ||
761 | #ifdef CTC_DEBUG_MODEM_IOCTL | ||
762 | printk(KERN_DEBUG "%s%d ioctl TIOCSERGETLSR\n", CTC_TTY_NAME, | ||
763 | info->line); | ||
764 | #endif | ||
765 | if (access_ok(VERIFY_WRITE, (void __user *) arg, sizeof(uint))) | ||
766 | return ctc_tty_get_lsr_info(info, (uint __user *) arg); | ||
767 | else | ||
768 | return -EFAULT; | ||
769 | default: | ||
770 | #ifdef CTC_DEBUG_MODEM_IOCTL | ||
771 | printk(KERN_DEBUG "UNKNOWN ioctl 0x%08x on %s%d\n", cmd, | ||
772 | CTC_TTY_NAME, info->line); | ||
773 | #endif | ||
774 | return -ENOIOCTLCMD; | ||
775 | } | ||
776 | return 0; | ||
777 | } | ||
778 | |||
779 | static void | ||
780 | ctc_tty_set_termios(struct tty_struct *tty, struct termios *old_termios) | ||
781 | { | ||
782 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
783 | unsigned int cflag = tty->termios->c_cflag; | ||
784 | |||
785 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
786 | ctc_tty_change_speed(info); | ||
787 | |||
788 | /* Handle transition to B0 */ | ||
789 | if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) { | ||
790 | info->mcr &= ~(UART_MCR_DTR|UART_MCR_RTS); | ||
791 | ctc_tty_transmit_status(info); | ||
792 | } | ||
793 | |||
794 | /* Handle transition from B0 to other */ | ||
795 | if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) { | ||
796 | info->mcr |= UART_MCR_DTR; | ||
797 | if (!(tty->termios->c_cflag & CRTSCTS) || | ||
798 | !test_bit(TTY_THROTTLED, &tty->flags)) { | ||
799 | info->mcr |= UART_MCR_RTS; | ||
800 | } | ||
801 | ctc_tty_transmit_status(info); | ||
802 | } | ||
803 | |||
804 | /* Handle turning off CRTSCTS */ | ||
805 | if ((old_termios->c_cflag & CRTSCTS) && | ||
806 | !(tty->termios->c_cflag & CRTSCTS)) | ||
807 | tty->hw_stopped = 0; | ||
808 | } | ||
809 | |||
810 | /* | ||
811 | * ------------------------------------------------------------ | ||
812 | * ctc_tty_open() and friends | ||
813 | * ------------------------------------------------------------ | ||
814 | */ | ||
815 | static int | ||
816 | ctc_tty_block_til_ready(struct tty_struct *tty, struct file *filp, ctc_tty_info *info) | ||
817 | { | ||
818 | DECLARE_WAITQUEUE(wait, NULL); | ||
819 | int do_clocal = 0; | ||
820 | unsigned long flags; | ||
821 | int retval; | ||
822 | |||
823 | DBF_TEXT(trace, 4, __FUNCTION__); | ||
824 | /* | ||
825 | * If the device is in the middle of being closed, then block | ||
826 | * until it's done, and then try again. | ||
827 | */ | ||
828 | if (tty_hung_up_p(filp) || | ||
829 | (info->flags & CTC_ASYNC_CLOSING)) { | ||
830 | if (info->flags & CTC_ASYNC_CLOSING) | ||
831 | wait_event(info->close_wait, | ||
832 | !(info->flags & CTC_ASYNC_CLOSING)); | ||
833 | #ifdef MODEM_DO_RESTART | ||
834 | if (info->flags & CTC_ASYNC_HUP_NOTIFY) | ||
835 | return -EAGAIN; | ||
836 | else | ||
837 | return -ERESTARTSYS; | ||
838 | #else | ||
839 | return -EAGAIN; | ||
840 | #endif | ||
841 | } | ||
842 | /* | ||
843 | * If non-blocking mode is set, then make the check up front | ||
844 | * and then exit. | ||
845 | */ | ||
846 | if ((filp->f_flags & O_NONBLOCK) || | ||
847 | (tty->flags & (1 << TTY_IO_ERROR))) { | ||
848 | info->flags |= CTC_ASYNC_NORMAL_ACTIVE; | ||
849 | return 0; | ||
850 | } | ||
851 | if (tty->termios->c_cflag & CLOCAL) | ||
852 | do_clocal = 1; | ||
853 | /* | ||
854 | * Block waiting for the carrier detect and the line to become | ||
855 | * free (i.e., not in use by the callout). While we are in | ||
856 | * this loop, info->count is dropped by one, so that | ||
857 | * ctc_tty_close() knows when to free things. We restore it upon | ||
858 | * exit, either normal or abnormal. | ||
859 | */ | ||
860 | retval = 0; | ||
861 | add_wait_queue(&info->open_wait, &wait); | ||
862 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
863 | printk(KERN_DEBUG "ctc_tty_block_til_ready before block: %s%d, count = %d\n", | ||
864 | CTC_TTY_NAME, info->line, info->count); | ||
865 | #endif | ||
866 | spin_lock_irqsave(&ctc_tty_lock, flags); | ||
867 | if (!(tty_hung_up_p(filp))) | ||
868 | info->count--; | ||
869 | spin_unlock_irqrestore(&ctc_tty_lock, flags); | ||
870 | info->blocked_open++; | ||
871 | while (1) { | ||
872 | set_current_state(TASK_INTERRUPTIBLE); | ||
873 | if (tty_hung_up_p(filp) || | ||
874 | !(info->flags & CTC_ASYNC_INITIALIZED)) { | ||
875 | #ifdef MODEM_DO_RESTART | ||
876 | if (info->flags & CTC_ASYNC_HUP_NOTIFY) | ||
877 | retval = -EAGAIN; | ||
878 | else | ||
879 | retval = -ERESTARTSYS; | ||
880 | #else | ||
881 | retval = -EAGAIN; | ||
882 | #endif | ||
883 | break; | ||
884 | } | ||
885 | if (!(info->flags & CTC_ASYNC_CLOSING) && | ||
886 | (do_clocal || (info->msr & UART_MSR_DCD))) { | ||
887 | break; | ||
888 | } | ||
889 | if (signal_pending(current)) { | ||
890 | retval = -ERESTARTSYS; | ||
891 | break; | ||
892 | } | ||
893 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
894 | printk(KERN_DEBUG "ctc_tty_block_til_ready blocking: %s%d, count = %d\n", | ||
895 | CTC_TTY_NAME, info->line, info->count); | ||
896 | #endif | ||
897 | schedule(); | ||
898 | } | ||
899 | current->state = TASK_RUNNING; | ||
900 | remove_wait_queue(&info->open_wait, &wait); | ||
901 | if (!tty_hung_up_p(filp)) | ||
902 | info->count++; | ||
903 | info->blocked_open--; | ||
904 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
905 | printk(KERN_DEBUG "ctc_tty_block_til_ready after blocking: %s%d, count = %d\n", | ||
906 | CTC_TTY_NAME, info->line, info->count); | ||
907 | #endif | ||
908 | if (retval) | ||
909 | return retval; | ||
910 | info->flags |= CTC_ASYNC_NORMAL_ACTIVE; | ||
911 | return 0; | ||
912 | } | ||
913 | |||
914 | /* | ||
915 | * This routine is called whenever a serial port is opened. It | ||
916 | * enables interrupts for a serial port, linking its async structure into | ||
917 | * the IRQ chain. It also performs the serial-specific | ||
918 | * initialization for the tty structure. | ||
919 | */ | ||
920 | static int | ||
921 | ctc_tty_open(struct tty_struct *tty, struct file *filp) | ||
922 | { | ||
923 | ctc_tty_info *info; | ||
924 | unsigned long saveflags; | ||
925 | int retval, | ||
926 | line; | ||
927 | |||
928 | DBF_TEXT(trace, 3, __FUNCTION__); | ||
929 | line = tty->index; | ||
930 | if (line < 0 || line > CTC_TTY_MAX_DEVICES) | ||
931 | return -ENODEV; | ||
932 | info = &driver->info[line]; | ||
933 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_open")) | ||
934 | return -ENODEV; | ||
935 | if (!info->netdev) | ||
936 | return -ENODEV; | ||
937 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
938 | printk(KERN_DEBUG "ctc_tty_open %s, count = %d\n", tty->name, | ||
939 | info->count); | ||
940 | #endif | ||
941 | spin_lock_irqsave(&ctc_tty_lock, saveflags); | ||
942 | info->count++; | ||
943 | tty->driver_data = info; | ||
944 | info->tty = tty; | ||
945 | spin_unlock_irqrestore(&ctc_tty_lock, saveflags); | ||
946 | /* | ||
947 | * Start up serial port | ||
948 | */ | ||
949 | retval = ctc_tty_startup(info); | ||
950 | if (retval) { | ||
951 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
952 | printk(KERN_DEBUG "ctc_tty_open return after startup\n"); | ||
953 | #endif | ||
954 | return retval; | ||
955 | } | ||
956 | retval = ctc_tty_block_til_ready(tty, filp, info); | ||
957 | if (retval) { | ||
958 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
959 | printk(KERN_DEBUG "ctc_tty_open return after ctc_tty_block_til_ready \n"); | ||
960 | #endif | ||
961 | return retval; | ||
962 | } | ||
963 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
964 | printk(KERN_DEBUG "ctc_tty_open %s successful...\n", tty->name); | ||
965 | #endif | ||
966 | return 0; | ||
967 | } | ||
968 | |||
969 | static void | ||
970 | ctc_tty_close(struct tty_struct *tty, struct file *filp) | ||
971 | { | ||
972 | ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; | ||
973 | ulong flags; | ||
974 | ulong timeout; | ||
975 | DBF_TEXT(trace, 3, __FUNCTION__); | ||
976 | if (!info || ctc_tty_paranoia_check(info, tty->name, "ctc_tty_close")) | ||
977 | return; | ||
978 | spin_lock_irqsave(&ctc_tty_lock, flags); | ||
979 | if (tty_hung_up_p(filp)) { | ||
980 | spin_unlock_irqrestore(&ctc_tty_lock, flags); | ||
981 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
982 | printk(KERN_DEBUG "ctc_tty_close return after tty_hung_up_p\n"); | ||
983 | #endif | ||
984 | return; | ||
985 | } | ||
986 | if ((tty->count == 1) && (info->count != 1)) { | ||
987 | /* | ||
988 | * Uh, oh. tty->count is 1, which means that the tty | ||
989 | * structure will be freed. Info->count should always | ||
990 | * be one in these conditions. If it's greater than | ||
991 | * one, we've got real problems, since it means the | ||
992 | * serial port won't be shutdown. | ||
993 | */ | ||
994 | printk(KERN_ERR "ctc_tty_close: bad port count; tty->count is 1, " | ||
995 | "info->count is %d\n", info->count); | ||
996 | info->count = 1; | ||
997 | } | ||
998 | if (--info->count < 0) { | ||
999 | printk(KERN_ERR "ctc_tty_close: bad port count for %s%d: %d\n", | ||
1000 | CTC_TTY_NAME, info->line, info->count); | ||
1001 | info->count = 0; | ||
1002 | } | ||
1003 | if (info->count) { | ||
1004 | local_irq_restore(flags); | ||
1005 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
1006 | printk(KERN_DEBUG "ctc_tty_close after info->count != 0\n"); | ||
1007 | #endif | ||
1008 | return; | ||
1009 | } | ||
1010 | info->flags |= CTC_ASYNC_CLOSING; | ||
1011 | tty->closing = 1; | ||
1012 | /* | ||
1013 | * At this point we stop accepting input. To do this, we | ||
1014 | * disable the receive line status interrupts, and tell the | ||
1015 | * interrupt driver to stop checking the data ready bit in the | ||
1016 | * line status register. | ||
1017 | */ | ||
1018 | if (info->flags & CTC_ASYNC_INITIALIZED) { | ||
1019 | tty_wait_until_sent(tty, 30*HZ); /* 30 seconds timeout */ | ||
1020 | /* | ||
1021 | * Before we drop DTR, make sure the UART transmitter | ||
1022 | * has completely drained; this is especially | ||
1023 | * important if there is a transmit FIFO! | ||
1024 | */ | ||
1025 | timeout = jiffies + HZ; | ||
1026 | while (!(info->lsr & UART_LSR_TEMT)) { | ||
1027 | spin_unlock_irqrestore(&ctc_tty_lock, flags); | ||
1028 | msleep(500); | ||
1029 | spin_lock_irqsave(&ctc_tty_lock, flags); | ||
1030 | if (time_after(jiffies,timeout)) | ||
1031 | break; | ||
1032 | } | ||
1033 | } | ||
1034 | ctc_tty_shutdown(info); | ||
1035 | if (tty->driver->flush_buffer) { | ||
1036 | skb_queue_purge(&info->tx_queue); | ||
1037 | info->lsr |= UART_LSR_TEMT; | ||
1038 | } | ||
1039 | tty_ldisc_flush(tty); | ||
1040 | info->tty = 0; | ||
1041 | tty->closing = 0; | ||
1042 | if (info->blocked_open) { | ||
1043 | msleep_interruptible(500); | ||
1044 | wake_up_interruptible(&info->open_wait); | ||
1045 | } | ||
1046 | info->flags &= ~(CTC_ASYNC_NORMAL_ACTIVE | CTC_ASYNC_CLOSING); | ||
1047 | wake_up_interruptible(&info->close_wait); | ||
1048 | spin_unlock_irqrestore(&ctc_tty_lock, flags); | ||
1049 | #ifdef CTC_DEBUG_MODEM_OPEN | ||
1050 | printk(KERN_DEBUG "ctc_tty_close normal exit\n"); | ||
1051 | #endif | ||
1052 | } | ||
1053 | |||
1054 | /* | ||
1055 | * ctc_tty_hangup() --- called by tty_hangup() when a hangup is signaled. | ||
1056 | */ | ||
1057 | static void | ||
1058 | ctc_tty_hangup(struct tty_struct *tty) | ||
1059 | { | ||
1060 | ctc_tty_info *info = (ctc_tty_info *)tty->driver_data; | ||
1061 | unsigned long saveflags; | ||
1062 | DBF_TEXT(trace, 3, __FUNCTION__); | ||
1063 | if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup")) | ||
1064 | return; | ||
1065 | ctc_tty_shutdown(info); | ||
1066 | info->count = 0; | ||
1067 | info->flags &= ~CTC_ASYNC_NORMAL_ACTIVE; | ||
1068 | spin_lock_irqsave(&ctc_tty_lock, saveflags); | ||
1069 | info->tty = 0; | ||
1070 | spin_unlock_irqrestore(&ctc_tty_lock, saveflags); | ||
1071 | wake_up_interruptible(&info->open_wait); | ||
1072 | } | ||
1073 | |||
1074 | |||
1075 | /* | ||
1076 | * For all online tty's, try sending data to | ||
1077 | * the lower levels. | ||
1078 | */ | ||
1079 | static void | ||
1080 | ctc_tty_task(unsigned long arg) | ||
1081 | { | ||
1082 | ctc_tty_info *info = (void *)arg; | ||
1083 | unsigned long saveflags; | ||
1084 | int again; | ||
1085 | |||
1086 | DBF_TEXT(trace, 3, __FUNCTION__); | ||
1087 | spin_lock_irqsave(&ctc_tty_lock, saveflags); | ||
1088 | if ((!ctc_tty_shuttingdown) && info) { | ||
1089 | again = ctc_tty_tint(info); | ||
1090 | if (!again) | ||
1091 | info->lsr |= UART_LSR_TEMT; | ||
1092 | again |= ctc_tty_readmodem(info); | ||
1093 | if (again) { | ||
1094 | tasklet_schedule(&info->tasklet); | ||
1095 | } | ||
1096 | } | ||
1097 | spin_unlock_irqrestore(&ctc_tty_lock, saveflags); | ||
1098 | } | ||
1099 | |||
1100 | static struct tty_operations ctc_ops = { | ||
1101 | .open = ctc_tty_open, | ||
1102 | .close = ctc_tty_close, | ||
1103 | .write = ctc_tty_write, | ||
1104 | .flush_chars = ctc_tty_flush_chars, | ||
1105 | .write_room = ctc_tty_write_room, | ||
1106 | .chars_in_buffer = ctc_tty_chars_in_buffer, | ||
1107 | .flush_buffer = ctc_tty_flush_buffer, | ||
1108 | .ioctl = ctc_tty_ioctl, | ||
1109 | .throttle = ctc_tty_throttle, | ||
1110 | .unthrottle = ctc_tty_unthrottle, | ||
1111 | .set_termios = ctc_tty_set_termios, | ||
1112 | .hangup = ctc_tty_hangup, | ||
1113 | .tiocmget = ctc_tty_tiocmget, | ||
1114 | .tiocmset = ctc_tty_tiocmset, | ||
1115 | }; | ||
1116 | |||
1117 | int | ||
1118 | ctc_tty_init(void) | ||
1119 | { | ||
1120 | int i; | ||
1121 | ctc_tty_info *info; | ||
1122 | struct tty_driver *device; | ||
1123 | |||
1124 | DBF_TEXT(trace, 2, __FUNCTION__); | ||
1125 | driver = kmalloc(sizeof(ctc_tty_driver), GFP_KERNEL); | ||
1126 | if (driver == NULL) { | ||
1127 | printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n"); | ||
1128 | return -ENOMEM; | ||
1129 | } | ||
1130 | memset(driver, 0, sizeof(ctc_tty_driver)); | ||
1131 | device = alloc_tty_driver(CTC_TTY_MAX_DEVICES); | ||
1132 | if (!device) { | ||
1133 | kfree(driver); | ||
1134 | printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n"); | ||
1135 | return -ENOMEM; | ||
1136 | } | ||
1137 | |||
1138 | device->devfs_name = "ctc/" CTC_TTY_NAME; | ||
1139 | device->name = CTC_TTY_NAME; | ||
1140 | device->major = CTC_TTY_MAJOR; | ||
1141 | device->minor_start = 0; | ||
1142 | device->type = TTY_DRIVER_TYPE_SERIAL; | ||
1143 | device->subtype = SERIAL_TYPE_NORMAL; | ||
1144 | device->init_termios = tty_std_termios; | ||
1145 | device->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; | ||
1146 | device->flags = TTY_DRIVER_REAL_RAW; | ||
1147 | device->driver_name = "ctc_tty", | ||
1148 | tty_set_operations(device, &ctc_ops); | ||
1149 | if (tty_register_driver(device)) { | ||
1150 | printk(KERN_WARNING "ctc_tty: Couldn't register serial-device\n"); | ||
1151 | put_tty_driver(device); | ||
1152 | kfree(driver); | ||
1153 | return -1; | ||
1154 | } | ||
1155 | driver->ctc_tty_device = device; | ||
1156 | for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) { | ||
1157 | info = &driver->info[i]; | ||
1158 | init_MUTEX(&info->write_sem); | ||
1159 | tasklet_init(&info->tasklet, ctc_tty_task, | ||
1160 | (unsigned long) info); | ||
1161 | info->magic = CTC_ASYNC_MAGIC; | ||
1162 | info->line = i; | ||
1163 | info->tty = 0; | ||
1164 | info->count = 0; | ||
1165 | info->blocked_open = 0; | ||
1166 | init_waitqueue_head(&info->open_wait); | ||
1167 | init_waitqueue_head(&info->close_wait); | ||
1168 | skb_queue_head_init(&info->tx_queue); | ||
1169 | skb_queue_head_init(&info->rx_queue); | ||
1170 | init_timer(&info->stoptimer); | ||
1171 | info->stoptimer.function = ctc_tty_stopdev; | ||
1172 | info->stoptimer.data = (unsigned long)info; | ||
1173 | info->mcr = UART_MCR_RTS; | ||
1174 | } | ||
1175 | return 0; | ||
1176 | } | ||
1177 | |||
1178 | int | ||
1179 | ctc_tty_register_netdev(struct net_device *dev) { | ||
1180 | int ttynum; | ||
1181 | char *err; | ||
1182 | char *p; | ||
1183 | |||
1184 | DBF_TEXT(trace, 2, __FUNCTION__); | ||
1185 | if ((!dev) || (!dev->name)) { | ||
1186 | printk(KERN_WARNING | ||
1187 | "ctc_tty_register_netdev called " | ||
1188 | "with NULL dev or NULL dev-name\n"); | ||
1189 | return -1; | ||
1190 | } | ||
1191 | |||
1192 | /* | ||
1193 | * If the name is a format string the caller wants us to | ||
1194 | * do a name allocation : format string must end with %d | ||
1195 | */ | ||
1196 | if (strchr(dev->name, '%')) | ||
1197 | { | ||
1198 | int err = dev_alloc_name(dev, dev->name); // dev->name is changed by this | ||
1199 | if (err < 0) { | ||
1200 | printk(KERN_DEBUG "dev_alloc returned error %d\n", err); | ||
1201 | return err; | ||
1202 | } | ||
1203 | |||
1204 | } | ||
1205 | |||
1206 | for (p = dev->name; p && ((*p < '0') || (*p > '9')); p++); | ||
1207 | ttynum = simple_strtoul(p, &err, 0); | ||
1208 | if ((ttynum < 0) || (ttynum >= CTC_TTY_MAX_DEVICES) || | ||
1209 | (err && *err)) { | ||
1210 | printk(KERN_WARNING | ||
1211 | "ctc_tty_register_netdev called " | ||
1212 | "with number in name '%s'\n", dev->name); | ||
1213 | return -1; | ||
1214 | } | ||
1215 | if (driver->info[ttynum].netdev) { | ||
1216 | printk(KERN_WARNING | ||
1217 | "ctc_tty_register_netdev called " | ||
1218 | "for already registered device '%s'\n", | ||
1219 | dev->name); | ||
1220 | return -1; | ||
1221 | } | ||
1222 | driver->info[ttynum].netdev = dev; | ||
1223 | return 0; | ||
1224 | } | ||
1225 | |||
1226 | void | ||
1227 | ctc_tty_unregister_netdev(struct net_device *dev) { | ||
1228 | int i; | ||
1229 | unsigned long saveflags; | ||
1230 | ctc_tty_info *info = NULL; | ||
1231 | |||
1232 | DBF_TEXT(trace, 2, __FUNCTION__); | ||
1233 | spin_lock_irqsave(&ctc_tty_lock, saveflags); | ||
1234 | for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) | ||
1235 | if (driver->info[i].netdev == dev) { | ||
1236 | info = &driver->info[i]; | ||
1237 | break; | ||
1238 | } | ||
1239 | if (info) { | ||
1240 | info->netdev = NULL; | ||
1241 | skb_queue_purge(&info->tx_queue); | ||
1242 | skb_queue_purge(&info->rx_queue); | ||
1243 | } | ||
1244 | spin_unlock_irqrestore(&ctc_tty_lock, saveflags); | ||
1245 | } | ||
1246 | |||
1247 | void | ||
1248 | ctc_tty_cleanup(void) { | ||
1249 | unsigned long saveflags; | ||
1250 | |||
1251 | DBF_TEXT(trace, 2, __FUNCTION__); | ||
1252 | spin_lock_irqsave(&ctc_tty_lock, saveflags); | ||
1253 | ctc_tty_shuttingdown = 1; | ||
1254 | spin_unlock_irqrestore(&ctc_tty_lock, saveflags); | ||
1255 | tty_unregister_driver(driver->ctc_tty_device); | ||
1256 | put_tty_driver(driver->ctc_tty_device); | ||
1257 | kfree(driver); | ||
1258 | driver = NULL; | ||
1259 | } | ||
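For readers following the deleted file: every frame the tty layer exchanged with the netdevice was prefixed with the 4-byte CTC_ASYNC_MAGIC followed by the sender's modem-control value, pushed in ctc_tty_tint() and stripped again in ctc_tty_netif_rx(). A small sketch of the sender-side framing only, assuming enough skb headroom has already been reserved (the helper name is illustrative):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/skbuff.h>

#define CTC_ASYNC_MAGIC 0x49344C01	/* as in the deleted ctctty.c */

/* Sketch: prepend [ __u32 magic ][ int mcr ] in front of the payload,
 * matching the skb_push() sequence in ctc_tty_tint(). */
static void example_ctc_tty_frame(struct sk_buff *skb, int mcr)
{
	__u32 magic = CTC_ASYNC_MAGIC;

	memcpy(skb_push(skb, sizeof(mcr)), &mcr, sizeof(mcr));
	memcpy(skb_push(skb, sizeof(magic)), &magic, sizeof(magic));
}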
diff --git a/drivers/s390/net/ctctty.h b/drivers/s390/net/ctctty.h deleted file mode 100644 index 7254dc006311..000000000000 --- a/drivers/s390/net/ctctty.h +++ /dev/null | |||
@@ -1,35 +0,0 @@ | |||
1 | /* | ||
2 | * CTC / ESCON network driver, tty interface. | ||
3 | * | ||
4 | * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | ||
21 | |||
22 | #ifndef _CTCTTY_H_ | ||
23 | #define _CTCTTY_H_ | ||
24 | |||
25 | #include <linux/skbuff.h> | ||
26 | #include <linux/netdevice.h> | ||
27 | |||
28 | extern int ctc_tty_register_netdev(struct net_device *); | ||
29 | extern void ctc_tty_unregister_netdev(struct net_device *); | ||
30 | extern void ctc_tty_netif_rx(struct sk_buff *); | ||
31 | extern int ctc_tty_init(void); | ||
32 | extern void ctc_tty_cleanup(void); | ||
33 | extern void ctc_tty_setcarrier(struct net_device *, int); | ||
34 | |||
35 | #endif | ||
diff --git a/include/linux/pci.h b/include/linux/pci.h index 3a6a4e37a482..6fd36cb09160 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -442,6 +442,7 @@ struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int devic | |||
442 | struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn); | 442 | struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn); |
443 | int pci_find_capability (struct pci_dev *dev, int cap); | 443 | int pci_find_capability (struct pci_dev *dev, int cap); |
444 | int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap); | 444 | int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap); |
445 | int pci_find_ext_capability (struct pci_dev *dev, int cap); | ||
445 | struct pci_bus * pci_find_next_bus(const struct pci_bus *from); | 446 | struct pci_bus * pci_find_next_bus(const struct pci_bus *from); |
446 | 447 | ||
447 | struct pci_dev *pci_get_device (unsigned int vendor, unsigned int device, struct pci_dev *from); | 448 | struct pci_dev *pci_get_device (unsigned int vendor, unsigned int device, struct pci_dev *from); |
@@ -662,6 +663,7 @@ static inline int pci_register_driver(struct pci_driver *drv) { return 0;} | |||
662 | static inline void pci_unregister_driver(struct pci_driver *drv) { } | 663 | static inline void pci_unregister_driver(struct pci_driver *drv) { } |
663 | static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; } | 664 | static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; } |
664 | static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; } | 665 | static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; } |
666 | static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; } | ||
665 | static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; } | 667 | static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; } |
666 | 668 | ||
667 | /* Power management related routines */ | 669 | /* Power management related routines */ |
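The pci.h hunk above adds a prototype for pci_find_ext_capability() and a stub that returns 0 when PCI support is compiled out. A hedged usage sketch; the capability ID below (0x0001, PCI Express Advanced Error Reporting) is an assumption for illustration and is not defined by this diff:

#include <linux/pci.h>

/* Sketch: look up a PCI Express extended capability in config space.
 * pci_find_ext_capability() returns the offset of the capability header,
 * or 0 if it is absent (or PCI support is disabled). */
static int example_has_aer(struct pci_dev *pdev)
{
	int pos = pci_find_ext_capability(pdev, 0x0001 /* assumed AER cap ID */);

	return pos != 0;
}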
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 590dc6dca315..c3fe769c9129 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -935,6 +935,7 @@ | |||
935 | #define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151 | 935 | #define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151 |
936 | #define PCI_DEVICE_ID_PLX_R753 0x1152 | 936 | #define PCI_DEVICE_ID_PLX_R753 0x1152 |
937 | #define PCI_DEVICE_ID_PLX_OLITEC 0x1187 | 937 | #define PCI_DEVICE_ID_PLX_OLITEC 0x1187 |
938 | #define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196 | ||
938 | #define PCI_DEVICE_ID_PLX_9050 0x9050 | 939 | #define PCI_DEVICE_ID_PLX_9050 0x9050 |
939 | #define PCI_DEVICE_ID_PLX_9080 0x9080 | 940 | #define PCI_DEVICE_ID_PLX_9080 0x9080 |
940 | #define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 | 941 | #define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 |
@@ -1182,6 +1183,14 @@ | |||
1182 | #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E | 1183 | #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E |
1183 | #define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372 | 1184 | #define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372 |
1184 | #define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 | 1185 | #define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 |
1186 | #define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5 | ||
1187 | #define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6 | ||
1188 | #define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE | ||
1189 | #define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF | ||
1190 | #define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 | ||
1191 | #define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 | ||
1192 | #define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 | ||
1193 | #define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453 | ||
1185 | 1194 | ||
1186 | #define PCI_VENDOR_ID_IMS 0x10e0 | 1195 | #define PCI_VENDOR_ID_IMS 0x10e0 |
1187 | #define PCI_DEVICE_ID_IMS_TT128 0x9128 | 1196 | #define PCI_DEVICE_ID_IMS_TT128 0x9128 |
@@ -1827,6 +1836,7 @@ | |||
1827 | 1836 | ||
1828 | #define PCI_VENDOR_ID_SAMSUNG 0x144d | 1837 | #define PCI_VENDOR_ID_SAMSUNG 0x144d |
1829 | 1838 | ||
1839 | #define PCI_VENDOR_ID_MYRICOM 0x14c1 | ||
1830 | 1840 | ||
1831 | #define PCI_VENDOR_ID_TITAN 0x14D2 | 1841 | #define PCI_VENDOR_ID_TITAN 0x14D2 |
1832 | #define PCI_DEVICE_ID_TITAN_010L 0x8001 | 1842 | #define PCI_DEVICE_ID_TITAN_010L 0x8001 |
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index d5926bfb1fc9..293e920ca59d 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/kernel.h> /* ARRAY_SIZE */ | 29 | #include <linux/kernel.h> /* ARRAY_SIZE */ |
30 | #include <linux/wireless.h> | 30 | #include <linux/wireless.h> |
31 | 31 | ||
32 | #define IEEE80211_VERSION "git-1.1.7" | 32 | #define IEEE80211_VERSION "git-1.1.13" |
33 | 33 | ||
34 | #define IEEE80211_DATA_LEN 2304 | 34 | #define IEEE80211_DATA_LEN 2304 |
35 | /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section | 35 | /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section |
@@ -104,6 +104,9 @@ | |||
104 | #define IEEE80211_SCTL_FRAG 0x000F | 104 | #define IEEE80211_SCTL_FRAG 0x000F |
105 | #define IEEE80211_SCTL_SEQ 0xFFF0 | 105 | #define IEEE80211_SCTL_SEQ 0xFFF0 |
106 | 106 | ||
107 | /* QOS control */ | ||
108 | #define IEEE80211_QCTL_TID 0x000F | ||
109 | |||
107 | /* debug macros */ | 110 | /* debug macros */ |
108 | 111 | ||
109 | #ifdef CONFIG_IEEE80211_DEBUG | 112 | #ifdef CONFIG_IEEE80211_DEBUG |
@@ -1075,6 +1078,7 @@ struct ieee80211_device { | |||
1075 | 1078 | ||
1076 | int (*handle_management) (struct net_device * dev, | 1079 | int (*handle_management) (struct net_device * dev, |
1077 | struct ieee80211_network * network, u16 type); | 1080 | struct ieee80211_network * network, u16 type); |
1081 | int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb); | ||
1078 | 1082 | ||
1079 | /* Typical STA methods */ | 1083 | /* Typical STA methods */ |
1080 | int (*handle_auth) (struct net_device * dev, | 1084 | int (*handle_auth) (struct net_device * dev, |
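The ieee80211.h hunk adds a QoS-control mask (IEEE80211_QCTL_TID) and an is_qos_active() callback on struct ieee80211_device. The traffic identifier sits in the low four bits of the 802.11 QoS control field, so extracting it is a single mask; a minimal sketch, assuming a little-endian qos_ctl value already read from the frame header:

#include <linux/types.h>
#include <asm/byteorder.h>

#define IEEE80211_QCTL_TID 0x000F	/* as added in the hunk above */

/* Sketch: extract the TID from a QoS control field. */
static inline u8 example_qos_tid(__le16 qos_ctl)
{
	return (u8)(le16_to_cpu(qos_ctl) & IEEE80211_QCTL_TID);
}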
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h index 052ed596a4e4..703463a8828b 100644 --- a/include/net/ieee80211softmac.h +++ b/include/net/ieee80211softmac.h | |||
@@ -86,6 +86,9 @@ struct ieee80211softmac_assoc_info { | |||
86 | 86 | ||
87 | /* BSSID we're trying to associate to */ | 87 | /* BSSID we're trying to associate to */ |
88 | char bssid[ETH_ALEN]; | 88 | char bssid[ETH_ALEN]; |
89 | |||
90 | /* Rates supported by the network */ | ||
91 | struct ieee80211softmac_ratesinfo supported_rates; | ||
89 | 92 | ||
90 | /* some flags. | 93 | /* some flags. |
91 | * static_essid is valid if the essid is constant, | 94 | * static_essid is valid if the essid is constant, |
@@ -132,23 +135,26 @@ enum { | |||
132 | struct ieee80211softmac_txrates { | 135 | struct ieee80211softmac_txrates { |
133 | /* The Bit-Rate to be used for multicast frames. */ | 136 | /* The Bit-Rate to be used for multicast frames. */ |
134 | u8 mcast_rate; | 137 | u8 mcast_rate; |
135 | /* The Bit-Rate to be used for multicast fallback | 138 | |
136 | * (If the device supports fallback and hardware-retry) | 139 | /* The Bit-Rate to be used for multicast management frames. */ |
137 | */ | 140 | u8 mgt_mcast_rate; |
138 | u8 mcast_fallback; | 141 | |
139 | /* The Bit-Rate to be used for any other (normal) data packet. */ | 142 | /* The Bit-Rate to be used for any other (normal) data packet. */ |
140 | u8 default_rate; | 143 | u8 default_rate; |
141 | /* The Bit-Rate to be used for default fallback | 144 | /* The Bit-Rate to be used for default fallback |
142 | * (If the device supports fallback and hardware-retry) | 145 | * (If the device supports fallback and hardware-retry) |
143 | */ | 146 | */ |
144 | u8 default_fallback; | 147 | u8 default_fallback; |
148 | |||
149 | /* This is the rate that the user asked for */ | ||
150 | u8 user_rate; | ||
145 | }; | 151 | }; |
146 | 152 | ||
147 | /* Bits for txrates_change callback. */ | 153 | /* Bits for txrates_change callback. */ |
148 | #define IEEE80211SOFTMAC_TXRATECHG_DEFAULT (1 << 0) /* default_rate */ | 154 | #define IEEE80211SOFTMAC_TXRATECHG_DEFAULT (1 << 0) /* default_rate */ |
149 | #define IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK (1 << 1) /* default_fallback */ | 155 | #define IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK (1 << 1) /* default_fallback */ |
150 | #define IEEE80211SOFTMAC_TXRATECHG_MCAST (1 << 2) /* mcast_rate */ | 156 | #define IEEE80211SOFTMAC_TXRATECHG_MCAST (1 << 2) /* mcast_rate */ |
151 | #define IEEE80211SOFTMAC_TXRATECHG_MCAST_FBACK (1 << 3) /* mcast_fallback */ | 157 | #define IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST (1 << 3) /* mgt_mcast_rate */ |
152 | 158 | ||
153 | struct ieee80211softmac_device { | 159 | struct ieee80211softmac_device { |
154 | /* 802.11 structure for data stuff */ | 160 | /* 802.11 structure for data stuff */ |
@@ -250,6 +256,28 @@ extern void ieee80211softmac_fragment_lost(struct net_device *dev, | |||
250 | * Note that the rates need to be sorted. */ | 256 | * Note that the rates need to be sorted. */ |
251 | extern void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates); | 257 | extern void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates); |
252 | 258 | ||
259 | /* Helper function which advises you the rate at which a frame should be | ||
260 | * transmitted. */ | ||
261 | static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device *mac, | ||
262 | int is_multicast, | ||
263 | int is_mgt) | ||
264 | { | ||
265 | struct ieee80211softmac_txrates *txrates = &mac->txrates; | ||
266 | |||
267 | if (!mac->associated) | ||
268 | return txrates->mgt_mcast_rate; | ||
269 | |||
270 | /* We are associated, sending unicast frame */ | ||
271 | if (!is_multicast) | ||
272 | return txrates->default_rate; | ||
273 | |||
274 | /* We are associated, sending multicast frame */ | ||
275 | if (is_mgt) | ||
276 | return txrates->mgt_mcast_rate; | ||
277 | else | ||
278 | return txrates->mcast_rate; | ||
279 | } | ||
280 | |||
253 | /* Start the SoftMAC. Call this after you initialized the device | 281 | /* Start the SoftMAC. Call this after you initialized the device |
254 | * and it is ready to run. | 282 | * and it is ready to run. |
255 | */ | 283 | */ |
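
The new ieee80211softmac_suggest_txrate() helper is intended to be called from a driver's transmit path once per frame. The fragment below is a hypothetical sketch of such a call; example_pick_bitrate() and its arguments are illustrative, not part of any existing driver.

#include <linux/etherdevice.h>
#include <net/ieee80211.h>
#include <net/ieee80211softmac.h>

/* Hypothetical driver helper: pick a bitrate for one outgoing frame. */
static u8 example_pick_bitrate(struct ieee80211softmac_device *mac,
			       struct ieee80211_hdr_3addr *hdr)
{
	u16 fc = le16_to_cpu(hdr->frame_ctl);
	int is_mcast = is_multicast_ether_addr(hdr->addr1);
	int is_mgt = WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_MGMT;

	/* Unassociated: mgt_mcast_rate; otherwise the rate class that
	 * matches the frame (unicast, multicast or management). */
	return ieee80211softmac_suggest_txrate(mac, is_mcast, is_mgt);
}
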
diff --git a/include/net/ieee80211softmac_wx.h b/include/net/ieee80211softmac_wx.h index 3e0be453ecea..4ee3ad57283f 100644 --- a/include/net/ieee80211softmac_wx.h +++ b/include/net/ieee80211softmac_wx.h | |||
@@ -91,4 +91,9 @@ ieee80211softmac_wx_get_genie(struct net_device *dev, | |||
91 | struct iw_request_info *info, | 91 | struct iw_request_info *info, |
92 | union iwreq_data *wrqu, | 92 | union iwreq_data *wrqu, |
93 | char *extra); | 93 | char *extra); |
94 | extern int | ||
95 | ieee80211softmac_wx_set_mlme(struct net_device *dev, | ||
96 | struct iw_request_info *info, | ||
97 | union iwreq_data *wrqu, | ||
98 | char *extra); | ||
94 | #endif /* _IEEE80211SOFTMAC_WX */ | 99 | #endif /* _IEEE80211SOFTMAC_WX */ |
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index 93def94c1b32..3fa5df2e1f0b 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c | |||
@@ -501,8 +501,11 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr, | |||
501 | static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) | 501 | static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) |
502 | { | 502 | { |
503 | struct ieee80211_hdr_4addr *hdr11; | 503 | struct ieee80211_hdr_4addr *hdr11; |
504 | u16 stype; | ||
504 | 505 | ||
505 | hdr11 = (struct ieee80211_hdr_4addr *)skb->data; | 506 | hdr11 = (struct ieee80211_hdr_4addr *)skb->data; |
507 | stype = WLAN_FC_GET_STYPE(le16_to_cpu(hdr11->frame_ctl)); | ||
508 | |||
506 | switch (le16_to_cpu(hdr11->frame_ctl) & | 509 | switch (le16_to_cpu(hdr11->frame_ctl) & |
507 | (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { | 510 | (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { |
508 | case IEEE80211_FCTL_TODS: | 511 | case IEEE80211_FCTL_TODS: |
@@ -523,7 +526,13 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) | |||
523 | break; | 526 | break; |
524 | } | 527 | } |
525 | 528 | ||
526 | hdr[12] = 0; /* priority */ | 529 | if (stype & IEEE80211_STYPE_QOS_DATA) { |
530 | const struct ieee80211_hdr_3addrqos *qoshdr = | ||
531 | (struct ieee80211_hdr_3addrqos *)skb->data; | ||
532 | hdr[12] = le16_to_cpu(qoshdr->qos_ctl) & IEEE80211_QCTL_TID; | ||
533 | } else | ||
534 | hdr[12] = 0; /* priority */ | ||
535 | |||
527 | hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ | 536 | hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ |
528 | } | 537 | } |
529 | 538 | ||
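
For context, the 16-byte pseudo-header that michael_mic_hdr() builds is DA (6 bytes), SA (6 bytes), a priority byte and three reserved zero bytes; the change above makes QoS data frames put their TID into the priority byte instead of a constant zero. A small stand-alone illustration with made-up addresses:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build the 16-byte Michael MIC pseudo-header: DA, SA, priority, three
 * zero bytes. 'priority' is 0 for plain data frames and the QoS TID for
 * QoS data frames. */
static void mic_hdr(uint8_t hdr[16], const uint8_t da[6],
		    const uint8_t sa[6], uint8_t priority)
{
	memcpy(hdr, da, 6);
	memcpy(hdr + 6, sa, 6);
	hdr[12] = priority;
	hdr[13] = hdr[14] = hdr[15] = 0;
}

int main(void)
{
	const uint8_t da[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t sa[6] = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb };
	uint8_t hdr[16];
	int i;

	mic_hdr(hdr, da, sa, 5);	/* e.g. a QoS frame with TID 5 */
	for (i = 0; i < 16; i++)
		printf("%02x ", hdr[i]);
	printf("\n");
	return 0;
}
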
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 604b7b0097bc..2bf567fd5a17 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -369,7 +369,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
369 | 369 | ||
370 | /* Put this code here so that we avoid duplicating it in all | 370 | /* Put this code here so that we avoid duplicating it in all |
371 | * Rx paths. - Jean II */ | 371 | * Rx paths. - Jean II */ |
372 | #ifdef CONFIG_WIRELESS_EXT | ||
373 | #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ | 372 | #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ |
374 | /* If spy monitoring on */ | 373 | /* If spy monitoring on */ |
375 | if (ieee->spy_data.spy_number > 0) { | 374 | if (ieee->spy_data.spy_number > 0) { |
@@ -398,7 +397,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
398 | wireless_spy_update(ieee->dev, hdr->addr2, &wstats); | 397 | wireless_spy_update(ieee->dev, hdr->addr2, &wstats); |
399 | } | 398 | } |
400 | #endif /* IW_WIRELESS_SPY */ | 399 | #endif /* IW_WIRELESS_SPY */ |
401 | #endif /* CONFIG_WIRELESS_EXT */ | ||
402 | 400 | ||
403 | #ifdef NOT_YET | 401 | #ifdef NOT_YET |
404 | hostap_update_rx_stats(local->ap, hdr, rx_stats); | 402 | hostap_update_rx_stats(local->ap, hdr, rx_stats); |
@@ -1692,8 +1690,8 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, | |||
1692 | WLAN_FC_GET_STYPE(le16_to_cpu | 1690 | WLAN_FC_GET_STYPE(le16_to_cpu |
1693 | (header->frame_ctl))); | 1691 | (header->frame_ctl))); |
1694 | 1692 | ||
1695 | IEEE80211_WARNING("%s: IEEE80211_REASSOC_REQ received\n", | 1693 | IEEE80211_DEBUG_MGMT("%s: IEEE80211_REASSOC_REQ received\n", |
1696 | ieee->dev->name); | 1694 | ieee->dev->name); |
1697 | if (ieee->handle_reassoc_request != NULL) | 1695 | if (ieee->handle_reassoc_request != NULL) |
1698 | ieee->handle_reassoc_request(ieee->dev, | 1696 | ieee->handle_reassoc_request(ieee->dev, |
1699 | (struct ieee80211_reassoc_request *) | 1697 | (struct ieee80211_reassoc_request *) |
@@ -1705,8 +1703,8 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, | |||
1705 | WLAN_FC_GET_STYPE(le16_to_cpu | 1703 | WLAN_FC_GET_STYPE(le16_to_cpu |
1706 | (header->frame_ctl))); | 1704 | (header->frame_ctl))); |
1707 | 1705 | ||
1708 | IEEE80211_WARNING("%s: IEEE80211_ASSOC_REQ received\n", | 1706 | IEEE80211_DEBUG_MGMT("%s: IEEE80211_ASSOC_REQ received\n", |
1709 | ieee->dev->name); | 1707 | ieee->dev->name); |
1710 | if (ieee->handle_assoc_request != NULL) | 1708 | if (ieee->handle_assoc_request != NULL) |
1711 | ieee->handle_assoc_request(ieee->dev); | 1709 | ieee->handle_assoc_request(ieee->dev); |
1712 | break; | 1710 | break; |
@@ -1722,10 +1720,10 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, | |||
1722 | IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n", | 1720 | IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n", |
1723 | WLAN_FC_GET_STYPE(le16_to_cpu | 1721 | WLAN_FC_GET_STYPE(le16_to_cpu |
1724 | (header->frame_ctl))); | 1722 | (header->frame_ctl))); |
1725 | IEEE80211_WARNING("%s: Unknown management packet: %d\n", | 1723 | IEEE80211_DEBUG_MGMT("%s: Unknown management packet: %d\n", |
1726 | ieee->dev->name, | 1724 | ieee->dev->name, |
1727 | WLAN_FC_GET_STYPE(le16_to_cpu | 1725 | WLAN_FC_GET_STYPE(le16_to_cpu |
1728 | (header->frame_ctl))); | 1726 | (header->frame_ctl))); |
1729 | break; | 1727 | break; |
1730 | } | 1728 | } |
1731 | } | 1729 | } |
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 8b4332f53394..233d527c6953 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c | |||
@@ -220,13 +220,43 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, | |||
220 | return txb; | 220 | return txb; |
221 | } | 221 | } |
222 | 222 | ||
223 | static int ieee80211_classify(struct sk_buff *skb) | ||
224 | { | ||
225 | struct ethhdr *eth; | ||
226 | struct iphdr *ip; | ||
227 | |||
228 | eth = (struct ethhdr *)skb->data; | ||
229 | if (eth->h_proto != __constant_htons(ETH_P_IP)) | ||
230 | return 0; | ||
231 | |||
232 | ip = skb->nh.iph; | ||
233 | switch (ip->tos & 0xfc) { | ||
234 | case 0x20: | ||
235 | return 2; | ||
236 | case 0x40: | ||
237 | return 1; | ||
238 | case 0x60: | ||
239 | return 3; | ||
240 | case 0x80: | ||
241 | return 4; | ||
242 | case 0xa0: | ||
243 | return 5; | ||
244 | case 0xc0: | ||
245 | return 6; | ||
246 | case 0xe0: | ||
247 | return 7; | ||
248 | default: | ||
249 | return 0; | ||
250 | } | ||
251 | } | ||
252 | |||
223 | /* Incoming skb is converted to a txb which consists of | 253 | /* Incoming skb is converted to a txb which consists of |
224 | * a block of 802.11 fragment packets (stored as skbs) */ | 254 | * a block of 802.11 fragment packets (stored as skbs) */ |
225 | int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | 255 | int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) |
226 | { | 256 | { |
227 | struct ieee80211_device *ieee = netdev_priv(dev); | 257 | struct ieee80211_device *ieee = netdev_priv(dev); |
228 | struct ieee80211_txb *txb = NULL; | 258 | struct ieee80211_txb *txb = NULL; |
229 | struct ieee80211_hdr_3addr *frag_hdr; | 259 | struct ieee80211_hdr_3addrqos *frag_hdr; |
230 | int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, | 260 | int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, |
231 | rts_required; | 261 | rts_required; |
232 | unsigned long flags; | 262 | unsigned long flags; |
@@ -234,9 +264,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
234 | int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv; | 264 | int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv; |
235 | int bytes, fc, hdr_len; | 265 | int bytes, fc, hdr_len; |
236 | struct sk_buff *skb_frag; | 266 | struct sk_buff *skb_frag; |
237 | struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */ | 267 | struct ieee80211_hdr_3addrqos header = {/* Ensure zero initialized */ |
238 | .duration_id = 0, | 268 | .duration_id = 0, |
239 | .seq_ctl = 0 | 269 | .seq_ctl = 0, |
270 | .qos_ctl = 0 | ||
240 | }; | 271 | }; |
241 | u8 dest[ETH_ALEN], src[ETH_ALEN]; | 272 | u8 dest[ETH_ALEN], src[ETH_ALEN]; |
242 | struct ieee80211_crypt_data *crypt; | 273 | struct ieee80211_crypt_data *crypt; |
@@ -282,12 +313,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
282 | memcpy(dest, skb->data, ETH_ALEN); | 313 | memcpy(dest, skb->data, ETH_ALEN); |
283 | memcpy(src, skb->data + ETH_ALEN, ETH_ALEN); | 314 | memcpy(src, skb->data + ETH_ALEN, ETH_ALEN); |
284 | 315 | ||
285 | /* Advance the SKB to the start of the payload */ | ||
286 | skb_pull(skb, sizeof(struct ethhdr)); | ||
287 | |||
288 | /* Determine total amount of storage required for TXB packets */ | ||
289 | bytes = skb->len + SNAP_SIZE + sizeof(u16); | ||
290 | |||
291 | if (host_encrypt || host_build_iv) | 316 | if (host_encrypt || host_build_iv) |
292 | fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | | 317 | fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | |
293 | IEEE80211_FCTL_PROTECTED; | 318 | IEEE80211_FCTL_PROTECTED; |
@@ -306,9 +331,23 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
306 | memcpy(header.addr2, src, ETH_ALEN); | 331 | memcpy(header.addr2, src, ETH_ALEN); |
307 | memcpy(header.addr3, ieee->bssid, ETH_ALEN); | 332 | memcpy(header.addr3, ieee->bssid, ETH_ALEN); |
308 | } | 333 | } |
309 | header.frame_ctl = cpu_to_le16(fc); | ||
310 | hdr_len = IEEE80211_3ADDR_LEN; | 334 | hdr_len = IEEE80211_3ADDR_LEN; |
311 | 335 | ||
336 | if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) { | ||
337 | fc |= IEEE80211_STYPE_QOS_DATA; | ||
338 | hdr_len += 2; | ||
339 | |||
340 | skb->priority = ieee80211_classify(skb); | ||
341 | header.qos_ctl |= skb->priority & IEEE80211_QCTL_TID; | ||
342 | } | ||
343 | header.frame_ctl = cpu_to_le16(fc); | ||
344 | |||
345 | /* Advance the SKB to the start of the payload */ | ||
346 | skb_pull(skb, sizeof(struct ethhdr)); | ||
347 | |||
348 | /* Determine total amount of storage required for TXB packets */ | ||
349 | bytes = skb->len + SNAP_SIZE + sizeof(u16); | ||
350 | |||
312 | /* Encrypt msdu first on the whole data packet. */ | 351 | /* Encrypt msdu first on the whole data packet. */ |
313 | if ((host_encrypt || host_encrypt_msdu) && | 352 | if ((host_encrypt || host_encrypt_msdu) && |
314 | crypt && crypt->ops && crypt->ops->encrypt_msdu) { | 353 | crypt && crypt->ops && crypt->ops->encrypt_msdu) { |
@@ -402,7 +441,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
402 | if (rts_required) { | 441 | if (rts_required) { |
403 | skb_frag = txb->fragments[0]; | 442 | skb_frag = txb->fragments[0]; |
404 | frag_hdr = | 443 | frag_hdr = |
405 | (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); | 444 | (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len); |
406 | 445 | ||
407 | /* | 446 | /* |
408 | * Set header frame_ctl to the RTS. | 447 | * Set header frame_ctl to the RTS. |
@@ -433,7 +472,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
433 | crypt->ops->extra_mpdu_prefix_len); | 472 | crypt->ops->extra_mpdu_prefix_len); |
434 | 473 | ||
435 | frag_hdr = | 474 | frag_hdr = |
436 | (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); | 475 | (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len); |
437 | memcpy(frag_hdr, &header, hdr_len); | 476 | memcpy(frag_hdr, &header, hdr_len); |
438 | 477 | ||
439 | /* If this is not the last fragment, then add the MOREFRAGS | 478 | /* If this is not the last fragment, then add the MOREFRAGS |
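
ieee80211_classify() above derives the 802.11e TID from the IP TOS byte: a handful of class-selector style values map to priorities 1 through 7, and everything else falls back to best effort (0). The same mapping in a small stand-alone form:

#include <stdint.h>
#include <stdio.h>

/* Same mapping as ieee80211_classify(): look only at tos & 0xfc and
 * translate it into a 0-7 user priority / TID. */
static int classify_tos(uint8_t tos)
{
	switch (tos & 0xfc) {
	case 0x20: return 2;
	case 0x40: return 1;
	case 0x60: return 3;
	case 0x80: return 4;
	case 0xa0: return 5;
	case 0xc0: return 6;
	case 0xe0: return 7;
	default:   return 0;	/* best effort */
	}
}

int main(void)
{
	uint8_t samples[] = { 0x00, 0x20, 0xa0, 0xe0 };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++)
		printf("tos 0x%02x -> priority %d\n",
		       samples[i], classify_tos(samples[i]));
	return 0;
}
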
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index b885fd189403..a78c4f845f66 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c | |||
@@ -50,7 +50,8 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
50 | char *p; | 50 | char *p; |
51 | struct iw_event iwe; | 51 | struct iw_event iwe; |
52 | int i, j; | 52 | int i, j; |
53 | u8 max_rate, rate; | 53 | char *current_val; /* For rates */ |
54 | u8 rate; | ||
54 | 55 | ||
55 | /* First entry *MUST* be the AP MAC address */ | 56 | /* First entry *MUST* be the AP MAC address */ |
56 | iwe.cmd = SIOCGIWAP; | 57 | iwe.cmd = SIOCGIWAP; |
@@ -107,9 +108,13 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
107 | start = iwe_stream_add_point(start, stop, &iwe, network->ssid); | 108 | start = iwe_stream_add_point(start, stop, &iwe, network->ssid); |
108 | 109 | ||
109 | /* Add basic and extended rates */ | 110 | /* Add basic and extended rates */ |
110 | max_rate = 0; | 111 | /* Rate : stuffing multiple values in a single event require a bit |
111 | p = custom; | 112 | * more of magic - Jean II */ |
112 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); | 113 | current_val = start + IW_EV_LCP_LEN; |
114 | iwe.cmd = SIOCGIWRATE; | ||
115 | /* Those two flags are ignored... */ | ||
116 | iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; | ||
117 | |||
113 | for (i = 0, j = 0; i < network->rates_len;) { | 118 | for (i = 0, j = 0; i < network->rates_len;) { |
114 | if (j < network->rates_ex_len && | 119 | if (j < network->rates_ex_len && |
115 | ((network->rates_ex[j] & 0x7F) < | 120 | ((network->rates_ex[j] & 0x7F) < |
@@ -117,28 +122,21 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee, | |||
117 | rate = network->rates_ex[j++] & 0x7F; | 122 | rate = network->rates_ex[j++] & 0x7F; |
118 | else | 123 | else |
119 | rate = network->rates[i++] & 0x7F; | 124 | rate = network->rates[i++] & 0x7F; |
120 | if (rate > max_rate) | 125 | /* Bit rate given in 500 kb/s units (+ 0x80) */ |
121 | max_rate = rate; | 126 | iwe.u.bitrate.value = ((rate & 0x7f) * 500000); |
122 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), | 127 | /* Add new value to event */ |
123 | "%d%s ", rate >> 1, (rate & 1) ? ".5" : ""); | 128 | current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN); |
124 | } | 129 | } |
125 | for (; j < network->rates_ex_len; j++) { | 130 | for (; j < network->rates_ex_len; j++) { |
126 | rate = network->rates_ex[j] & 0x7F; | 131 | rate = network->rates_ex[j] & 0x7F; |
127 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), | 132 | /* Bit rate given in 500 kb/s units (+ 0x80) */ |
128 | "%d%s ", rate >> 1, (rate & 1) ? ".5" : ""); | 133 | iwe.u.bitrate.value = ((rate & 0x7f) * 500000); |
129 | if (rate > max_rate) | 134 | /* Add new value to event */ |
130 | max_rate = rate; | 135 | current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN); |
131 | } | 136 | } |
132 | 137 | /* Check if we added any rate */ | |
133 | iwe.cmd = SIOCGIWRATE; | 138 | if((current_val - start) > IW_EV_LCP_LEN) |
134 | iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; | 139 | start = current_val; |
135 | iwe.u.bitrate.value = max_rate * 500000; | ||
136 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_PARAM_LEN); | ||
137 | |||
138 | iwe.cmd = IWEVCUSTOM; | ||
139 | iwe.u.data.length = p - custom; | ||
140 | if (iwe.u.data.length) | ||
141 | start = iwe_stream_add_point(start, stop, &iwe, custom); | ||
142 | 140 | ||
143 | /* Add quality statistics */ | 141 | /* Add quality statistics */ |
144 | iwe.cmd = IWEVQUAL; | 142 | iwe.cmd = IWEVQUAL; |
@@ -505,7 +503,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, | |||
505 | len = sec->key_sizes[key]; | 503 | len = sec->key_sizes[key]; |
506 | memcpy(keybuf, sec->keys[key], len); | 504 | memcpy(keybuf, sec->keys[key], len); |
507 | 505 | ||
508 | erq->length = (len >= 0 ? len : 0); | 506 | erq->length = len; |
509 | erq->flags |= IW_ENCODE_ENABLED; | 507 | erq->flags |= IW_ENCODE_ENABLED; |
510 | 508 | ||
511 | if (ieee->open_wep) | 509 | if (ieee->open_wep) |
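
The rewritten scan code reports every supported rate as its own SIOCGIWRATE value. 802.11 supported-rates octets encode the rate in units of 500 kb/s, with bit 0x80 flagging a basic rate, which is where the (rate & 0x7f) * 500000 above comes from. A tiny illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example supported-rates octets: 0x82 = basic 1 Mb/s,
	 * 0x0b = 5.5 Mb/s, 0x6c = 54 Mb/s. Values are in 500 kb/s units,
	 * bit 0x80 marks a basic rate. */
	uint8_t rates[] = { 0x82, 0x0b, 0x6c };
	unsigned int i;

	for (i = 0; i < sizeof(rates); i++)
		printf("octet 0x%02x -> %u bit/s%s\n", rates[i],
		       (rates[i] & 0x7f) * 500000u,
		       (rates[i] & 0x80) ? " (basic)" : "");
	return 0;
}
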
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c index 57ea9f6f465c..5d90b9a6ee50 100644 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c | |||
@@ -82,51 +82,52 @@ ieee80211softmac_assoc_timeout(void *d) | |||
82 | ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, NULL); | 82 | ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, NULL); |
83 | } | 83 | } |
84 | 84 | ||
85 | /* Sends out a disassociation request to the desired AP */ | 85 | void |
86 | static void | 86 | ieee80211softmac_disassoc(struct ieee80211softmac_device *mac) |
87 | ieee80211softmac_disassoc(struct ieee80211softmac_device *mac, u16 reason) | ||
88 | { | 87 | { |
89 | unsigned long flags; | 88 | unsigned long flags; |
89 | |||
90 | spin_lock_irqsave(&mac->lock, flags); | ||
91 | if (mac->associnfo.associating) | ||
92 | cancel_delayed_work(&mac->associnfo.timeout); | ||
93 | |||
94 | netif_carrier_off(mac->dev); | ||
95 | |||
96 | mac->associated = 0; | ||
97 | mac->associnfo.bssvalid = 0; | ||
98 | mac->associnfo.associating = 0; | ||
99 | ieee80211softmac_init_txrates(mac); | ||
100 | ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL); | ||
101 | spin_unlock_irqrestore(&mac->lock, flags); | ||
102 | } | ||
103 | |||
104 | /* Sends out a disassociation request to the desired AP */ | ||
105 | void | ||
106 | ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason) | ||
107 | { | ||
90 | struct ieee80211softmac_network *found; | 108 | struct ieee80211softmac_network *found; |
91 | 109 | ||
92 | if (mac->associnfo.bssvalid && mac->associated) { | 110 | if (mac->associnfo.bssvalid && mac->associated) { |
93 | found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); | 111 | found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); |
94 | if (found) | 112 | if (found) |
95 | ieee80211softmac_send_mgt_frame(mac, found, IEEE80211_STYPE_DISASSOC, reason); | 113 | ieee80211softmac_send_mgt_frame(mac, found, IEEE80211_STYPE_DISASSOC, reason); |
96 | } else if (mac->associnfo.associating) { | ||
97 | cancel_delayed_work(&mac->associnfo.timeout); | ||
98 | } | 114 | } |
99 | 115 | ||
100 | /* Change our state */ | 116 | ieee80211softmac_disassoc(mac); |
101 | spin_lock_irqsave(&mac->lock, flags); | ||
102 | /* Do NOT clear bssvalid as that will break ieee80211softmac_assoc_work! */ | ||
103 | mac->associated = 0; | ||
104 | mac->associnfo.associating = 0; | ||
105 | ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL); | ||
106 | spin_unlock_irqrestore(&mac->lock, flags); | ||
107 | } | 117 | } |
108 | 118 | ||
109 | static inline int | 119 | static inline int |
110 | we_support_all_basic_rates(struct ieee80211softmac_device *mac, u8 *from, u8 from_len) | 120 | we_support_all_basic_rates(struct ieee80211softmac_device *mac, u8 *from, u8 from_len) |
111 | { | 121 | { |
112 | int idx, search, found; | 122 | int idx; |
113 | u8 rate, search_rate; | 123 | u8 rate; |
114 | 124 | ||
115 | for (idx = 0; idx < (from_len); idx++) { | 125 | for (idx = 0; idx < (from_len); idx++) { |
116 | rate = (from)[idx]; | 126 | rate = (from)[idx]; |
117 | if (!(rate & IEEE80211_BASIC_RATE_MASK)) | 127 | if (!(rate & IEEE80211_BASIC_RATE_MASK)) |
118 | continue; | 128 | continue; |
119 | found = 0; | ||
120 | rate &= ~IEEE80211_BASIC_RATE_MASK; | 129 | rate &= ~IEEE80211_BASIC_RATE_MASK; |
121 | for (search = 0; search < mac->ratesinfo.count; search++) { | 130 | if (!ieee80211softmac_ratesinfo_rate_supported(&mac->ratesinfo, rate)) |
122 | search_rate = mac->ratesinfo.rates[search]; | ||
123 | search_rate &= ~IEEE80211_BASIC_RATE_MASK; | ||
124 | if (rate == search_rate) { | ||
125 | found = 1; | ||
126 | break; | ||
127 | } | ||
128 | } | ||
129 | if (!found) | ||
130 | return 0; | 131 | return 0; |
131 | } | 132 | } |
132 | return 1; | 133 | return 1; |
@@ -176,14 +177,18 @@ ieee80211softmac_assoc_work(void *d) | |||
176 | struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; | 177 | struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; |
177 | struct ieee80211softmac_network *found = NULL; | 178 | struct ieee80211softmac_network *found = NULL; |
178 | struct ieee80211_network *net = NULL, *best = NULL; | 179 | struct ieee80211_network *net = NULL, *best = NULL; |
180 | int bssvalid; | ||
179 | unsigned long flags; | 181 | unsigned long flags; |
180 | 182 | ||
183 | /* ieee80211_disassoc might clear this */ | ||
184 | bssvalid = mac->associnfo.bssvalid; | ||
185 | |||
181 | /* meh */ | 186 | /* meh */ |
182 | if (mac->associated) | 187 | if (mac->associated) |
183 | ieee80211softmac_disassoc(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT); | 188 | ieee80211softmac_send_disassoc_req(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT); |
184 | 189 | ||
185 | /* try to find the requested network in our list, if we found one already */ | 190 | /* try to find the requested network in our list, if we found one already */ |
186 | if (mac->associnfo.bssvalid || mac->associnfo.bssfixed) | 191 | if (bssvalid || mac->associnfo.bssfixed) |
187 | found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); | 192 | found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); |
188 | 193 | ||
189 | /* Search the ieee80211 networks for this network if we didn't find it by bssid, | 194 | /* Search the ieee80211 networks for this network if we didn't find it by bssid, |
@@ -297,6 +302,9 @@ ieee80211softmac_associated(struct ieee80211softmac_device *mac, | |||
297 | struct ieee80211softmac_network *net) | 302 | struct ieee80211softmac_network *net) |
298 | { | 303 | { |
299 | mac->associnfo.associating = 0; | 304 | mac->associnfo.associating = 0; |
305 | mac->associnfo.supported_rates = net->supported_rates; | ||
306 | ieee80211softmac_recalc_txrates(mac); | ||
307 | |||
300 | mac->associated = 1; | 308 | mac->associated = 1; |
301 | if (mac->set_bssid_filter) | 309 | if (mac->set_bssid_filter) |
302 | mac->set_bssid_filter(mac->dev, net->bssid); | 310 | mac->set_bssid_filter(mac->dev, net->bssid); |
@@ -380,7 +388,6 @@ ieee80211softmac_handle_disassoc(struct net_device * dev, | |||
380 | struct ieee80211_disassoc *disassoc) | 388 | struct ieee80211_disassoc *disassoc) |
381 | { | 389 | { |
382 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | 390 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); |
383 | unsigned long flags; | ||
384 | 391 | ||
385 | if (unlikely(!mac->running)) | 392 | if (unlikely(!mac->running)) |
386 | return -ENODEV; | 393 | return -ENODEV; |
@@ -392,14 +399,11 @@ ieee80211softmac_handle_disassoc(struct net_device * dev, | |||
392 | return 0; | 399 | return 0; |
393 | 400 | ||
394 | dprintk(KERN_INFO PFX "got disassoc frame\n"); | 401 | dprintk(KERN_INFO PFX "got disassoc frame\n"); |
395 | netif_carrier_off(dev); | 402 | ieee80211softmac_disassoc(mac); |
396 | spin_lock_irqsave(&mac->lock, flags); | 403 | |
397 | mac->associnfo.bssvalid = 0; | 404 | /* try to reassociate */ |
398 | mac->associated = 0; | ||
399 | ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL); | ||
400 | schedule_work(&mac->associnfo.work); | 405 | schedule_work(&mac->associnfo.work); |
401 | spin_unlock_irqrestore(&mac->lock, flags); | 406 | |
402 | |||
403 | return 0; | 407 | return 0; |
404 | } | 408 | } |
405 | 409 | ||
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c index 06e332624665..084b6211f293 100644 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/net/ieee80211/softmac/ieee80211softmac_auth.c | |||
@@ -279,6 +279,9 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac, | |||
279 | struct list_head *list_ptr; | 279 | struct list_head *list_ptr; |
280 | unsigned long flags; | 280 | unsigned long flags; |
281 | 281 | ||
282 | /* deauthentication implies disassociation */ | ||
283 | ieee80211softmac_disassoc(mac); | ||
284 | |||
282 | /* Lock and reset status flags */ | 285 | /* Lock and reset status flags */ |
283 | spin_lock_irqsave(&mac->lock, flags); | 286 | spin_lock_irqsave(&mac->lock, flags); |
284 | net->authenticating = 0; | 287 | net->authenticating = 0; |
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c index 8cc8f3f0f8e7..4b153f7cc96c 100644 --- a/net/ieee80211/softmac/ieee80211softmac_event.c +++ b/net/ieee80211/softmac/ieee80211softmac_event.c | |||
@@ -38,7 +38,8 @@ | |||
38 | * The event context is private and can only be used from | 38 | * The event context is private and can only be used from |
39 | * within this module. Its meaning varies with the event | 39 | * within this module. Its meaning varies with the event |
40 | * type: | 40 | * type: |
41 | * SCAN_FINISHED: no special meaning | 41 | * SCAN_FINISHED, |
42 | * DISASSOCIATED: NULL | ||
42 | * ASSOCIATED, | 43 | * ASSOCIATED, |
43 | * ASSOCIATE_FAILED, | 44 | * ASSOCIATE_FAILED, |
44 | * ASSOCIATE_TIMEOUT, | 45 | * ASSOCIATE_TIMEOUT, |
@@ -59,15 +60,15 @@ | |||
59 | */ | 60 | */ |
60 | 61 | ||
61 | static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = { | 62 | static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = { |
62 | "scan finished", | 63 | NULL, /* scan finished */ |
63 | "associated", | 64 | NULL, /* associated */ |
64 | "associating failed", | 65 | "associating failed", |
65 | "associating timed out", | 66 | "associating timed out", |
66 | "authenticated", | 67 | "authenticated", |
67 | "authenticating failed", | 68 | "authenticating failed", |
68 | "authenticating timed out", | 69 | "authenticating timed out", |
69 | "associating failed because no suitable network was found", | 70 | "associating failed because no suitable network was found", |
70 | "disassociated", | 71 | NULL, /* disassociated */ |
71 | }; | 72 | }; |
72 | 73 | ||
73 | 74 | ||
@@ -136,30 +137,24 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve | |||
136 | int we_event; | 137 | int we_event; |
137 | char *msg = NULL; | 138 | char *msg = NULL; |
138 | 139 | ||
140 | memset(&wrqu, '\0', sizeof (union iwreq_data)); | ||
141 | |||
139 | switch(event) { | 142 | switch(event) { |
140 | case IEEE80211SOFTMAC_EVENT_ASSOCIATED: | 143 | case IEEE80211SOFTMAC_EVENT_ASSOCIATED: |
141 | network = (struct ieee80211softmac_network *)event_ctx; | 144 | network = (struct ieee80211softmac_network *)event_ctx; |
142 | wrqu.data.length = 0; | ||
143 | wrqu.data.flags = 0; | ||
144 | memcpy(wrqu.ap_addr.sa_data, &network->bssid[0], ETH_ALEN); | 145 | memcpy(wrqu.ap_addr.sa_data, &network->bssid[0], ETH_ALEN); |
145 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 146 | /* fall through */ |
146 | we_event = SIOCGIWAP; | ||
147 | break; | ||
148 | case IEEE80211SOFTMAC_EVENT_DISASSOCIATED: | 147 | case IEEE80211SOFTMAC_EVENT_DISASSOCIATED: |
149 | wrqu.data.length = 0; | ||
150 | wrqu.data.flags = 0; | ||
151 | memset(&wrqu, '\0', sizeof (union iwreq_data)); | ||
152 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 148 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; |
153 | we_event = SIOCGIWAP; | 149 | we_event = SIOCGIWAP; |
154 | break; | 150 | break; |
155 | case IEEE80211SOFTMAC_EVENT_SCAN_FINISHED: | 151 | case IEEE80211SOFTMAC_EVENT_SCAN_FINISHED: |
156 | wrqu.data.length = 0; | ||
157 | wrqu.data.flags = 0; | ||
158 | memset(&wrqu, '\0', sizeof (union iwreq_data)); | ||
159 | we_event = SIOCGIWSCAN; | 152 | we_event = SIOCGIWSCAN; |
160 | break; | 153 | break; |
161 | default: | 154 | default: |
162 | msg = event_descriptions[event]; | 155 | msg = event_descriptions[event]; |
156 | if (!msg) | ||
157 | msg = "SOFTMAC EVENT BUG"; | ||
163 | wrqu.data.length = strlen(msg); | 158 | wrqu.data.length = strlen(msg); |
164 | we_event = IWEVCUSTOM; | 159 | we_event = IWEVCUSTOM; |
165 | break; | 160 | break; |
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c index 6252be2c0db9..4b2e57d12418 100644 --- a/net/ieee80211/softmac/ieee80211softmac_module.c +++ b/net/ieee80211/softmac/ieee80211softmac_module.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include "ieee80211softmac_priv.h" | 27 | #include "ieee80211softmac_priv.h" |
28 | #include <linux/sort.h> | 28 | #include <linux/sort.h> |
29 | #include <linux/etherdevice.h> | ||
29 | 30 | ||
30 | struct net_device *alloc_ieee80211softmac(int sizeof_priv) | 31 | struct net_device *alloc_ieee80211softmac(int sizeof_priv) |
31 | { | 32 | { |
@@ -61,14 +62,6 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv) | |||
61 | softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; | 62 | softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; |
62 | softmac->stop_scan = ieee80211softmac_stop_scan_implementation; | 63 | softmac->stop_scan = ieee80211softmac_stop_scan_implementation; |
63 | 64 | ||
64 | //TODO: The mcast rate has to be assigned dynamically somewhere (in scanning, association. Not sure...) | ||
65 | // It has to be set to the highest rate all stations in the current network can handle. | ||
66 | softmac->txrates.mcast_rate = IEEE80211_CCK_RATE_1MB; | ||
67 | softmac->txrates.mcast_fallback = IEEE80211_CCK_RATE_1MB; | ||
68 | /* This is reassigned in ieee80211softmac_start to sane values. */ | ||
69 | softmac->txrates.default_rate = IEEE80211_CCK_RATE_1MB; | ||
70 | softmac->txrates.default_fallback = IEEE80211_CCK_RATE_1MB; | ||
71 | |||
72 | /* to start with, we can't send anything ... */ | 65 | /* to start with, we can't send anything ... */ |
73 | netif_carrier_off(dev); | 66 | netif_carrier_off(dev); |
74 | 67 | ||
@@ -170,15 +163,82 @@ static void ieee80211softmac_start_check_rates(struct ieee80211softmac_device *m | |||
170 | } | 163 | } |
171 | } | 164 | } |
172 | 165 | ||
173 | void ieee80211softmac_start(struct net_device *dev) | 166 | int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate) |
167 | { | ||
168 | int search; | ||
169 | u8 search_rate; | ||
170 | |||
171 | for (search = 0; search < ri->count; search++) { | ||
172 | search_rate = ri->rates[search]; | ||
173 | search_rate &= ~IEEE80211_BASIC_RATE_MASK; | ||
174 | if (rate == search_rate) | ||
175 | return 1; | ||
176 | } | ||
177 | |||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | /* Finds the highest rate which is: | ||
182 | * 1. Present in ri (optionally a basic rate) | ||
183 | * 2. Supported by the device | ||
184 | * 3. Less than or equal to the user-defined rate | ||
185 | */ | ||
186 | static u8 highest_supported_rate(struct ieee80211softmac_device *mac, | ||
187 | struct ieee80211softmac_ratesinfo *ri, int basic_only) | ||
188 | { | ||
189 | u8 user_rate = mac->txrates.user_rate; | ||
190 | int i; | ||
191 | |||
192 | if (ri->count == 0) { | ||
193 | dprintk(KERN_ERR PFX "empty ratesinfo?\n"); | ||
194 | return IEEE80211_CCK_RATE_1MB; | ||
195 | } | ||
196 | |||
197 | for (i = ri->count - 1; i >= 0; i--) { | ||
198 | u8 rate = ri->rates[i]; | ||
199 | if (basic_only && !(rate & IEEE80211_BASIC_RATE_MASK)) | ||
200 | continue; | ||
201 | rate &= ~IEEE80211_BASIC_RATE_MASK; | ||
202 | if (rate > user_rate) | ||
203 | continue; | ||
204 | if (ieee80211softmac_ratesinfo_rate_supported(&mac->ratesinfo, rate)) | ||
205 | return rate; | ||
206 | } | ||
207 | |||
208 | /* If we haven't found a suitable rate by now, just trust the user */ | ||
209 | return user_rate; | ||
210 | } | ||
211 | |||
212 | void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac) | ||
213 | { | ||
214 | struct ieee80211softmac_txrates *txrates = &mac->txrates; | ||
215 | struct ieee80211softmac_txrates oldrates; | ||
216 | u32 change = 0; | ||
217 | |||
218 | if (mac->txrates_change) | ||
219 | oldrates = mac->txrates; | ||
220 | |||
221 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT; | ||
222 | txrates->default_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 0); | ||
223 | |||
224 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK; | ||
225 | txrates->default_fallback = lower_rate(mac, txrates->default_rate); | ||
226 | |||
227 | change |= IEEE80211SOFTMAC_TXRATECHG_MCAST; | ||
228 | txrates->mcast_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 1); | ||
229 | |||
230 | if (mac->txrates_change) | ||
231 | mac->txrates_change(mac->dev, change, &oldrates); | ||
232 | |||
233 | } | ||
234 | |||
235 | void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac) | ||
174 | { | 236 | { |
175 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
176 | struct ieee80211_device *ieee = mac->ieee; | 237 | struct ieee80211_device *ieee = mac->ieee; |
177 | u32 change = 0; | 238 | u32 change = 0; |
239 | struct ieee80211softmac_txrates *txrates = &mac->txrates; | ||
178 | struct ieee80211softmac_txrates oldrates; | 240 | struct ieee80211softmac_txrates oldrates; |
179 | 241 | ||
180 | ieee80211softmac_start_check_rates(mac); | ||
181 | |||
182 | /* TODO: We need some kind of state machine to lower the default rates | 242 | /* TODO: We need some kind of state machine to lower the default rates |
183 | * if we loose too many packets. | 243 | * if we loose too many packets. |
184 | */ | 244 | */ |
@@ -193,22 +253,37 @@ void ieee80211softmac_start(struct net_device *dev) | |||
193 | more reliable. Note similar logic in | 253 | more reliable. Note similar logic in |
194 | ieee80211softmac_wx_set_rate() */ | 254 | ieee80211softmac_wx_set_rate() */ |
195 | if (ieee->modulation & IEEE80211_CCK_MODULATION) { | 255 | if (ieee->modulation & IEEE80211_CCK_MODULATION) { |
196 | mac->txrates.default_rate = IEEE80211_CCK_RATE_11MB; | 256 | txrates->user_rate = IEEE80211_CCK_RATE_11MB; |
197 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT; | ||
198 | mac->txrates.default_fallback = IEEE80211_CCK_RATE_5MB; | ||
199 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK; | ||
200 | } else if (ieee->modulation & IEEE80211_OFDM_MODULATION) { | 257 | } else if (ieee->modulation & IEEE80211_OFDM_MODULATION) { |
201 | mac->txrates.default_rate = IEEE80211_OFDM_RATE_54MB; | 258 | txrates->user_rate = IEEE80211_OFDM_RATE_54MB; |
202 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT; | ||
203 | mac->txrates.default_fallback = IEEE80211_OFDM_RATE_24MB; | ||
204 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK; | ||
205 | } else | 259 | } else |
206 | assert(0); | 260 | assert(0); |
261 | |||
262 | txrates->default_rate = IEEE80211_CCK_RATE_1MB; | ||
263 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT; | ||
264 | |||
265 | txrates->default_fallback = IEEE80211_CCK_RATE_1MB; | ||
266 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK; | ||
267 | |||
268 | txrates->mcast_rate = IEEE80211_CCK_RATE_1MB; | ||
269 | change |= IEEE80211SOFTMAC_TXRATECHG_MCAST; | ||
270 | |||
271 | txrates->mgt_mcast_rate = IEEE80211_CCK_RATE_1MB; | ||
272 | change |= IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST; | ||
273 | |||
207 | if (mac->txrates_change) | 274 | if (mac->txrates_change) |
208 | mac->txrates_change(dev, change, &oldrates); | 275 | mac->txrates_change(mac->dev, change, &oldrates); |
209 | 276 | ||
210 | mac->running = 1; | 277 | mac->running = 1; |
211 | } | 278 | } |
279 | |||
280 | void ieee80211softmac_start(struct net_device *dev) | ||
281 | { | ||
282 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
283 | |||
284 | ieee80211softmac_start_check_rates(mac); | ||
285 | ieee80211softmac_init_txrates(mac); | ||
286 | } | ||
212 | EXPORT_SYMBOL_GPL(ieee80211softmac_start); | 287 | EXPORT_SYMBOL_GPL(ieee80211softmac_start); |
213 | 288 | ||
214 | void ieee80211softmac_stop(struct net_device *dev) | 289 | void ieee80211softmac_stop(struct net_device *dev) |
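
highest_supported_rate() above intersects the rates advertised by the network with the rates the hardware supports and caps the result at the user-requested rate. The stand-alone sketch below mirrors the non-basic-rate path of that selection on plain arrays; the rate values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define BASIC 0x80	/* corresponds to IEEE80211_BASIC_RATE_MASK */

static int supported(const uint8_t *rates, int n, uint8_t rate)
{
	int i;

	for (i = 0; i < n; i++)
		if ((rates[i] & ~BASIC) == rate)
			return 1;
	return 0;
}

/* Highest rate that the network advertises, the device supports and that
 * does not exceed the user-requested rate (all in 500 kb/s units). */
static uint8_t pick_rate(const uint8_t *net, int net_n,
			 const uint8_t *dev, int dev_n, uint8_t user_rate)
{
	int i;

	for (i = net_n - 1; i >= 0; i--) {
		uint8_t r = net[i] & ~BASIC;

		if (r <= user_rate && supported(dev, dev_n, r))
			return r;
	}
	return user_rate;	/* fall back to the user's choice */
}

int main(void)
{
	/* Made-up example: 11g network, 11b-only device, user asked for 54M */
	uint8_t net[] = { 0x82, 0x84, 0x8b, 0x96, 0x24, 0x30, 0x48, 0x6c };
	uint8_t dev[] = { 0x02, 0x04, 0x0b, 0x16 };

	printf("chosen rate: %u (x 500 kb/s)\n",
	       pick_rate(net, 8, dev, 4, 0x6c));
	return 0;
}
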
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h index 65d9816c8ecc..fa1f8e3acfc0 100644 --- a/net/ieee80211/softmac/ieee80211softmac_priv.h +++ b/net/ieee80211/softmac/ieee80211softmac_priv.h | |||
@@ -116,7 +116,10 @@ ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac, | |||
116 | struct ieee80211softmac_essid *essid); | 116 | struct ieee80211softmac_essid *essid); |
117 | 117 | ||
118 | /* Rates related */ | 118 | /* Rates related */ |
119 | int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate); | ||
119 | u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta); | 120 | u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta); |
121 | void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac); | ||
122 | void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac); | ||
120 | static inline u8 lower_rate(struct ieee80211softmac_device *mac, u8 rate) { | 123 | static inline u8 lower_rate(struct ieee80211softmac_device *mac, u8 rate) { |
121 | return ieee80211softmac_lower_rate_delta(mac, rate, 1); | 124 | return ieee80211softmac_lower_rate_delta(mac, rate, 1); |
122 | } | 125 | } |
@@ -150,6 +153,8 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev, | |||
150 | int ieee80211softmac_handle_reassoc_req(struct net_device * dev, | 153 | int ieee80211softmac_handle_reassoc_req(struct net_device * dev, |
151 | struct ieee80211_reassoc_request * reassoc); | 154 | struct ieee80211_reassoc_request * reassoc); |
152 | void ieee80211softmac_assoc_timeout(void *d); | 155 | void ieee80211softmac_assoc_timeout(void *d); |
156 | void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); | ||
157 | void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); | ||
153 | 158 | ||
154 | /* some helper functions */ | 159 | /* some helper functions */ |
155 | static inline int ieee80211softmac_scan_handlers_check_self(struct ieee80211softmac_device *sm) | 160 | static inline int ieee80211softmac_scan_handlers_check_self(struct ieee80211softmac_device *sm) |
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c index 27edb2b5581a..22aa6199185b 100644 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c | |||
@@ -211,8 +211,8 @@ ieee80211softmac_wx_set_rate(struct net_device *net_dev, | |||
211 | if (is_ofdm && !(ieee->modulation & IEEE80211_OFDM_MODULATION)) | 211 | if (is_ofdm && !(ieee->modulation & IEEE80211_OFDM_MODULATION)) |
212 | goto out_unlock; | 212 | goto out_unlock; |
213 | 213 | ||
214 | mac->txrates.default_rate = rate; | 214 | mac->txrates.user_rate = rate; |
215 | mac->txrates.default_fallback = lower_rate(mac, rate); | 215 | ieee80211softmac_recalc_txrates(mac); |
216 | err = 0; | 216 | err = 0; |
217 | 217 | ||
218 | out_unlock: | 218 | out_unlock: |
@@ -431,3 +431,35 @@ ieee80211softmac_wx_get_genie(struct net_device *dev, | |||
431 | } | 431 | } |
432 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_genie); | 432 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_genie); |
433 | 433 | ||
434 | int | ||
435 | ieee80211softmac_wx_set_mlme(struct net_device *dev, | ||
436 | struct iw_request_info *info, | ||
437 | union iwreq_data *wrqu, | ||
438 | char *extra) | ||
439 | { | ||
440 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
441 | struct iw_mlme *mlme = (struct iw_mlme *)extra; | ||
442 | u16 reason = cpu_to_le16(mlme->reason_code); | ||
443 | struct ieee80211softmac_network *net; | ||
444 | |||
445 | if (memcmp(mac->associnfo.bssid, mlme->addr.sa_data, ETH_ALEN)) { | ||
446 | printk(KERN_DEBUG PFX "wx_set_mlme: requested operation on net we don't use\n"); | ||
447 | return -EINVAL; | ||
448 | } | ||
449 | |||
450 | switch (mlme->cmd) { | ||
451 | case IW_MLME_DEAUTH: | ||
452 | net = ieee80211softmac_get_network_by_bssid_locked(mac, mlme->addr.sa_data); | ||
453 | if (!net) { | ||
454 | printk(KERN_DEBUG PFX "wx_set_mlme: we should know the net here...\n"); | ||
455 | return -EINVAL; | ||
456 | } | ||
457 | return ieee80211softmac_deauth_req(mac, net, reason); | ||
458 | case IW_MLME_DISASSOC: | ||
459 | ieee80211softmac_send_disassoc_req(mac, reason); | ||
460 | return 0; | ||
461 | default: | ||
462 | return -EOPNOTSUPP; | ||
463 | } | ||
464 | } | ||
465 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_mlme); | ||
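
Drivers expose the new handler to user space by placing it in their wireless-extensions handler table under SIOCSIWMLME. The fragment below is a hypothetical sketch of that hookup; the table name and the surrounding entries are illustrative, not taken from a particular driver.

#include <linux/kernel.h>
#include <net/iw_handler.h>
#include <net/ieee80211softmac_wx.h>

/* Hypothetical handler table; only the MLME entry matters here. */
static const iw_handler example_wx_handlers[] = {
	[SIOCSIWMLME - SIOCIWFIRST] = ieee80211softmac_wx_set_mlme,
	/* ... other standard wext handlers ... */
};

static const struct iw_handler_def example_wx_handler_def = {
	.standard	= example_wx_handlers,
	.num_standard	= ARRAY_SIZE(example_wx_handlers),
};
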